# Dataset-export residue (column header of the table this file was extracted from);
# commented out so the module parses:
# code | repo_path | parsed_code | quality_prob | learning_prob
import sys
from pyspark.sql.functions import *
from pyspark.sql import Row
from pyspark.sql import SparkSession
from AQPython.Annotation import *
# Module-level SparkSession shared by the query functions below
# (getOrCreate reuses an existing session when one is already running).
spark = SparkSession.builder.getOrCreate()
def FilterProperty(df, name, value="", valueArr=None, valueCompare="=", limit=0, negate=False):
    """Filter a Dataframe of AQAnnotations on the value of the named entry in the properties map.

    Args:
        df: Dataframe of AQAnnotations that will be filtered by the specified property name and value.
        name: Name of the property to filter.
        value: Value of the named property to filter.
        valueArr: The array of values of the named property to filter. An OR will be applied to the Strings.
            Only used if value was not specified. Defaults to an empty list.
        valueCompare: Comparison operator to use for the property filter. Default is '='.
            Possible values are '=' and '!=' when valueArr specified.
            Possible values are '=','!=','<','<=','>', and '>=' otherwise.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query. Default is False.

    Returns:
        Dataframe of AQAnnotations
    """
    # Fix: use a None sentinel instead of a mutable default argument ([] would be
    # shared across calls). Passing an explicit list still works as before.
    if valueArr is None:
        valueArr = []
    # NOTE: name/value are spliced directly into the SQL filter expression with no
    # escaping, so they must come from trusted callers.
    query = ""
    if value != "":
        query += ("properties.`" + name + "` " + valueCompare + " '" + value + "'")
    elif len(valueArr) > 0:
        # Only equality/inequality are meaningful for the array form.
        if valueCompare == '=':
            query += ("properties.`" + name + "` in " + "('" + "','".join(map(str, valueArr)) + "')")
        else:
            query += ("properties.`" + name + "` not in " + "('" + "','".join(map(str, valueArr)) + "')")
    if negate:
        query = "!(" + query + ")"
    results = df.filter(query)
    if limit > 0:
        results = results.limit(limit)
    return results
def RegexProperty(df, name, regex, limit=0, negate=False):
    """Filter a Dataframe of AQAnnotations by applying a regex to the named property value in the map.

    Args:
        df: Dataframe of AQAnnotations that will be filtered by the specified property name and regex expression.
        name: Name of the property to filter.
        regex: Regex expression to use for the filter.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query. Default is False.

    Returns:
        Dataframe of AQAnnotations
    """
    # Build a Spark SQL 'rlike' expression against the backtick-quoted map key.
    condition = "properties.`{0}` rlike '{1}'".format(name, regex)
    if negate:
        condition = "!({0})".format(condition)
    filtered = df.filter(condition)
    return filtered.limit(limit) if limit > 0 else filtered
def FilterSet(df, annotSet="", annotSetArr=None, annotSetCompare="=", limit=0, negate=False):
    """Filter a Dataframe of AQAnnotations based on the value in the annotSet field.

    Args:
        df: Dataframe of AQAnnotations that will be filtered by the specified annotation set.
        annotSet: String to filter against the annotSet field in the dataset of AQAnnotations.
        annotSetArr: Array of Strings to filter against the annotSet field. An OR will be applied
            to the Strings. Only used if annotSet was not specified. Defaults to an empty list.
        annotSetCompare: Comparison operator to use for the annotSet field. Default is '='.
            Possible values are '=' and '!='.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query. Default is False.

    Returns:
        Dataframe of AQAnnotations
    """
    # Fix: use a None sentinel instead of a mutable default argument ([] would be
    # shared across calls). Passing an explicit list still works as before.
    if annotSetArr is None:
        annotSetArr = []
    query = ""
    if annotSet != "":
        query += ("annotSet " + annotSetCompare + " \"" + annotSet + "\"")
    elif len(annotSetArr) > 0:
        if annotSetCompare == "=":
            query += ("annotSet in " + "('" + "','".join(map(str, annotSetArr)) + "')")
        else:
            query += ("annotSet not in " + "('" + "','".join(map(str, annotSetArr)) + "')")
    if negate:
        query = "!(" + query + ")"
    results = df.filter(query)
    if limit > 0:
        results = results.limit(limit)
    return results
def FilterType(df, annotType="", annotTypeArr=None, annotTypeCompare="=", limit=0, negate=False):
    """Filter a Dataframe of AQAnnotations based on the value in the annotType field.

    Args:
        df: Dataframe of AQAnnotations that will be filtered by the specified annotation type.
        annotType: String to filter against the annotType field in the dataframe of AQAnnotations.
        annotTypeArr: Array of Strings to filter against the annotType field. An OR will be applied
            to the Strings. Only used if annotType was not specified. Defaults to an empty list.
        annotTypeCompare: Comparison operator to use for the annotType field. Default is '='.
            Possible values are '=' and '!='.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query. Default is False.

    Returns:
        Dataframe of AQAnnotations
    """
    # Fix: use a None sentinel instead of a mutable default argument ([] would be
    # shared across calls). Passing an explicit list still works as before.
    if annotTypeArr is None:
        annotTypeArr = []
    query = ""
    if annotType != "":
        query += ("annotType " + annotTypeCompare + " \"" + annotType + "\"")
    elif len(annotTypeArr) > 0:
        if annotTypeCompare == "=":
            query += ("annotType in " + "('" + "','".join(map(str, annotTypeArr)) + "')")
        else:
            query += ("annotType not in " + "('" + "','".join(map(str, annotTypeArr)) + "')")
    if negate:
        query = "!(" + query + ")"
    results = df.filter(query)
    if limit > 0:
        results = results.limit(limit)
    return results
def Contains(left, right, limit=0, negate=False):
    """Find the annotations in 'left' that contain an annotation from 'right'.

    An annotation A contains an annotation B when A's start/end offsets enclose
    B's start/end offsets (inclusive) within the same document. Only the unique
    container annotations (from 'left') are returned; a left-semi join performs
    the deduplication. With negate=True the result is instead the 'left'
    annotations that contain nothing from 'right' (think NOT contains).

    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they contain AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if they occur in the AQAnnotations from 'left'.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query (think NOT contains). Default is False.

    Returns:
        Dataframe of AQAnnotations
    """
    # Rename the right-hand columns up front; joining two Dataframes derived from
    # the same source confuses Catalyst otherwise.
    renamed = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                   .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", "R_properties")
    # True when the two sides are the very same annotation (same set/type/span);
    # such pairs must not count as containment.
    sameAnnotation = ((col("L.annotSet") == col("R_annotSet")) &
                      (col("L.annotType") == col("R_annotType")) &
                      (col("L.startOffset") == col("R_startOffset")) &
                      (col("L.endOffset") == col("R_endOffset")))
    containsCond = ((col("L.docId") == col("R_docId")) &
                    (col("L.startOffset") <= col("R_startOffset")) &
                    (col("L.endOffset") >= col("R_endOffset")) &
                    (~sameAnnotation))
    if negate:
        # Keep the left rows with no containment match at all.
        results = left.alias("L").join(renamed, containsCond, "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        results = left.alias("L").join(renamed, containsCond, "leftsemi")
    return results.limit(limit) if limit > 0 else results
def ContainedIn(left, right, limit=0, negate=False):
    """Find the annotations in 'left' that are contained by an annotation from 'right'.

    An annotation A is contained in an annotation B when B's start/end offsets
    enclose A's start/end offsets (inclusive) within the same document. The
    contained annotations (from 'left') are returned, deduplicated via a
    left-semi join. With negate=True the result is instead the 'left'
    annotations contained by nothing from 'right' (think NOT contained in).

    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they are contained in AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if they contain AQAnnotations from 'left'.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query (think NOT contained in). Default is False.

    Returns:
        Dataframe of AQAnnotations
    """
    # Rename the right-hand columns up front; joining two Dataframes derived from
    # the same source confuses Catalyst otherwise.
    renamed = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                   .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", "R_properties")
    # True when the two sides are the very same annotation (same set/type/span);
    # such pairs must not count as containment.
    sameAnnotation = ((col("L.annotSet") == col("R_annotSet")) &
                      (col("L.annotType") == col("R_annotType")) &
                      (col("L.startOffset") == col("R_startOffset")) &
                      (col("L.endOffset") == col("R_endOffset")))
    containedCond = ((col("L.docId") == col("R_docId")) &
                     (col("L.startOffset") >= col("R_startOffset")) &
                     (col("L.endOffset") <= col("R_endOffset")) &
                     (~sameAnnotation))
    if negate:
        # Keep the left rows with no containing match at all.
        results = left.alias("L").join(renamed, containedCond, "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        results = left.alias("L").join(renamed, containedCond, "leftsemi")
    return results.limit(limit) if limit > 0 else results
def ContainedInList(left, right):
    """Provide the ability to find annotations that are contained by another annotation.

    The input is 2 Dataframes of AQAnnotations. We will call them A and B.
    The purpose is to find those annotations in A that are contained in B. What that means is the start/end offset for an annotation from A must be contained by the start/end offset from an annotation in B.
    We of course have to also match on the document id.
    We ultimately return a Dataframe with 2 fields where the first field is an annotation from B and the second field is an array of entries from A
    that are contained in the first entry.

    Args:
        left: Dataframe of AQAnnotations, the ones we will return (as a list) if they are contained in AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if they contain AQAnnotations from 'left'.

    Returns:
        Dataframe of (AQAnnotations,Array[AQAnnotations])
    """
    # Build the (container annotation, [contained annotations]) pair for one
    # grouped record. rec is (groupKey, iterable-of-joined-rows).
    def containedAQ(rec):
        # Sort the contained annotations by ascending start offset (a missing
        # offset sorts as 1).
        srecs = sorted(rec[1], key=lambda x: (1 if x.LstartOffset == None else x.LstartOffset),reverse=False)
        # Every row in the group carries the same 'right' annotation, so the key
        # can be rebuilt from any entry (we use the first one).
        key = Row(docId = srecs[0].RdocId,
                  annotSet = srecs[0].RannotSet,
                  annotType = srecs[0].RannotType,
                  startOffset = int(srecs[0].RstartOffset),
                  endOffset = int(srecs[0].RendOffset),
                  annotId = int(srecs[0].RannotId),
                  properties = srecs[0].Rproperties)
        # Construct the array of contained 'left' annotations.
        # NOTE(review): the loop variable shadows the 'rec' parameter — harmless
        # here since 'rec' is not used afterwards, but worth renaming.
        values = []
        for rec in srecs:
            if rec.LdocId != None:
                values.append(Row(docId = rec.LdocId,
                                  annotSet = rec.LannotSet,
                                  annotType = rec.LannotType,
                                  startOffset = int(rec.LstartOffset),
                                  endOffset = int(rec.LendOffset),
                                  annotId = int(rec.LannotId),
                                  properties = rec.Lproperties))
        return(key,values)
    # Rename the columns on both sides before joining (Catalyst workaround used
    # throughout this module).
    l = left.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("LannotId","LannotSet","LannotType","LdocId","LendOffset","Lproperties","LstartOffset")
    r = right.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("RannotId","RannotSet","RannotType","RdocId","RendOffset","Rproperties","RstartOffset")
    # Inner-join on containment (same doc, left span inside right span,
    # excluding the identical annotation), then group by the container.
    results = l.join(r,
                     ((col("LdocId") == col("RdocId")) &
                      (col("LstartOffset") >= col("RstartOffset")) &
                      (col("LendOffset") <= col("RendOffset")) &
                      (~((col("LannotSet") == col("RannotSet")) &
                         (col("LannotType") == col("RannotType")) &
                         (col("LstartOffset") == col("RstartOffset")) &
                         (col("LendOffset") == col("RendOffset")))))) \
               .rdd \
               .groupBy(lambda x: (x["RdocId"],x["RstartOffset"],x["RendOffset"])) \
               .map(lambda rec: containedAQ(rec))
    # AQSchemaList (from AQPython.Annotation) defines the (annotation, list)
    # output schema.
    return spark.createDataFrame(results.map(lambda x: x),AQSchemaList())
def Before(left, right, dist=sys.maxsize , limit=0, negate=False):
    """Find the annotations in 'left' that occur before an annotation from 'right'.

    An annotation A is before an annotation B when A's endOffset is at or before
    B's startOffset in the same document, and the gap (B.startOffset -
    A.endOffset) is less than dist characters. The qualifying 'left' annotations
    are returned, deduplicated via a left-semi join. With negate=True the result
    is instead the 'left' annotations that are before nothing from 'right'.

    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they are before AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if are after AQAnnotations from 'left'.
        dist: Number of characters where endOffset from 'left' must occur before startOffset from 'right'. Default is sys.maxsize.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query (think NOT before). Default is False.

    Returns:
        Dataframe of AQAnnotations
    """
    # Rename the right-hand columns up front; joining two Dataframes derived from
    # the same source confuses Catalyst otherwise.
    renamed = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                   .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", "R_properties")
    # Exclude pairs that are really the same annotation (same set/type/span).
    sameAnnotation = ((col("L.annotSet") == col("R_annotSet")) &
                      (col("L.annotType") == col("R_annotType")) &
                      (col("L.startOffset") == col("R_startOffset")) &
                      (col("L.endOffset") == col("R_endOffset")))
    beforeCond = ((col("L.docId") == col("R_docId")) &
                  (col("R_startOffset") >= col("L.endOffset")) &
                  (col("R_startOffset") - col("L.endOffset") < dist) &
                  (~sameAnnotation))
    if negate:
        # Keep the left rows with no 'before' match at all.
        results = left.alias("L").join(renamed, beforeCond, "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        results = left.alias("L").join(renamed, beforeCond, "leftsemi")
    return results.limit(limit) if limit > 0 else results
def After(left, right, dist=sys.maxsize , limit=0, negate=False):
    """Find the annotations in 'left' that occur after an annotation from 'right'.

    An annotation A is after an annotation B when A's startOffset is at or after
    B's endOffset in the same document, and the gap (A.startOffset - B.endOffset)
    is less than dist characters. The qualifying 'left' annotations are returned,
    deduplicated via a left-semi join. With negate=True the result is instead the
    'left' annotations that are after nothing from 'right'.

    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they are after AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if are before AQAnnotations from 'left'.
        dist: Number of characters where startOffset from 'left' must occur after endOffset from 'right'. Default is sys.maxsize.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query (think NOT after). Default is False.

    Returns:
        Dataframe of AQAnnotations
    """
    # Rename the right-hand columns up front; joining two Dataframes derived from
    # the same source confuses Catalyst otherwise.
    renamed = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                   .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", "R_properties")
    # Exclude pairs that are really the same annotation (same set/type/span).
    sameAnnotation = ((col("L.annotSet") == col("R_annotSet")) &
                      (col("L.annotType") == col("R_annotType")) &
                      (col("L.startOffset") == col("R_startOffset")) &
                      (col("L.endOffset") == col("R_endOffset")))
    afterCond = ((col("L.docId") == col("R_docId")) &
                 (col("L.startOffset") >= col("R_endOffset")) &
                 (col("L.startOffset") - col("R_endOffset") < dist) &
                 (~sameAnnotation))
    if negate:
        # Keep the left rows with no 'after' match at all.
        results = left.alias("L").join(renamed, afterCond, "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        results = left.alias("L").join(renamed, afterCond, "leftsemi")
    return results.limit(limit) if limit > 0 else results
def Between(middle, left, right, dist=sys.maxsize , limit=0, negate=False):
    """Provide the ability to find annotations that are before one annotation and after another.

    The input is 3 Dataframes of AQAnnotations. We will call them A, B and C.
    The purpose is to find those annotations in A that are before B and after C.
    What that means is the end offset for an annotation from A must be before (or equal to) the start offset from an annotation in B and the start offset for A be after (or equal to) the end offset from C.
    We ultimately return the A annotations that meet this criteria.
    A distance operator can also be optionally specified.
    This would require an A annotation (endOffset) to occur n characters (or less) before the B annotation (startOffset) and would require the A annotation (startOffset) to occur n characters (or less) after the C annotation (endOffset) .
    There is also the option of negating the query (think Not Between) so that we return only A where it is not before B nor after C.

    Args:
        middle: Dataframe of AQAnnotations, the ones we will return if they are between AQAnnotations from 'left' and AQAnnotations from 'right.
        left: Dataframe of AQAnnotations, the ones we are looking to see if they are before AQAnnotations from 'middle'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if they are after AQAnnotations from 'middle'.
        dist: Number of characters where startOffset from 'middle' must occur after endOffset of 'left' or endOffset from 'middle' must occur before startOffset of 'right'
        limit: Number of AQAnnotations to return.
        negate: Whether to negate the entire query (think NOT between). Default is false.

    Returns:
        Dataframe of AQAnnotations
    """
    # NOTE(review): unlike the other two-Dataframe functions in this module,
    # Between uses .alias(...) directly rather than the column-rename Catalyst
    # workaround — confirm this is safe when the inputs derive from the same
    # source Dataframe.
    intermediate = None
    intermediate2 = None
    results = None
    if negate:
        # Step 1: middle rows that have some 'right' annotation after them
        # (within dist characters), excluding identical-span matches.
        intermediate = middle.alias("L").join(right.alias("R"),
            ((col("L.docId") == col("R.docId")) &
            (col("R.startOffset") >= col("L.endOffset")) &
            (col("R.startOffset") - col("L.endOffset") < dist)) &
            (~((col("L.annotSet") == col("R.annotSet")) &
            (col("L.annotType") == col("R.annotType")) &
            (col("L.startOffset") == col("R.startOffset")) &
            (col("L.endOffset") == col("R.endOffset")))),"leftsemi")
        # Step 2: of those, keep rows that also have some 'left' annotation
        # before them (within dist characters).
        # NOTE(review): this self-identity guard compares annotId/endOffset,
        # while the non-negate path below compares startOffset/endOffset —
        # confirm the asymmetry is intentional.
        intermediate2 = intermediate.alias("L").join(left.alias("R"),
            ((col("L.docId") == col("R.docId")) &
            (col("L.startOffset") >= col("R.endOffset")) &
            (col("L.startOffset") - col("R.endOffset") < dist) &
            (~((col("L.annotSet") == col("R.annotSet")) &
            (col("L.annotType") == col("R.annotType")) &
            (col("L.annotId") == col("R.annotId")) &
            (col("L.endOffset") == col("R.endOffset"))))),"leftsemi")
        # Step 3: negation — return the middle rows NOT present in the
        # 'between' set computed above (anti-join via leftouter + null filter).
        results = middle.alias("L").join(intermediate2.alias("R"),
            ((col("L.docId") == col("R.docId")) &
            (col("L.annotSet") == col("R.annotSet")) &
            (col("L.annotType") == col("R.annotType")) &
            (col("L.annotId") == col("R.annotId"))),"leftouter") \
            .filter(col("R.docId").isNull()) \
            .select("L.*")
    else:
        # Step 1: middle rows with some 'right' annotation after them.
        intermediate = middle.alias("L").join(right.alias("R"),
            ((col("L.docId") == col("R.docId")) &
            (col("R.startOffset") >= col("L.endOffset")) &
            (col("R.startOffset") - col("L.endOffset") < dist)) &
            (~((col("L.annotSet") == col("R.annotSet")) &
            (col("L.annotType") == col("R.annotType")) &
            (col("L.startOffset") == col("R.startOffset")) &
            (col("L.endOffset") == col("R.endOffset")))),"leftsemi")
        # Step 2: of those, keep rows that also have some 'left' annotation
        # before them — these are the 'between' annotations.
        results = intermediate.alias("L").join(left.alias("R"),
            ((col("L.docId") == col("R.docId")) &
            (col("L.startOffset") >= col("R.endOffset")) &
            (col("L.startOffset") - col("R.endOffset") < dist) &
            (~((col("L.annotSet") == col("R.annotSet")) &
            (col("L.annotType") == col("R.annotType")) &
            (col("L.startOffset") == col("R.startOffset")) &
            (col("L.endOffset") == col("R.endOffset"))))),"leftsemi")
    if limit > 0:
        results = results.limit(limit)
    return results
def Sequence(left, right, dist=sys.maxsize, limit=0):
    """Find the annotations in 'left' that occur before an annotation from 'right' and merge each pair into one 'seq' annotation.

    Like Before, an annotation A qualifies when A.endOffset is at or before
    B.startOffset in the same document and the gap is less than dist characters.
    Unlike Before, the returned annotation is adjusted: annotType becomes "seq",
    the span runs from A's startOffset to B's endOffset, and properties is null.

    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they are before AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if are after AQAnnotations from 'left'.
        dist: Number of characters where endOffset from 'left' must occur before startOffset from 'right'. Default is sys.maxsize.
        limit: Number of AQAnnotations to return (0 means no limit).

    Returns:
        Dataframe of AQAnnotations
    """
    # Rename the right-hand columns up front; joining two Dataframes derived from
    # the same source confuses Catalyst otherwise.
    renamed = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                   .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", "R_properties")
    # Exclude pairs that are really the same annotation (same set/type/span).
    sameAnnotation = ((col("L.annotSet") == col("R_annotSet")) &
                      (col("L.annotType") == col("R_annotType")) &
                      (col("L.startOffset") == col("R_startOffset")) &
                      (col("L.endOffset") == col("R_endOffset")))
    seqCond = ((col("L.docId") == col("R_docId")) &
               (col("R_startOffset") >= col("L.endOffset")) &
               (col("R_startOffset") - col("L.endOffset") < dist) &
               (~sameAnnotation))
    # Inner join, then reshape: keep the left start and the right end, stamp the
    # "seq" type, null out properties, and deduplicate the merged annotations.
    results = left.alias("L").join(renamed, seqCond) \
                  .select("L.docId", "L.annotSet", "L.startOffset", "R_endOffset", "L.annotId") \
                  .withColumnRenamed("R_endOffset", "endOffset") \
                  .withColumn("annotType", lit("seq")) \
                  .withColumn("properties", lit(None)) \
                  .dropDuplicates(["docId", "annotSet", "annotType", "annotId", "startOffset", "endOffset"])
    return results.limit(limit) if limit > 0 else results
def Or(left, right, limit=0):
    """Combine (union) two Dataframes of AQAnnotations, dropping duplicate annotations.

    Args:
        left: Dataframe of AQAnnotations
        right: Dataframe of AQAnnotations
        limit: Number of AQAnnotations to return (0 means no limit).

    Returns:
        Dataframe of AQAnnotations
    """
    # The union changes the number of partitions (and may impact performance).
    combined = left.union(right) \
                   .dropDuplicates(["docId","annotSet","annotType","annotId","startOffset","endOffset"])
    return combined.limit(limit) if limit > 0 else combined
def And(left, right, limit=0, negate=False, leftOnly=True):
    """Find annotations from the two inputs that occur in the same document.

    Args:
        left: Dataframe of AQAnnotations
        right: Dataframe of AQAnnotations.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Think and NOT (only return annotations from 'left' whose document
            has no annotation in 'right'). Default is False.
        leftOnly: Return only the left, or the left and right combined. The
            default is to only return the left.

    Returns:
        Dataframe of AQAnnotations
    """
    # Rename the columns of both sides up front; joining two Dataframes derived
    # from the same source confuses Catalyst otherwise.
    rightRenamed = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                        .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", "R_properties")
    leftRenamed = left.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                      .toDF("L_annotId", "L_annotSet", "L_annotType", "L_docId", "L_endOffset", "L_startOffset", "L_properties")
    if negate:
        # Anti-join on document id: keep left rows whose doc has no right row.
        results = left.alias("L").join(rightRenamed.select("R_docId").distinct(),
                                       (col("L.docId") == col("R_docId")), "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    elif leftOnly:
        results = left.alias("L").join(rightRenamed.select("R_docId").distinct(),
                                       (col("L.docId") == col("R_docId")), "leftsemi")
    else:
        fromLeft = left.alias("L").join(rightRenamed.select("R_docId").distinct(),
                                        (col("L.docId") == col("R_docId")), "leftsemi")
        fromRight = right.alias("L").join(leftRenamed.select("L_docId").distinct(),
                                          (col("L.docId") == col("L_docId")), "leftsemi")
        # The union changes the number of partitions (and may impact performance).
        results = fromLeft.union(fromRight) \
                          .dropDuplicates(["docId","annotSet","annotType","annotId","startOffset","endOffset"])
    return results.limit(limit) if limit > 0 else results
def MatchProperty(left, right, name, negate=False, limit=0):
    """Find annotations (looking at their property) that are in the same document.

    Returns the annotations in 'left' that share a document with an annotation
    in 'right' AND have an equal value for the named property.

    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they match AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations the ones we are looking to see if they match AQAnnotations from 'left'.
        name: Name of the property to match.
        negate: Whether to negate the entire query (think NOT matches). Default is False.
        limit: Number of AQAnnotations to return (0 means no limit).

    Returns:
        Dataframe of AQAnnotations
    """
    # Rename the right-hand columns up front; joining two Dataframes derived from
    # the same source confuses Catalyst otherwise.
    renamed = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                   .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", "R_properties")
    # Same document and equal values for the backtick-quoted map key.
    matchCond = ((col("L.docId") == col("R_docId")) &
                 (col("L.properties.`" + name + "`") == col("R_properties.`" + name + "`")))
    if negate:
        # Keep the left rows with no property match at all.
        results = left.alias("L").join(renamed, matchCond, "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        results = left.alias("L").join(renamed, matchCond, "leftsemi")
    return results.limit(limit) if limit > 0 else results
def Preceding(annot, anchor, container=None, cnt=3):
    """Provide the ability to find the preceding sibling annotations for every annotation in the anchor Dataframe of AQAnnotations.

    The preceding sibling annotations can optionally be required to be contained in a container Dataframe of AQAnnotations.
    The return type of this function is different from other functions.
    Instead of returning a Dataframe of AQAnnotations this function returns a Dataframe of (AQAnnotation,Array[AQAnnotation]).

    Args:
        annot: Dataframe of AQAnnotations, the ones we will be using to look for preceding sibling annotations.
        anchor: Dataframe of AQAnnotations starting point for using to look for preceding sibling annotations (use the startOffset and docId).
        container: Dataframe of AQAnnotations to use when requiring the preceding sibling annotations to be contained in a specific annotation.
        cnt: Number of preceding sibling AQAnnotations to return.

    Returns:
        Dataframe of (AQAnnotation,Array[AQAnnotation])
    """
    # Build the (anchor, [preceding annotations]) pair for one grouped record.
    # rec is (groupKey, iterable-of-joined-rows).
    def precedingAQ(rec,cnt):
        # Sort the preceding annotations by descending end offset (closest
        # sibling first; a missing offset sorts as -1) and keep at most cnt.
        srecs = sorted(rec[1], key=lambda x: (-1 if x.LendOffset == None else x.LendOffset),reverse=True)[0:cnt]
        # Every row in the group carries the same anchor ('R') annotation, so
        # the key can be rebuilt from any entry (we use the first one).
        key = Row(docId = srecs[0].RdocId,
                  annotSet = srecs[0].RannotSet,
                  annotType = srecs[0].RannotType,
                  startOffset = int(srecs[0].RstartOffset),
                  endOffset = int(srecs[0].RendOffset),
                  annotId = int(srecs[0].RannotId),
                  properties = srecs[0].Rproperties)
        # Construct the array of preceding annotations; rows with a null 'left'
        # side come from the rightouter join (anchor with no siblings).
        values = []
        for rec in srecs:
            if rec.LdocId != None:
                values.append(Row(docId = rec.LdocId,
                                  annotSet = rec.LannotSet,
                                  annotType = rec.LannotType,
                                  startOffset = int(rec.LstartOffset),
                                  endOffset = int(rec.LendOffset),
                                  annotId = int(rec.LannotId),
                                  properties = rec.Lproperties))
        return(key,values)
    # Restrict a (anchor, siblings) record to the siblings that fall inside the
    # matched container annotation.
    def precedingContainedAQ(rec):
        # 'annot'/'annots' are presumably the field names from AQSchemaList —
        # TODO confirm against AQPython.Annotation.
        if rec.CdocId == None:
            return (rec.annot,[])
        else:
            values = []
            for entry in rec.annots:
                if (entry.startOffset >= rec.CstartOffset) and (entry.endOffset <= rec.CendOffset):
                    values.append(entry)
            return (rec.annot,values)
    # Rename the columns on both sides before joining (Catalyst workaround used
    # throughout this module).
    l = annot.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("LannotId","LannotSet","LannotType","LdocId","LendOffset","Lproperties","LstartOffset")
    r = anchor.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("RannotId","RannotSet","RannotType","RdocId","RendOffset","Rproperties","RstartOffset")
    # Group on the anchor annotation: rightouter keeps anchors with no
    # preceding sibling at all.
    results = l.join(r,
                     (col("LdocId") == col("RdocId")) &
                     (col("LendOffset") <= col("RstartOffset")),
                     "rightouter") \
               .rdd \
               .groupBy(lambda x: (x["RdocId"],x["RstartOffset"],x["RendOffset"])) \
               .map(lambda rec: precedingAQ(rec,cnt))
    results = spark.createDataFrame(results.map(lambda x: x),AQSchemaList())
    if (container != None) and (not(container.rdd.isEmpty())):
        # Optionally keep only the siblings contained in a container annotation.
        c = container.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("CannotId","CannotSet","CannotType","CdocId","CendOffset","Cproperties","CstartOffset")
        cResults = results.join(c,
                                (col("annot.docId") == col("CdocId")) &
                                (col("annot.startOffset") >= col("CstartOffset")) &
                                (col("annot.endOffset") <= col("CendOffset")),
                                "leftouter") \
                          .rdd \
                          .map(lambda rec: precedingContainedAQ(rec))
        # Need to drop duplicates
        return spark.createDataFrame(cResults.map(lambda x: x),AQSchemaList())
    else:
        return results
def Following(annot, anchor, container=None, cnt=3):
    """Provide the ability to find the following sibling annotations for every annotation in the anchor Dataframe of AQAnnotations.
    The following sibling annotations can optionally be required to be contained in a container Dataframe of AQAnnotations.
    The return type of this function is different from other functions.
    Instead of returning a Dataframe of AQAnnotations this function returns a Dataframe (AQAnnotation,Array[AQAnnotation]).
    Args:
        annot: Dataframe of AQAnnotations, the ones we will be using to look for following sibling annotations.
        anchor: Dataframe of AQAnnotations starting point for using to look for following sibling annotations (use the endOffset and docId).
        container: Dataframe of AQAnnotations to use when requiring the following sibling annotations to be contained in a specific annotation.
        cnt: Number of following sibling AQAnnotations to return.
    Returns:
        Dataframe of (AQAnnotation,Array[AQAnnotation])
    """
    # Build the (anchor, [following siblings]) pair for one grouped record.
    def followingAQ(rec,cnt):
        # Sort the following annotations by start offset (None first) and limit the number of results to cnt
        srecs = sorted(rec[1], key=lambda x: (-1 if x.LstartOffset == None else x.LstartOffset))[0:cnt]
        # We can extract the key from any 'right' entry in the sorted recs (we will use the first one)
        key = Row(docId = srecs[0].RdocId,
                  annotSet = srecs[0].RannotSet,
                  annotType = srecs[0].RannotType,
                  startOffset = int(srecs[0].RstartOffset),
                  endOffset = int(srecs[0].RendOffset),
                  annotId = int(srecs[0].RannotId),
                  properties = srecs[0].Rproperties)
        # Construct the array (skip null 'left' rows produced by the right-outer join)
        values = []
        for rec in srecs:
            if rec.LdocId != None:
                values.append(Row(docId = rec.LdocId,
                                  annotSet = rec.LannotSet,
                                  annotType = rec.LannotType,
                                  startOffset = int(rec.LstartOffset),
                                  endOffset = int(rec.LendOffset),
                                  annotId = int(rec.LannotId),
                                  properties = rec.Lproperties))
        return(key,values)
    # Keep only the following siblings contained in the matched container annotation.
    def followingContainedAQ(rec):
        if rec.CdocId == None:
            # Anchor matched no container row: return the anchor with an empty sibling list
            return (rec.annot,[])
        else:
            values = []
            for entry in rec.annots:
                if (entry.startOffset >= rec.CstartOffset) and (entry.endOffset <= rec.CendOffset):
                    values.append(entry)
            return (rec.annot,values)
    # Rename columns (L = candidate sibling, R = anchor) to avoid join ambiguity
    l = annot.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("LannotId","LannotSet","LannotType","LdocId","LendOffset","Lproperties","LstartOffset")
    r = anchor.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("RannotId","RannotSet","RannotType","RdocId","RendOffset","Rproperties","RstartOffset")
    # Group on the anchor annotation
    results = l.join(r,
                     (col("LdocId") == col("RdocId")) &
                     (col("LstartOffset") >= col("RendOffset")),
                     "rightouter") \
               .rdd \
               .groupBy(lambda x: (x["RdocId"],x["RstartOffset"],x["RendOffset"])) \
               .map(lambda rec: followingAQ(rec,cnt))
    results = spark.createDataFrame(results.map(lambda x: x),AQSchemaList())
    if (container != None) and (not(container.rdd.isEmpty())):
        c = container.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("CannotId","CannotSet","CannotType","CdocId","CendOffset","Cproperties","CstartOffset")
        cResults = results.join(c,
                                (col("annot.docId") == col("CdocId")) &
                                (col("annot.startOffset") >= col("CstartOffset")) &
                                (col("annot.endOffset") <= col("CendOffset")),
                                "leftouter") \
                          .rdd \
                          .map(lambda rec: followingContainedAQ(rec))
        # Need to drop duplicates
        return spark.createDataFrame(cResults.map(lambda x: x),AQSchemaList())
    else:
        return results
def TokensSpan(tokens, spans, tokenProperty):
    """Provides the ability to create a string from a list of tokens that are contained in a span.
    The specified tokenProperty is used to extract the values from the tokens when creating the string.
    For SCNLP, this tokenProperty could be values like 'orig', 'lemma', or 'pos'. The spans would typically be a SCNLP 'sentence' or could even be things like an OM 'ce:para'.
    Args:
        tokens: Dataframe of AQAnnotations (which we will use to concatenate for the string)
        spans: Dataframe of AQAnnotations (identifies the start/end for the tokens to be used for the concatenated string)
        tokenProperty: The property field in the tokens to use for extracting the value for the concatenated string
    Returns:
        Dataframe[AQAnnotation] spans with 3 new properties all prefixed with the specified tokenProperty value followed by (ToksStr, ToksSpos, ToksEpos). The ToksStr property will be the
        concatenated string of token property values contained in the span. The ToksSpos and ToksEpos properties help us determine the start/end offset for each of the individual tokens in the ToksStr.
        These helper properties are needed by RegexTokensSpan so we can generate accurate start/end offsets based on the matched string.
    """
    def process(rec):
        # rec is (span, [tokens contained in span]) as produced by ContainedInList
        span = rec[0]
        tokens = rec[1]
        # Copy the span's existing properties so the input Row is never mutated
        newProps = {}
        oldProps = span.properties
        for key in oldProps.keys():
            newProps[key] = oldProps[key]
        toksStr = []
        toksSpos = []
        toksEpos = []
        offset = 0
        for token in tokens:
            # Tokens missing the requested property are simply skipped
            if (token.properties != None) and (tokenProperty in token.properties):
                tokStr = token.properties[tokenProperty]
                toksStr.append(tokStr)
                # Record "offset in concatenated string|offset in original document" pairs
                toksSpos.append((str(offset) + "|" + str(token.startOffset)))
                offset += len(tokStr)
                toksEpos.append((str(offset) + "|" + str(token.endOffset)))
                # +1 for the single space used to join the token strings
                offset += 1
        newProps[tokenProperty + "ToksStr"] = " ".join(toksStr)
        newProps[tokenProperty + "ToksSpos"] = " ".join(toksSpos)
        newProps[tokenProperty + "ToksEpos"] = " ".join(toksEpos)
        return Row(docId = span.docId,
                   annotSet = span.annotSet,
                   annotType = span.annotType,
                   startOffset = span.startOffset,
                   endOffset = span.endOffset,
                   annotId = span.annotId,
                   properties = newProps)
    results = ContainedInList(tokens,spans).rdd.map(lambda rec: process(rec))
    return spark.createDataFrame(results.map(lambda x: x),AQSchema())
def RegexTokensSpan(tokensSpan, prop, regex, annotSet="", annotType="", annotProps=None):
    """Provides the ability to apply a regular expression to the concatenated string generated by TokensSpan.
    For the strings matching the regex, a Dataframe[AQAnnotations] will be returned.
    The AQAnnotation will correspond to the offsets within the concatenated string containing the match.
    Args:
        tokensSpan: Dataframe of AQAnnotations (the annotations returned from the TokensSpan function)
        prop: the property name (orig, lemma, pos) that was used to generate the string for the span in TokensSpan
        regex: the regular expression to apply to the span
        annotSet: the value to assign to annotSet for the returned matched annotations (default will be the annotSet from the tokensSpan)
        annotType: the value to assign to annotType for the returned matched annotations (default will be the annotType from the tokensSpan)
        annotProps: the additional properties to append to the properties map for the returned matched annotations
    Returns:
        Dataframe[AQAnnotation] for the strings matching the regex
    """
    # Avoid a mutable default argument for the properties map
    if annotProps is None:
        annotProps = {}
    def process(partition,prop,regex,annotSet,annotType,annotProps):
        # Imported inside the partition function so the workers resolve them locally.
        # NOTE: 'regex' is the third-party regex module (a superset of the stdlib re).
        import regex as re
        import builtins as py_builtin
        results = []
        annotId = 0
        pattern = re.compile(regex)
        for rec in partition:
            if (rec.properties != None) and (prop+"ToksStr" in rec.properties):
                span = rec.properties[prop+"ToksStr"]
                for match in re.finditer(pattern, span):
                    annotId += 1
                    # Fall back to the span's annotSet/annotType when none was supplied
                    newAnnotSet = annotSet
                    newAnnotType = annotType
                    if (annotSet == ""):
                        newAnnotSet = rec.annotSet
                    if (annotType == ""):
                        newAnnotType = rec.annotType
                    props = {}
                    oldProps = rec.properties
                    for key in annotProps.keys():
                        props[key] = annotProps[key]
                    # Map the match start back to a document offset using the ToksSpos pairs
                    # ("string offset|document offset"); if no exact hit, use the closest
                    # preceding token boundary.
                    startPos = -1
                    startPosLB = []
                    for start in oldProps[prop+"ToksSpos"].split(" "):
                        startToks = start.split("|")
                        if int(startToks[0]) == match.start():
                            startPos = int(startToks[1])
                        if int(startToks[0]) < match.start():
                            startPosLB.append(int(startToks[1]))
                    if startPos == -1:
                        startPos = py_builtin.max(startPosLB)
                    # Map the match end back to a document offset using the ToksEpos pairs;
                    # if no exact hit, use the closest following token boundary.
                    endPos = -1
                    endPosLB = []
                    for end in oldProps[prop+"ToksEpos"].split(" "):
                        endToks = end.split("|")
                        if int(endToks[0]) == match.end():
                            endPos = int(endToks[1])
                        if int(endToks[0]) > match.end():
                            endPosLB.append(int(endToks[1]))
                    if endPos == -1:
                        endPos = py_builtin.min(endPosLB)
                    props[prop+"Match"] = span[match.start():match.end()]
                    # get the excludes from the span (but only include those contained within the match)
                    for key in oldProps.keys():
                        if key == "excludes":
                            excludesLB = []
                            for exclude in oldProps[key].split("|"):
                                arr = exclude.split(",")
                                excludeStart = int(arr[3])
                                excludeEnd = int(arr[4])
                                if excludeStart >= startPos and excludeEnd <= endPos:
                                    excludesLB.append(exclude)
                            if len(excludesLB):
                                props["excludes"] = "|".join(excludesLB)
                    annot = Row(docId = rec.docId,
                                annotSet = newAnnotSet,
                                annotType = newAnnotType,
                                startOffset = startPos,
                                endOffset = endPos,
                                annotId = annotId,
                                properties = props)
                    results.append(annot)
        return iter(results)
    results = tokensSpan.rdd.mapPartitions(lambda partition: process(partition,prop,regex,annotSet,annotType,annotProps))
    return spark.createDataFrame(results.map(lambda x: x),AQSchema())
from pyspark.sql.functions import *
from pyspark.sql import Row
from pyspark.sql import SparkSession
from AQPython.Annotation import *
spark = SparkSession.builder.getOrCreate()
def FilterProperty(df, name, value="", valueArr=None, valueCompare="=", limit=0, negate=False):
    """Provide the ability to filter a Dataframe of AQAnnotations based on the value matching the specified property value in the map.
    Args:
        df: Dataframe of AQAnnotations that will be filtered by the specified property name and value.
        name: Name of the property to filter.
        value: Value of the named property to filter.
        valueArr: The list of values of the named property to filter. An OR will be applied to the Strings. Only used if value was not specified.
        valueCompare: Comparison operator to use for the property filter. Default is '='. Possible values are '=' and '!=' when valueArr specified. Possible values are '=','!=','<','<=','>', and '>=' otherwise.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query. Default is False.
    Returns:
        Dataframe of AQAnnotations
    """
    # Use a None sentinel instead of a mutable default argument for the list parameter
    if valueArr is None:
        valueArr = []
    query = ""
    if value != "":
        query += ("properties.`" + name + "` " + valueCompare + " '" + value + "'")
    elif len(valueArr) > 0:
        # Only '=' (IN) and '!=' (NOT IN) are meaningful for the list form
        if valueCompare == '=':
            query += ("properties.`" + name + "` in " + "('" + "','".join(map(str,valueArr)) + "')")
        else:
            query += ("properties.`" + name + "` not in " + "('" + "','".join(map(str,valueArr)) + "')")
    if negate:
        query = "!(" + query + ")"
    results = df.filter(query)
    if limit > 0:
        results = results.limit(limit)
    return results
def RegexProperty(df, name, regex, limit=0, negate=False):
    """Provide the ability to filter a Dataframe of AQAnnotations by applying a regex to the specified property value in the map.
    Args:
        df: Dataframe of AQAnnotations that will be filtered by the specified property name and regex expression.
        name: Name of the property to filter.
        regex: Regex expression to use for the filter.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query. Default is False.
    Returns:
        Dataframe of AQAnnotations
    """
    # Build the Spark SQL predicate against the named entry in the properties map
    clause = "properties.`{}` rlike '{}'".format(name, regex)
    if negate:
        clause = "!({})".format(clause)
    filtered = df.filter(clause)
    return filtered.limit(limit) if limit > 0 else filtered
def FilterSet(df, annotSet="", annotSetArr=None, annotSetCompare="=", limit=0, negate=False):
    """Provide the ability to filter a Dataframe of AQAnnotations based on the value in the annotSet field.
    Args:
        df: Dataframe of AQAnnotations that will be filtered by the specified annotation set.
        annotSet: String to filter against the annotSet field in the dataframe of AQAnnotations.
        annotSetArr: List of Strings to filter against the annotSet field in the dataframe of AQAnnotations. An OR will be applied to the Strings. Only used if annotSet was not specified.
        annotSetCompare: Comparison operator to use for the annotSet field in the dataframe of AQAnnotations. Default is '='. Possible values are '=' and '!='.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query. Default is False.
    Returns:
        Dataframe of AQAnnotations
    """
    # Use a None sentinel instead of a mutable default argument for the list parameter
    if annotSetArr is None:
        annotSetArr = []
    query = ""
    if annotSet != "":
        query += ("annotSet " + annotSetCompare + " \"" + annotSet + "\"")
    elif len(annotSetArr) > 0:
        # Only '=' (IN) and '!=' (NOT IN) are meaningful for the list form
        if annotSetCompare == "=":
            query += ("annotSet in " + "('" + "','".join(map(str,annotSetArr)) + "')")
        else:
            query += ("annotSet not in " + "('" + "','".join(map(str,annotSetArr)) + "')")
    if negate:
        query = "!(" + query + ")"
    results = df.filter(query)
    if limit > 0:
        results = results.limit(limit)
    return results
def FilterType(df, annotType="", annotTypeArr=[], annotTypeCompare="=", limit=0, negate=False):
    """Provide the ability to filter a Dataframe of AQAnnotations based on the value in the annotType field.
    Args:
        df: Dataframe of AQAnnotations that will be filtered by the specified annotation type.
        annotType: String to filter against the annotType field in the dataframe of AQAnnotations.
        annotTypeArr: Array of Strings to filter against the annotType field in the dataframe of AQAnnotations. An OR will be applied to the Strings. Only used if annotType was not specified.
        annotTypeCompare: Comparison operator to use for the annotType field in the dataframe of AQAnnotations. Default is '='. Possible values are '=' and '!='.
        limit: Number of AQAnnotations to return (0 means no limit).
        negate: Whether to negate the entire query. Default is False.
    Returns:
        Dataframe of AQAnnotations
    """
    predicate = ""
    if annotType != "":
        # Single value: direct comparison against annotType
        predicate = 'annotType {} "{}"'.format(annotTypeCompare, annotType)
    elif annotTypeArr:
        # Multiple values: build an IN / NOT IN list
        inList = "('" + "','".join(str(t) for t in annotTypeArr) + "')"
        op = "in" if annotTypeCompare == "=" else "not in"
        predicate = "annotType {} {}".format(op, inList)
    if negate:
        predicate = "!({})".format(predicate)
    out = df.filter(predicate)
    if limit > 0:
        out = out.limit(limit)
    return out
def Contains(left, right, limit=0, negate=False):
    """Provide the ability to find annotations that contain another annotation.
    The input is 2 Dataframes of AQAnnotations. We will call them A and B.
    The purpose is to find those annotations in A that contain B. What that means is the start/end offset for an annotation from A must contain the start/end offset from an annotation in B.
    The start/end offsets are inclusive. We ultimately return the container annotations (A) that meet this criteria.
    We also deduplicate the A annotations as there could be many annotations from B that could be contained by an annotation in A but it only makes sense to return the unique container annotations.
    There is also the option of negating the query (think Not Contains) so that we return only A where it does not contain B.
    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they contain AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if they occur in the AQAnnotations from 'left'.
        limit: Number of AQAnnotations to return.
        negate: Whether to negate the entire query (think NOT contains). Default is False.
    Returns:
        Dataframe of AQAnnotations
    """
    # Workaround for Catalyst optimization issues when working with two Dataframes
    # derived from the same Dataframe - rename the right-hand columns up front
    tmpRight = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                    .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", 'R_properties')
    # A contains B: same doc, A's span covers B's span, and A is not the identical annotation
    cond = ((col("L.docId") == col("R_docId")) &
            (col("L.startOffset") <= col("R_startOffset")) &
            (col("L.endOffset") >= col("R_endOffset")) &
            (~((col("L.annotSet") == col("R_annotSet")) &
               (col("L.annotType") == col("R_annotType")) &
               (col("L.startOffset") == col("R_startOffset")) &
               (col("L.endOffset") == col("R_endOffset")))))
    if negate:
        # Anti-join: keep only the A rows with no containing match
        results = left.alias("L").join(tmpRight, cond, "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        # Semi-join dedupes A automatically
        results = left.alias("L").join(tmpRight, cond, "leftsemi")
    if limit > 0:
        results = results.limit(limit)
    return results
def ContainedIn(left, right, limit=0, negate=False):
    """Provide the ability to find annotations that are contained by another annotation.
    The input is 2 Dataframes of AQAnnotations. We will call them A and B.
    The purpose is to find those annotations in A that are contained in B.
    What that means is the start/end offset for an annotation from A must be contained by the start/end offset from an annotation in B.
    The start/end offsets are inclusive. We ultimately return the contained annotations (A) that meet this criteria.
    There is also the option of negating the query (think Not Contains) so that we return only A where it is not contained in B.
    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they are contained in AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if they contain AQAnnotations from 'left'.
        limit: Number of AQAnnotations to return.
        negate: Whether to negate the entire query (think NOT contained in). Default is False.
    Returns:
        Dataframe of AQAnnotations
    """
    # Workaround for Catalyst optimization issues when working with two Dataframes
    # derived from the same Dataframe - rename the right-hand columns up front
    tmpRight = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                    .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", 'R_properties')
    # A contained in B: same doc, B's span covers A's span, and A is not the identical annotation
    cond = ((col("L.docId") == col("R_docId")) &
            (col("L.startOffset") >= col("R_startOffset")) &
            (col("L.endOffset") <= col("R_endOffset")) &
            (~((col("L.annotSet") == col("R_annotSet")) &
               (col("L.annotType") == col("R_annotType")) &
               (col("L.startOffset") == col("R_startOffset")) &
               (col("L.endOffset") == col("R_endOffset")))))
    if negate:
        # Anti-join: keep only the A rows with no containing B
        results = left.alias("L").join(tmpRight, cond, "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        results = left.alias("L").join(tmpRight, cond, "leftsemi")
    if limit > 0:
        results = results.limit(limit)
    return results
def ContainedInList(left, right):
    """Provide the ability to find annotations that are contained by another annotation.
    The input is 2 Dataframes of AQAnnotations. We will call them A and B.
    The purpose is to find those annotations in A that are contained in B. What that means is the start/end offset for an annotation from A must be contained by the start/end offset from an annotation in B.
    We of course have to also match on the document id.
    We ultimately return a Dataframe with 2 fields where the first field is an annotation from B and the second field is an array of entries from A
    that are contained in the first entry.
    Args:
        left: Dataframe of AQAnnotations, the ones we will return (as a list) if they are contained in AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if they contain AQAnnotations from 'left'.
    Returns:
        Dataframe of (AQAnnotations,Array[AQAnnotations])
    """
    # Build the (container, [contained annotations]) pair for one grouped record
    def containedAQ(rec):
        # Sort the contained annotations by ascending start offset (None mapped to 1)
        srecs = sorted(rec[1], key=lambda x: (1 if x.LstartOffset == None else x.LstartOffset),reverse=False)
        # We can extract the key from any 'right' entry in the sorted recs (we will use the first one)
        key = Row(docId = srecs[0].RdocId,
                  annotSet = srecs[0].RannotSet,
                  annotType = srecs[0].RannotType,
                  startOffset = int(srecs[0].RstartOffset),
                  endOffset = int(srecs[0].RendOffset),
                  annotId = int(srecs[0].RannotId),
                  properties = srecs[0].Rproperties)
        # Construct the array of contained annotations
        values = []
        for rec in srecs:
            if rec.LdocId != None:
                values.append(Row(docId = rec.LdocId,
                                  annotSet = rec.LannotSet,
                                  annotType = rec.LannotType,
                                  startOffset = int(rec.LstartOffset),
                                  endOffset = int(rec.LendOffset),
                                  annotId = int(rec.LannotId),
                                  properties = rec.Lproperties))
        return(key,values)
    # Rename columns (L = contained candidate, R = container) to avoid join ambiguity
    l = left.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("LannotId","LannotSet","LannotType","LdocId","LendOffset","Lproperties","LstartOffset")
    r = right.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("RannotId","RannotSet","RannotType","RdocId","RendOffset","Rproperties","RstartOffset")
    # Join on containment (excluding the identical annotation), then group by the container
    results = l.join(r,
                     ((col("LdocId") == col("RdocId")) &
                      (col("LstartOffset") >= col("RstartOffset")) &
                      (col("LendOffset") <= col("RendOffset")) &
                      (~((col("LannotSet") == col("RannotSet")) &
                         (col("LannotType") == col("RannotType")) &
                         (col("LstartOffset") == col("RstartOffset")) &
                         (col("LendOffset") == col("RendOffset")))))) \
               .rdd \
               .groupBy(lambda x: (x["RdocId"],x["RstartOffset"],x["RendOffset"])) \
               .map(lambda rec: containedAQ(rec))
    return spark.createDataFrame(results.map(lambda x: x),AQSchemaList())
def Before(left, right, dist=sys.maxsize, limit=0, negate=False):
    """Provide the ability to find annotations that are before another annotation.
    The input is 2 Dataframes of AQAnnotations. We will call them A and B.
    The purpose is to find those annotations in A that are before B.
    What that means is the end offset for an annotation from A must be before (or equal to) the start offset from an annotation in B.
    We ultimately return the A annotations that meet this criteria.
    A distance operator can also be optionally specified.
    This would require an A annotation (endOffset) to occur n characters (or less) before the B annotation (startOffset).
    There is also the option of negating the query (think Not Before) so that we return only A where it is not before B.
    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they are before AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if are after AQAnnotations from 'left'.
        dist: Number of characters where endOffset from 'left' must occur before startOffset from 'right'. Default is sys.maxsize.
        limit: Number of AQAnnotations to return.
        negate: Whether to negate the entire query (think NOT before). Default is False.
    Returns:
        Dataframe of AQAnnotations
    """
    # Workaround for Catalyst optimization issues when working with two Dataframes
    # derived from the same Dataframe - rename the right-hand columns up front
    tmpRight = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                    .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", 'R_properties')
    # A before B: same doc, A ends at/before B starts, within dist, and A is not the identical annotation
    cond = ((col("L.docId") == col("R_docId")) &
            (col("R_startOffset") >= col("L.endOffset")) &
            (col("R_startOffset") - col("L.endOffset") < dist) &
            (~((col("L.annotSet") == col("R_annotSet")) &
               (col("L.annotType") == col("R_annotType")) &
               (col("L.startOffset") == col("R_startOffset")) &
               (col("L.endOffset") == col("R_endOffset")))))
    if negate:
        # Anti-join: keep only the A rows with no B after them
        results = left.alias("L").join(tmpRight, cond, "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        results = left.alias("L").join(tmpRight, cond, "leftsemi")
    if limit > 0:
        results = results.limit(limit)
    return results
def After(left, right, dist=sys.maxsize, limit=0, negate=False):
    """Provide the ability to find annotations that are after another annotation.
    The input is 2 Dataframes of AQAnnotations. We will call them A and B.
    The purpose is to find those annotations in A that are after B.
    What that means is the start offset for an annotation from A must be after (or equal to) the end offset from an annotation in B.
    We ultimately return the A annotations that meet this criteria.
    A distance operator can also be optionally specified.
    This would require an A annotation (startOffset) to occur n characters (or less) after the B annotation (endOffset).
    There is also the option of negating the query (think Not After) so that we return only A where it is not after B.
    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they are after AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if are before AQAnnotations from 'left'.
        dist: Number of characters where startOffset from 'left' must occur after endOffset from 'right'. Default is sys.maxsize.
        limit: Number of AQAnnotations to return.
        negate: Whether to negate the entire query (think NOT after). Default is False.
    Returns:
        Dataframe of AQAnnotations
    """
    # Workaround for Catalyst optimization issues when working with two Dataframes
    # derived from the same Dataframe - rename the right-hand columns up front
    tmpRight = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                    .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", 'R_properties')
    # A after B: same doc, A starts at/after B ends, within dist, and A is not the identical annotation
    cond = ((col("L.docId") == col("R_docId")) &
            (col("L.startOffset") >= col("R_endOffset")) &
            (col("L.startOffset") - col("R_endOffset") < dist) &
            (~((col("L.annotSet") == col("R_annotSet")) &
               (col("L.annotType") == col("R_annotType")) &
               (col("L.startOffset") == col("R_startOffset")) &
               (col("L.endOffset") == col("R_endOffset")))))
    if negate:
        # Anti-join: keep only the A rows with no B before them
        results = left.alias("L").join(tmpRight, cond, "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        results = left.alias("L").join(tmpRight, cond, "leftsemi")
    if limit > 0:
        results = results.limit(limit)
    return results
def Between(middle, left, right, dist=sys.maxsize , limit=0, negate=False):
    """Provide the ability to find annotations that are before one annotation and after another.
    The input is 3 Dataframes of AQAnnotations. We will call them A, B and C.
    The purpose is to find those annotations in A that are before B and after C.
    What that means is the end offset for an annotation from A must be before (or equal to) the start offset from an annotation in B and the start offset for A be after (or equal to) the end offset from C.
    We ultimately return the A annotations that meet this criteria.
    A distance operator can also be optionally specified.
    This would require an A annotation (endOffset) to occur n characters (or less) before the B annotation (startOffset) and would require the A annotation (startOffset) to occur n characters (or less) after the C annotation (endOffset).
    There is also the option of negating the query (think Not Between) so that we return only A where it is not before B nor after C.
    Args:
        middle: Dataframe of AQAnnotations, the ones we will return if they are between AQAnnotations from 'left' and AQAnnotations from 'right'.
        left: Dataframe of AQAnnotations, the ones we are looking to see if they are before AQAnnotations from 'middle'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if they are after AQAnnotations from 'middle'.
        dist: Number of characters where startOffset from 'middle' must occur after endOffset of 'left' or endOffset from 'middle' must occur before startOffset of 'right'
        limit: Number of AQAnnotations to return.
        negate: Whether to negate the entire query (think NOT between). Default is false.
    Returns:
        Dataframe of AQAnnotations
    """
    intermediate = None
    intermediate2 = None
    results = None
    if negate:
        # Step 1: middle annotations that occur before a 'right' annotation (within dist)
        intermediate = middle.alias("L").join(right.alias("R"),
                                  ((col("L.docId") == col("R.docId")) &
                                   (col("R.startOffset") >= col("L.endOffset")) &
                                   (col("R.startOffset") - col("L.endOffset") < dist)) &
                                   (~((col("L.annotSet") == col("R.annotSet")) &
                                      (col("L.annotType") == col("R.annotType")) &
                                      (col("L.startOffset") == col("R.startOffset")) &
                                      (col("L.endOffset") == col("R.endOffset")))),"leftsemi")
        # Step 2: of those, keep the ones that also occur after a 'left' annotation (within dist)
        # NOTE(review): this self-identity test compares annotId where the non-negate branch
        # compares startOffset -- confirm the asymmetry is intended.
        intermediate2 = intermediate.alias("L").join(left.alias("R"),
                                    ((col("L.docId") == col("R.docId")) &
                                     (col("L.startOffset") >= col("R.endOffset")) &
                                     (col("L.startOffset") - col("R.endOffset") < dist) &
                                     (~((col("L.annotSet") == col("R.annotSet")) &
                                        (col("L.annotType") == col("R.annotType")) &
                                        (col("L.annotId") == col("R.annotId")) &
                                        (col("L.endOffset") == col("R.endOffset"))))),"leftsemi")
        # Step 3: anti-join middle against the 'between' set, keeping only the NOT-between rows
        results = middle.alias("L").join(intermediate2.alias("R"),
                        ((col("L.docId") == col("R.docId")) &
                         (col("L.annotSet") == col("R.annotSet")) &
                         (col("L.annotType") == col("R.annotType")) &
                         (col("L.annotId") == col("R.annotId"))),"leftouter") \
                        .filter(col("R.docId").isNull()) \
                        .select("L.*")
    else:
        # Step 1: middle annotations that occur before a 'right' annotation (within dist)
        intermediate = middle.alias("L").join(right.alias("R"),
                                  ((col("L.docId") == col("R.docId")) &
                                   (col("R.startOffset") >= col("L.endOffset")) &
                                   (col("R.startOffset") - col("L.endOffset") < dist)) &
                                   (~((col("L.annotSet") == col("R.annotSet")) &
                                      (col("L.annotType") == col("R.annotType")) &
                                      (col("L.startOffset") == col("R.startOffset")) &
                                      (col("L.endOffset") == col("R.endOffset")))),"leftsemi")
        # Step 2: of those, keep the ones that also occur after a 'left' annotation (within dist)
        results = intermediate.alias("L").join(left.alias("R"),
                              ((col("L.docId") == col("R.docId")) &
                               (col("L.startOffset") >= col("R.endOffset")) &
                               (col("L.startOffset") - col("R.endOffset") < dist) &
                               (~((col("L.annotSet") == col("R.annotSet")) &
                                  (col("L.annotType") == col("R.annotType")) &
                                  (col("L.startOffset") == col("R.startOffset")) &
                                  (col("L.endOffset") == col("R.endOffset"))))),"leftsemi")
    if limit > 0:
        results = results.limit(limit)
    return results
def Sequence(left, right, dist=sys.maxsize, limit=0):
    """Provide the ability to find annotations that are before another annotation.
    The input is 2 Dataframes of AQAnnotations. We will call them A and B.
    The purpose is to find those annotations in A that are before B.
    What that means is the end offset for an annotation from A must be before (or equal to) the start offset from an annotation in B.
    We ultimately return the annotations that meet this criteria.
    Unlike the Before function, we adjust the returned annotation a bit.
    For example, we set the annotType to "seq" and we use the A startOffset and the B endOffset.
    A distance operator can also be optionally specified. This would require an A annotation (endOffset) to occur n characters (or less) before the B annotation (startOffset).
    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they are before AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if are after AQAnnotations from 'left'.
        dist: Number of characters where endOffset from 'left' must occur before startOffset from 'right'. Default is sys.maxsize.
        limit: Number of AQAnnotations to return.
    Returns:
        Dataframe of AQAnnotations
    """
    # Workaround for Catalyst optimization issues when working with two Dataframes
    # derived from the same Dataframe - rename the right-hand columns up front
    tmpRight = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                    .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", 'R_properties')
    # A before B: same doc, A ends at/before B starts, within dist, and A is not the identical annotation
    cond = ((col("L.docId") == col("R_docId")) &
            (col("R_startOffset") >= col("L.endOffset")) &
            (col("R_startOffset") - col("L.endOffset") < dist) &
            (~((col("L.annotSet") == col("R_annotSet")) &
               (col("L.annotType") == col("R_annotType")) &
               (col("L.startOffset") == col("R_startOffset")) &
               (col("L.endOffset") == col("R_endOffset")))))
    # Build the 'seq' annotation: A's startOffset paired with B's endOffset
    results = left.alias("L").join(tmpRight, cond) \
                  .select("L.docId", "L.annotSet", "L.startOffset", "R_endOffset", "L.annotId") \
                  .withColumnRenamed("R_endOffset", "endOffset") \
                  .withColumn("annotType", lit("seq")) \
                  .withColumn("properties", lit(None)) \
                  .dropDuplicates(["docId","annotSet","annotType","annotId","startOffset","endOffset"])
    return results.limit(limit) if limit > 0 else results
def Or(left, right, limit=0):
    """Provide the ability to combine (union) Dataframes of AQAnnotations.
    The input is 2 Dataframes of AQAnnotations. The output is the union of these annotations with duplicate annotations removed.
    Args:
        left: Dataframe of AQAnnotations
        right: Dataframe of AQAnnotations
        limit: Number of AQAnnotations to return (0 means no limit).
    Returns:
        Dataframe of AQAnnotations
    """
    # Note: union will change the number of partitions (and can impact performance)
    combined = left.union(right) \
                   .dropDuplicates(["docId","annotSet","annotType","annotId","startOffset","endOffset"])
    return combined.limit(limit) if limit > 0 else combined
def And(left, right, limit=0, negate=False, leftOnly=True):
    """Provide the ability to find annotations that are in the same document.

    The input is 2 Dataframes of AQAnnotations. We will call them A (left) and B (right).
    The purpose is to find those annotations in A and B that are in the same document.

    Args:
        left: Dataframe of AQAnnotations
        right: Dataframe of AQAnnotations.
        limit: Number of AQAnnotations to return. 0 (the default) returns everything.
        negate: think and NOT (only return annotations from A that are not in B). Default is false.
        leftOnly: Return only the left or the left and right. The default is to only return the left.

    Returns:
        Dataframe of AQAnnotations
    """
    results = None
    # Workaround for Catalyst optimization issues when working with two Dataframes derived from the same Dataframe - Catalyst gets confused
    tmpRight = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                    .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", 'R_properties')
    tmpLeft = left.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                  .toDF("L_annotId", "L_annotSet", "L_annotType", "L_docId", "L_endOffset", "L_startOffset", 'L_properties')
    if negate:
        # NOT semantics: left outer join against the distinct docIds of 'right',
        # then keep only the left rows that found no match (R_docId is null).
        results = left.alias("L").join(tmpRight.select("R_docId").distinct(),
                                       (col("L.docId") == col("R_docId")), "leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        if leftOnly:
            # leftsemi keeps only the columns/rows of 'left' whose docId appears in 'right'
            results = left.alias("L").join(tmpRight.select("R_docId").distinct(),
                                           (col("L.docId") == col("R_docId")), "leftsemi")
        else:
            # Keep both sides: left rows whose docId is in right, plus right rows whose
            # docId is in left (note the "L" alias on the second join refers to 'right').
            a = left.alias("L").join(tmpRight.select("R_docId").distinct(),
                                     (col("L.docId") == col("R_docId")), "leftsemi")
            b = right.alias("L").join(tmpLeft.select("L_docId").distinct(),
                                      (col("L.docId") == col("L_docId")), "leftsemi")
            # Will change number of partitions (and impact performance)
            results = a.union(b) \
                       .dropDuplicates(["docId","annotSet","annotType","annotId","startOffset","endOffset"])
    if limit > 0:
        results = results.limit(limit)
    return results
def MatchProperty(left, right, name, negate=False, limit=0):
    """Provide the ability to find annotations (looking at their property) that are in the same document.

    The input is 2 Dataframes of AQAnnotations. We will call them A (left) and B (right).
    The purpose is to find those annotations in A that are in the same document as B and
    also match values on the specified property.

    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they match AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations the ones we are looking to see if they match AQAnnotations from 'left'.
        name: Name of the property to match.
        limit: Number of AQAnnotations to return. 0 (the default) returns everything.
        negate: Whether to negate the entire query (think NOT contains). Default is false.

    Returns:
        Dataframe of AQAnnotations
    """
    results = None
    # Workaround for Catalyst optimization issues when working with two Dataframes derived from the same Dataframe - Catalyst gets confused
    tmpRight = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                    .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", 'R_properties')
    if negate:
        # NOT semantics: left outer join on docId plus property equality
        # (backticks protect property names containing special characters),
        # then keep only the left rows with no matching right row (R_docId null).
        results = left.alias("L").join(tmpRight,
                                       ((col("L.docId") == col("R_docId")) &
                                        (col("L.properties.`" + name + "`") == col("R_properties.`" + name + "`"))) ,"leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        # leftsemi keeps only the columns/rows of 'left' that have a property match in 'right'
        results = left.alias("L").join(tmpRight,
                                       ((col("L.docId") == col("R_docId")) &
                                        (col("L.properties.`" + name + "`") == col("R_properties.`" + name + "`"))),"leftsemi")
    if limit > 0:
        results = results.limit(limit)
    return results
def Preceding(annot, anchor, container=None, cnt=3):
    """Provide the ability to find the preceding sibling annotations for every annotation in the anchor Dataframe of AQAnnotations.

    The preceding sibling annotations can optionally be required to be contained in a container
    Dataframe of AQAnnotations. The return type of this function is different from other functions.
    Instead of returning a Dataframe of AQAnnotations this function returns a Dataframe of
    (AQAnnotation, Array[AQAnnotation]).

    Args:
        annot: Dataframe of AQAnnotations, the ones we will be using to look for preceding sibling annotations.
        anchor: Dataframe of AQAnnotations starting point for using to look for preceding sibling annotations (use the startOffset and docId).
        container: Dataframe of AQAnnotations to use when requiring the preceding sibling annotations to be contained in a specific annotation.
        cnt: Number of preceding sibling AQAnnotations to return.

    Returns:
        Dataframe of (AQAnnotation, Array[AQAnnotation])
    """
    # Get the preceding annotations for one grouped record.
    # rec is (groupKey, iterable of joined rows). Each row carries the R* fields of the
    # anchor plus the L* fields of one candidate preceding annotation (the L* fields are
    # all null for anchors with no candidates, because of the rightouter join below).
    def precedingAQ(rec,cnt):
        # Sort the preceding annotations closest-first (descending endOffset) and keep at
        # most cnt. The -1 placeholder stands in for a null LendOffset (no candidates).
        srecs = sorted(rec[1], key=lambda x: (-1 if x.LendOffset == None else x.LendOffset),reverse=True)[0:cnt]
        # We can extract the key (the anchor annotation) from any 'right' entry in the
        # sorted recs (we will use the first one)
        key = Row(docId = srecs[0].RdocId,
                  annotSet = srecs[0].RannotSet,
                  annotType = srecs[0].RannotType,
                  startOffset = int(srecs[0].RstartOffset),
                  endOffset = int(srecs[0].RendOffset),
                  annotId = int(srecs[0].RannotId),
                  properties = srecs[0].Rproperties)
        # Construct the array of preceding annotations.
        # NOTE(review): the loop variable shadows the outer 'rec' parameter; harmless here
        # because 'rec' is not referenced again after the loop.
        values = []
        for rec in srecs:
            if rec.LdocId != None:  # skip the all-null row produced for anchors with no candidates
                values.append(Row(docId = rec.LdocId,
                                  annotSet = rec.LannotSet,
                                  annotType = rec.LannotType,
                                  startOffset = int(rec.LstartOffset),
                                  endOffset = int(rec.LendOffset),
                                  annotId = int(rec.LannotId),
                                  properties = rec.Lproperties))
        return(key,values)
    # Filter the preceding annotations down to those contained in the joined container
    # annotation; an anchor that matched no container (CdocId null) keeps an empty list.
    def precedingContainedAQ(rec):
        if rec.CdocId == None:
            return (rec.annot,[])
        else:
            values = []
            for entry in rec.annots:
                if (entry.startOffset >= rec.CstartOffset) and (entry.endOffset <= rec.CendOffset):
                    values.append(entry)
            return (rec.annot,values)
    # Prefix the column names (L = candidate siblings, R = anchors) so the join is unambiguous.
    l = annot.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("LannotId","LannotSet","LannotType","LdocId","LendOffset","Lproperties","LstartOffset")
    r = anchor.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("RannotId","RannotSet","RannotType","RdocId","RendOffset","Rproperties","RstartOffset")
    # Group on the anchor annotation. rightouter keeps anchors with no preceding
    # annotations, so every group is guaranteed at least one row (srecs is never empty).
    results = l.join(r,
                     (col("LdocId") == col("RdocId")) &
                     (col("LendOffset") <= col("RstartOffset")),
                     "rightouter") \
               .rdd \
               .groupBy(lambda x: (x["RdocId"],x["RstartOffset"],x["RendOffset"])) \
               .map(lambda rec: precedingAQ(rec,cnt))
    results = spark.createDataFrame(results.map(lambda x: x),AQSchemaList())
    if (container != None) and (not(container.rdd.isEmpty())):
        # Join each (anchor, siblings) pair with the containers enclosing the anchor, then
        # drop the siblings whose offsets fall outside the matched container.
        c = container.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("CannotId","CannotSet","CannotType","CdocId","CendOffset","Cproperties","CstartOffset")
        cResults = results.join(c,
                                (col("annot.docId") == col("CdocId")) &
                                (col("annot.startOffset") >= col("CstartOffset")) &
                                (col("annot.endOffset") <= col("CendOffset")),
                                "leftouter") \
                          .rdd \
                          .map(lambda rec: precedingContainedAQ(rec))
        # Need to drop duplicates (an anchor matched by multiple containers yields multiple rows)
        return spark.createDataFrame(cResults.map(lambda x: x),AQSchemaList())
    else:
        return results
def Following(annot, anchor, container=None, cnt=3):
    """Provide the ability to find the following sibling annotations for every annotation in the anchor Dataframe of AQAnnotations.

    The following sibling annotations can optionally be required to be contained in a container
    Dataframe of AQAnnotations. The return type of this function is different from other functions.
    Instead of returning a Dataframe of AQAnnotations this function returns a Dataframe of
    (AQAnnotation, Array[AQAnnotation]).

    Args:
        annot: Dataframe of AQAnnotations, the ones we will be using to look for following sibling annotations.
        anchor: Dataframe of AQAnnotations starting point for using to look for following sibling annotations (use the endOffset and docId).
        container: Dataframe of AQAnnotations to use when requiring the following sibling annotations to be contained in a specific annotation.
        cnt: Number of following sibling AQAnnotations to return.

    Returns:
        Dataframe of (AQAnnotation, Array[AQAnnotation])
    """
    # Get the following annotations for one grouped record.
    # rec is (groupKey, iterable of joined rows). Each row carries the R* fields of the
    # anchor plus the L* fields of one candidate following annotation (the L* fields are
    # all null for anchors with no candidates, because of the rightouter join below).
    def followingAQ(rec,cnt):
        # Sort the following annotations closest-first (ascending startOffset) and keep at
        # most cnt. The -1 placeholder stands in for a null LstartOffset (no candidates).
        srecs = sorted(rec[1], key=lambda x: (-1 if x.LstartOffset == None else x.LstartOffset))[0:cnt]
        # We can extract the key (the anchor annotation) from any 'right' entry in the
        # sorted recs (we will use the first one)
        key = Row(docId = srecs[0].RdocId,
                  annotSet = srecs[0].RannotSet,
                  annotType = srecs[0].RannotType,
                  startOffset = int(srecs[0].RstartOffset),
                  endOffset = int(srecs[0].RendOffset),
                  annotId = int(srecs[0].RannotId),
                  properties = srecs[0].Rproperties)
        # Construct the array of following annotations.
        # NOTE(review): the loop variable shadows the outer 'rec' parameter; harmless here
        # because 'rec' is not referenced again after the loop.
        values = []
        for rec in srecs:
            if rec.LdocId != None:  # skip the all-null row produced for anchors with no candidates
                values.append(Row(docId = rec.LdocId,
                                  annotSet = rec.LannotSet,
                                  annotType = rec.LannotType,
                                  startOffset = int(rec.LstartOffset),
                                  endOffset = int(rec.LendOffset),
                                  annotId = int(rec.LannotId),
                                  properties = rec.Lproperties))
        return(key,values)
    # Filter the following annotations down to those contained in the joined container
    # annotation; an anchor that matched no container (CdocId null) keeps an empty list.
    def followingContainedAQ(rec):
        if rec.CdocId == None:
            return (rec.annot,[])
        else:
            values = []
            for entry in rec.annots:
                if (entry.startOffset >= rec.CstartOffset) and (entry.endOffset <= rec.CendOffset):
                    values.append(entry)
            return (rec.annot,values)
    # Prefix the column names (L = candidate siblings, R = anchors) so the join is unambiguous.
    l = annot.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("LannotId","LannotSet","LannotType","LdocId","LendOffset","Lproperties","LstartOffset")
    r = anchor.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("RannotId","RannotSet","RannotType","RdocId","RendOffset","Rproperties","RstartOffset")
    # Group on the anchor annotation. rightouter keeps anchors with no following
    # annotations, so every group is guaranteed at least one row (srecs is never empty).
    results = l.join(r,
                     (col("LdocId") == col("RdocId")) &
                     (col("LstartOffset") >= col("RendOffset")),
                     "rightouter") \
               .rdd \
               .groupBy(lambda x: (x["RdocId"],x["RstartOffset"],x["RendOffset"])) \
               .map(lambda rec: followingAQ(rec,cnt))
    results = spark.createDataFrame(results.map(lambda x: x),AQSchemaList())
    if (container != None) and (not(container.rdd.isEmpty())):
        # Join each (anchor, siblings) pair with the containers enclosing the anchor, then
        # drop the siblings whose offsets fall outside the matched container.
        c = container.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("CannotId","CannotSet","CannotType","CdocId","CendOffset","Cproperties","CstartOffset")
        cResults = results.join(c,
                                (col("annot.docId") == col("CdocId")) &
                                (col("annot.startOffset") >= col("CstartOffset")) &
                                (col("annot.endOffset") <= col("CendOffset")),
                                "leftouter") \
                          .rdd \
                          .map(lambda rec: followingContainedAQ(rec))
        # Need to drop duplicates (an anchor matched by multiple containers yields multiple rows)
        return spark.createDataFrame(cResults.map(lambda x: x),AQSchemaList())
    else:
        return results
def TokensSpan(tokens, spans, tokenProperty):
    """Provides the ability to create a string from a list of tokens that are contained in a span.

    The specified tokenProperty is used to extract the values from the tokens when creating the
    string. For SCNLP, this tokenProperty could be values like 'orig', 'lemma', or 'pos'. The spans
    would typically be a SCNLP 'sentence' or could even be things like an OM 'ce:para'.

    Args:
        tokens: Dataframe of AQAnnotations (which we will use to concatenate for the string)
        spans: Dataframe of AQAnnotations (identifies the start/end for the tokens to be used for the concatenated string)
        tokenProperty: The property field in the tokens to use for extracting the value for the concatenated string

    Returns:
        Dataframe[AQAnnotation] spans with 3 new properties all prefixed with the specified
        tokenProperty value followed by (ToksStr, ToksSpos, ToksEpos). The ToksStr property will be
        the concatenated string of token property values contained in the span. The ToksSpos and
        ToksEpos properties will help us determine the start/end offset for each of the individual
        tokens in the ToksStr. These helper properties are needed for the function RegexTokensSpan
        so we can generate accurate start/end offsets based on the concatenated string.
    """
    def process(rec):
        # rec is (span AQAnnotation, list of token AQAnnotations contained in the span)
        span = rec[0]
        tokens = rec[1]
        # Start from a copy of the span's existing properties. Guard against None:
        # other functions in this module create annotations with properties=lit(None),
        # which would otherwise raise AttributeError on .keys().
        newProps = {}
        oldProps = span.properties
        if oldProps is not None:
            for key in oldProps.keys():
                newProps[key] = oldProps[key]
        toksStr = []   # token property values, later joined with single spaces
        toksSpos = []  # "<offset in concat str>|<startOffset in doc>" per token
        toksEpos = []  # "<offset in concat str>|<endOffset in doc>" per token
        offset = 0
        for token in tokens:
            if (token.properties != None) and (tokenProperty in token.properties):
                tokStr = token.properties[tokenProperty]
                toksStr.append(tokStr)
                toksSpos.append((str(offset) + "|" + str(token.startOffset)))
                offset += len(tokStr)
                toksEpos.append((str(offset) + "|" + str(token.endOffset)))
                offset += 1  # account for the single-space separator used by " ".join
        newProps[tokenProperty + "ToksStr"] = " ".join(toksStr)
        newProps[tokenProperty + "ToksSpos"] = " ".join(toksSpos)
        newProps[tokenProperty + "ToksEpos"] = " ".join(toksEpos)
        return Row(docId = span.docId,
                   annotSet = span.annotSet,
                   annotType = span.annotType,
                   startOffset = span.startOffset,
                   endOffset = span.endOffset,
                   annotId = span.annotId,
                   properties = newProps)
    results = ContainedInList(tokens,spans).rdd.map(lambda rec: process(rec))
    return spark.createDataFrame(results.map(lambda x: x),AQSchema())
def RegexTokensSpan(tokensSpan, prop, regex, annotSet="", annotType="", annotProps=None):
    """Provides the ability to apply a regular expression to the concatenated string generated by TokensSpan.

    For the strings matching the regex, a Dataframe[AQAnnotations] will be returned.
    The AQAnnotation will correspond to the offsets within the concatenated string containing the match.

    Args:
        tokensSpan: Dataframe of AQAnnotations (the annotations returned from the TokensSpan function)
        prop: the property name (orig, lemma, pos) that was used to generate the string for the span in TokensSpan
        regex: the regular expression to apply to the span
        annotSet: the value to assign to annotSet for the returned matched annotations (default will be the annotSet from the tokensSpan)
        annotType: the value to assign to annotType for the returned matched annotations (default will be the annotType from the tokensSpan)
        annotProps: the additional properties to append to the properties map for the returned matched annotations

    Returns:
        Dataframe[AQAnnotation] for the strings matching the regex
    """
    # Avoid the shared mutable-default pitfall; None means "no extra properties".
    if annotProps is None:
        annotProps = {}
    def process(partition, prop, regex, annotSet, annotType, annotProps):
        # Third-party 'regex' module (superset of stdlib re); imported inside the
        # function so the dependency is resolved on the Spark executors.
        import regex as re
        # 'from pyspark.sql.functions import *' at module scope shadows the builtin
        # max/min with Spark column functions, so fetch the builtins explicitly.
        import builtins as py_builtin
        results = []
        # NOTE(review): annotId restarts at 0 for every partition, so ids are only
        # unique within a partition - confirm callers do not rely on global uniqueness.
        annotId = 0
        pattern = re.compile(regex)
        for rec in partition:
            if (rec.properties != None) and (prop + "ToksStr" in rec.properties):
                span = rec.properties[prop + "ToksStr"]
                for match in re.finditer(pattern, span):
                    annotId += 1
                    # Default the annotSet/annotType to the span's values when not overridden
                    newAnnotSet = annotSet
                    newAnnotType = annotType
                    if (annotSet == ""):
                        newAnnotSet = rec.annotSet
                    if (annotType == ""):
                        newAnnotType = rec.annotType
                    props = {}
                    oldProps = rec.properties
                    for key in annotProps.keys():
                        props[key] = annotProps[key]
                    # Map the match start in the concatenated string back to a document
                    # startOffset using the "strOffset|docOffset" pairs built by TokensSpan.
                    # If the match does not begin exactly on a token boundary, fall back to
                    # the closest token start before the match.
                    startPos = -1
                    startPosLB = []
                    for start in oldProps[prop + "ToksSpos"].split(" "):
                        startToks = start.split("|")
                        if int(startToks[0]) == match.start():
                            startPos = int(startToks[1])
                        if int(startToks[0]) < match.start():
                            startPosLB.append(int(startToks[1]))
                    if startPos == -1:
                        startPos = py_builtin.max(startPosLB)
                    # Same mapping for the match end (closest token end after the match).
                    endPos = -1
                    endPosLB = []
                    for end in oldProps[prop + "ToksEpos"].split(" "):
                        endToks = end.split("|")
                        if int(endToks[0]) == match.end():
                            endPos = int(endToks[1])
                        if int(endToks[0]) > match.end():
                            endPosLB.append(int(endToks[1]))
                    if endPos == -1:
                        endPos = py_builtin.min(endPosLB)
                    props[prop + "Match"] = span[match.start():match.end()]
                    # get the excludes from the span (but only include those contained within the match)
                    for key in oldProps.keys():
                        if key == "excludes":
                            excludesLB = []
                            for exclude in oldProps[key].split("|"):
                                arr = exclude.split(",")
                                excludeStart = int(arr[3])
                                excludeEnd = int(arr[4])
                                if excludeStart >= startPos and excludeEnd <= endPos:
                                    excludesLB.append(exclude)
                            if len(excludesLB):
                                props["excludes"] = "|".join(excludesLB)
                    annot = Row(docId = rec.docId,
                                annotSet = newAnnotSet,
                                annotType = newAnnotType,
                                startOffset = startPos,
                                endOffset = endPos,
                                annotId = annotId,
                                properties = props)
                    results.append(annot)
        return iter(results)
    results = tokensSpan.rdd.mapPartitions(lambda partition: process(partition, prop, regex, annotSet, annotType, annotProps))
    return spark.createDataFrame(results.map(lambda x: x), AQSchema())
from pathlib import Path
from bokeh.palettes import diverging_palette
import geopandas as gpd
import numpy as np
import pandas as pd
import pandas_bokeh
data_dir = Path("../data")
html_dir = Path("../html")
pandas_bokeh.output_notebook()
# %%
esbmap_stations = (
gpd.read_file(
data_dir / "heatmap_stations.geojson",
driver="GeoJSON",
)
.to_crs(epsg=2157)
.assign(
name=lambda df: df["Station Name"],
voltages=lambda df: df["Secondary Voltage(s)"],
slr_load_mva=lambda gdf: gdf["SLR Load MVA"].round(),
installed_capacity_mva=lambda gdf: gdf["Installed Capacity MVA"].round(),
planned_capacity_mva=lambda gdf: gdf["Demand Planning Capacity"].round(),
demand_available_mva=lambda gdf: gdf["Demand Available MVA"].round(),
gen_available_firm_mva=lambda gdf: gdf["Gen Available Firm"].round(),
scaled_installed_capacity_mva=lambda gdf: gdf["installed_capacity_mva"] / 4,
)
)
# %%
esbmap_stations_clustered = gpd.read_file(
data_dir / "esbmap_stations_clustered.geojson", driver="GeoJSON"
)
# %%
small_area_esbmap_stations = gpd.read_file(
data_dir / "small_area_esbmap_stations.geojson", driver="GeoJSON"
)
# %%[markdown]
# # Plot Station clusters via Bokeh
# %%
# pandas_bokeh.output_file(html_dir / "substations_clustered_to_10_points.html")
# %%
hovertool_string = """
<table style="background-color:#084594;color:#ffffff">
<tr>
<td>Demand Available [MVA]</th>
<td>@demand_available_mva</td>
</tr>
<tr>
<td>Installed Capacity [MVA]</th>
<td>@installed_capacity_mva</td>
</tr>
<tr>
<td>SLR Load [MVA]</th>
<td>@slr_load_mva</td>
</tr>
</table>
"""
figure = small_area_esbmap_stations.plot_bokeh(
figsize=(700, 900),
dropdown=["demand_available_mva", "installed_capacity_mva", "slr_load_mva"],
# colormap=(
# "#f7fbff",
# "#3182bd",
# ),
colormap_range=(0, 50),
hovertool_string=hovertool_string,
fill_alpha=0.5,
)
hovertool_string = """
<table style="background-color:#084594;color:#ffffff">
<tr>
<td>Name</th>
<td>@station_name</td>
</tr>
<tr>
<td>Secondary Voltage(s)</th>
<td>@voltages</td>
</tr>
<tr>
<td>Demand Available [MVA]</th>
<td>@demand_available_mva</td>
</tr>
<tr>
<td>Installed Capacity [MVA]</th>
<td>@installed_capacity_mva</td>
</tr>
<tr>
<td>SLR Load [MVA]</th>
<td>@slr_load_mva</td>
</tr>
</table>
"""
figure = esbmap_stations.plot_bokeh(
figure=figure,
marker="inverted_triangle",
hovertool_string=hovertool_string,
legend="Substations",
size="scaled_installed_capacity_mva",
fill_alpha=0.2,
)
# %% [markdown]
# # Plot Station Clusters via Seaborn
# %%
def plot_clusters(boundary, unclustered, clustered, column_name):
    """Plot clustered station points over a boundary, annotated with cluster info.

    NOTE(review): relies on `plt` (presumably matplotlib.pyplot) and `pe`
    (presumably matplotlib.patheffects) being imported elsewhere in the
    notebook - confirm, as they are not imported in this file.

    Args:
        boundary: GeoDataFrame plotted as the background layer.
        unclustered: GeoDataFrame of individual stations, drawn as small black dots.
        clustered: GeoDataFrame of cluster points; must contain a 'cluster_id'
            column and the column named by column_name.
        column_name: Name of the clustered column whose value is annotated
            below each cluster point.

    Returns:
        Tuple (figure, axes) of the created matplotlib objects.
    """
    f, ax = plt.subplots(figsize=(20, 20))
    boundary.plot(ax=ax, alpha=0.5)
    # Cluster points drawn large and green, each with two annotations:
    # the cluster id above the point and the requested column value below it.
    clustered.plot(ax=ax, c="#99cc99", edgecolor="None", alpha=0.7, markersize=120)
    clustered.apply(
        lambda gdf: ax.annotate(
            "ID = " + str(gdf["cluster_id"]),
            xy=gdf.geometry.centroid.coords[0],
            va="top",
            # white outline keeps the label readable over the map
            path_effects=[pe.withStroke(linewidth=4, foreground="white")],
        ),
        axis="columns",
    )
    clustered.apply(
        lambda gdf: ax.annotate(
            gdf[column_name],
            xy=gdf.geometry.centroid.coords[0],
            va="bottom",
            path_effects=[pe.withStroke(linewidth=4, foreground="white")],
        ),
        axis="columns",
    )
    unclustered.plot(ax=ax, c="k", alpha=0.9, markersize=3)
    return f, ax
# %% [markdown]
# ## Plot Remaining SLR load after bottom-up cluster load
# %%
f, ax = plot_clusters(
dublin_boundary,
esbmap_stations,
esbmap_stations_clustered,
"remaining_load_mva_upper",
)
plt.title(
"Remaining SLR Substation capacity - Low Demand Scenario",
fontsize=20,
)
props = dict(boxstyle="round", facecolor="yellow", alpha=0.5)
textstr = "Assuming:\n" "Mean residential peak load of 1.5kW\n"
# place a text box in upper left in axes coords
ax.text(
0.67,
0.99,
textstr,
transform=ax.transAxes,
fontsize=12,
verticalalignment="top",
bbox=props,
)
f.savefig(data_dir / "Remaining Capacity 2kW Residential Load & Eirgrid DC Load.png")
# %%
f, ax = plot_clusters(
dublin_boundary,
esbmap_stations,
esbmap_stations_clustered,
"remaining_load_mva_lower",
)
plt.title(
"Remaining SLR Substation capacity - High Demand Scenario",
fontsize=20,
)
props = dict(boxstyle="round", facecolor="yellow", alpha=0.5)
textstr = "Assuming:\n" "Mean residential peak load of 2kW\n"
# place a text box in upper left in axes coords
ax.text(
0.67,
0.99,
textstr,
transform=ax.transAxes,
fontsize=12,
verticalalignment="top",
bbox=props,
)
# %% [markdown]
# ## Plot Small Area Remaining Capacity
# %%
def replace_legend_items(legend, mapping):
    """Rename legend entries in place.

    Each legend text equal to str(key) for some key in *mapping* is replaced
    with the corresponding mapped value.
    """
    for entry in legend.texts:
        for key, label in mapping.items():
            if str(key) == entry.get_text():
                entry.set_text(label)
f, ax = plt.subplots(figsize=(20, 20))
bins = mc.UserDefined(
esbmap_stations_clustered["demand_available_mva"], [-np.inf, 10, 60, np.inf]
)
mapping = dict([(i, s) for i, s in enumerate(bins.get_legend_classes())])
esbmap_stations_clustered.assign(hdd=bins.yb).plot(
column="hdd",
categorical=True,
cmap="OrRd",
legend=True,
legend_kwds={"loc": "lower right"},
ax=ax,
)
ax.set_axis_off()
replace_legend_items(ax.get_legend(), mapping)
plt.title("Demand Availability [MVA]", fontsize=20)
plt.show() | notebooks/plot_clustered_esbmap_stations.py | from pathlib import Path
from bokeh.palettes import diverging_palette
import geopandas as gpd
import numpy as np
import pandas as pd
import pandas_bokeh
data_dir = Path("../data")
html_dir = Path("../html")
pandas_bokeh.output_notebook()
# %%
esbmap_stations = (
gpd.read_file(
data_dir / "heatmap_stations.geojson",
driver="GeoJSON",
)
.to_crs(epsg=2157)
.assign(
name=lambda df: df["Station Name"],
voltages=lambda df: df["Secondary Voltage(s)"],
slr_load_mva=lambda gdf: gdf["SLR Load MVA"].round(),
installed_capacity_mva=lambda gdf: gdf["Installed Capacity MVA"].round(),
planned_capacity_mva=lambda gdf: gdf["Demand Planning Capacity"].round(),
demand_available_mva=lambda gdf: gdf["Demand Available MVA"].round(),
gen_available_firm_mva=lambda gdf: gdf["Gen Available Firm"].round(),
scaled_installed_capacity_mva=lambda gdf: gdf["installed_capacity_mva"] / 4,
)
)
# %%
esbmap_stations_clustered = gpd.read_file(
data_dir / "esbmap_stations_clustered.geojson", driver="GeoJSON"
)
# %%
small_area_esbmap_stations = gpd.read_file(
data_dir / "small_area_esbmap_stations.geojson", driver="GeoJSON"
)
# %%[markdown]
# # Plot Station clusters via Bokeh
# %%
# pandas_bokeh.output_file(html_dir / "substations_clustered_to_10_points.html")
# %%
hovertool_string = """
<table style="background-color:#084594;color:#ffffff">
<tr>
<td>Demand Available [MVA]</th>
<td>@demand_available_mva</td>
</tr>
<tr>
<td>Installed Capacity [MVA]</th>
<td>@installed_capacity_mva</td>
</tr>
<tr>
<td>SLR Load [MVA]</th>
<td>@slr_load_mva</td>
</tr>
</table>
"""
figure = small_area_esbmap_stations.plot_bokeh(
figsize=(700, 900),
dropdown=["demand_available_mva", "installed_capacity_mva", "slr_load_mva"],
# colormap=(
# "#f7fbff",
# "#3182bd",
# ),
colormap_range=(0, 50),
hovertool_string=hovertool_string,
fill_alpha=0.5,
)
hovertool_string = """
<table style="background-color:#084594;color:#ffffff">
<tr>
<td>Name</th>
<td>@station_name</td>
</tr>
<tr>
<td>Secondary Voltage(s)</th>
<td>@voltages</td>
</tr>
<tr>
<td>Demand Available [MVA]</th>
<td>@demand_available_mva</td>
</tr>
<tr>
<td>Installed Capacity [MVA]</th>
<td>@installed_capacity_mva</td>
</tr>
<tr>
<td>SLR Load [MVA]</th>
<td>@slr_load_mva</td>
</tr>
</table>
"""
figure = esbmap_stations.plot_bokeh(
figure=figure,
marker="inverted_triangle",
hovertool_string=hovertool_string,
legend="Substations",
size="scaled_installed_capacity_mva",
fill_alpha=0.2,
)
# %% [markdown]
# # Plot Station Clusters via Seaborn
# %%
def plot_clusters(boundary, unclustered, clustered, column_name):
    """Plot clustered station points over a boundary, annotated with cluster info.

    NOTE(review): relies on `plt` (presumably matplotlib.pyplot) and `pe`
    (presumably matplotlib.patheffects) being imported elsewhere in the
    notebook - confirm, as they are not imported in this file.

    Args:
        boundary: GeoDataFrame plotted as the background layer.
        unclustered: GeoDataFrame of individual stations, drawn as small black dots.
        clustered: GeoDataFrame of cluster points; must contain a 'cluster_id'
            column and the column named by column_name.
        column_name: Name of the clustered column whose value is annotated
            below each cluster point.

    Returns:
        Tuple (figure, axes) of the created matplotlib objects.
    """
    f, ax = plt.subplots(figsize=(20, 20))
    boundary.plot(ax=ax, alpha=0.5)
    # Cluster points drawn large and green, each with two annotations:
    # the cluster id above the point and the requested column value below it.
    clustered.plot(ax=ax, c="#99cc99", edgecolor="None", alpha=0.7, markersize=120)
    clustered.apply(
        lambda gdf: ax.annotate(
            "ID = " + str(gdf["cluster_id"]),
            xy=gdf.geometry.centroid.coords[0],
            va="top",
            # white outline keeps the label readable over the map
            path_effects=[pe.withStroke(linewidth=4, foreground="white")],
        ),
        axis="columns",
    )
    clustered.apply(
        lambda gdf: ax.annotate(
            gdf[column_name],
            xy=gdf.geometry.centroid.coords[0],
            va="bottom",
            path_effects=[pe.withStroke(linewidth=4, foreground="white")],
        ),
        axis="columns",
    )
    unclustered.plot(ax=ax, c="k", alpha=0.9, markersize=3)
    return f, ax
# %% [markdown]
# ## Plot Remaining SLR load after bottom-up cluster load
# %%
f, ax = plot_clusters(
dublin_boundary,
esbmap_stations,
esbmap_stations_clustered,
"remaining_load_mva_upper",
)
plt.title(
"Remaining SLR Substation capacity - Low Demand Scenario",
fontsize=20,
)
props = dict(boxstyle="round", facecolor="yellow", alpha=0.5)
textstr = "Assuming:\n" "Mean residential peak load of 1.5kW\n"
# place a text box in upper left in axes coords
ax.text(
0.67,
0.99,
textstr,
transform=ax.transAxes,
fontsize=12,
verticalalignment="top",
bbox=props,
)
f.savefig(data_dir / "Remaining Capacity 2kW Residential Load & Eirgrid DC Load.png")
# %%
f, ax = plot_clusters(
dublin_boundary,
esbmap_stations,
esbmap_stations_clustered,
"remaining_load_mva_lower",
)
plt.title(
"Remaining SLR Substation capacity - High Demand Scenario",
fontsize=20,
)
props = dict(boxstyle="round", facecolor="yellow", alpha=0.5)
textstr = "Assuming:\n" "Mean residential peak load of 2kW\n"
# place a text box in upper left in axes coords
ax.text(
0.67,
0.99,
textstr,
transform=ax.transAxes,
fontsize=12,
verticalalignment="top",
bbox=props,
)
# %% [markdown]
# ## Plot Small Area Remaining Capacity
# %%
def replace_legend_items(legend, mapping):
    """Rename legend entries in place.

    Each legend text equal to str(key) for some key in *mapping* is replaced
    with the corresponding mapped value.
    """
    for txt in legend.texts:
        for k, v in mapping.items():
            if txt.get_text() == str(k):
                txt.set_text(v)
f, ax = plt.subplots(figsize=(20, 20))
bins = mc.UserDefined(
esbmap_stations_clustered["demand_available_mva"], [-np.inf, 10, 60, np.inf]
)
mapping = dict([(i, s) for i, s in enumerate(bins.get_legend_classes())])
esbmap_stations_clustered.assign(hdd=bins.yb).plot(
column="hdd",
categorical=True,
cmap="OrRd",
legend=True,
legend_kwds={"loc": "lower right"},
ax=ax,
)
ax.set_axis_off()
replace_legend_items(ax.get_legend(), mapping)
plt.title("Demand Availability [MVA]", fontsize=20)
plt.show() | 0.669313 | 0.302423 |
import pygame
class Window:
    """A minimal wrapper around a pygame window offering basic rendering.

    Attributes:
    -----------------
    background_colour : list(int, int, int)
        Colour used by clear() to wipe the screen; supplied to the constructor.
    screen : pygame.Surface
        The wrapped pygame display surface.
    screen_size : tuple(int, int)
        Width and height of the window as given to the constructor.
        Kept mostly for ease of access; could also be read from the screen.

    Methods:
    -----------------
    draw_rect(colour, top_left, size):
        Draw a filled rectangle at the given position.
    clear():
        Fill the screen with the background colour.
    height():
        Height of the window in pixels.
    width():
        Width of the window in pixels.
    """
    #pylint: disable=no-member
    def __init__(self, background_colour, width, height):
        """Create the pygame window and fill it with the background colour.

        Parameters:
        ------------------------------------------
        background_colour : list(int, int, int)
            r, g, b values (0-255) used when clearing the screen
        width : int
            Window width in pixels
        height : int
            Window height in pixels
        """
        pygame.init()
        self.background_colour = background_colour
        self.screen_size = (width, height)
        self.screen = pygame.display.set_mode(self.screen_size)
        pygame.display.set_caption('DV1614 Assignment 2: Snake')
        self.screen.fill(background_colour)
    def draw_rect(self, colour, top_left, size):
        """Draw a filled rectangle on the screen.

        Parameters:
        ------------------------------------------
        colour : list(int, int, int)
            r, g, b values (0-255) for the rectangle fill
        top_left : tuple(int, int)
            x/y coordinates of the rectangle's top-left corner
        size : tuple(int, int)
            Width and height of the rectangle
        """
        pygame.draw.rect(self.screen, colour, (top_left, size))
    def clear(self):
        """Fill the screen with the constructor's background colour."""
        self.screen.fill(self.background_colour)
    def width(self):
        """Return the window width in pixels."""
        return self.screen_size[0]
    def height(self):
        """Return the window height in pixels."""
        return self.screen_size[1]
class Window:
    """Wrapper class exposing a pygame window through a small drawing API.

    Attributes:
    -----------------
    background_colour : list(int, int, int)
        The clearing colour given to the constructor, used by clear().
    screen : pygame.Surface
        The underlying pygame display surface being wrapped.
    screen_size : tuple(int, int)
        The (width, height) passed to the constructor; kept for convenience.

    Methods:
    -----------------
    draw_rect(colour, top_left, size):
        Render a filled rectangle with the given colour and geometry.
    clear():
        Wipe the screen using the constructor's background colour.
    height():
        The window height in pixels.
    width():
        The window width in pixels.
    """
    #pylint: disable=no-member
    def __init__(self, background_colour, width, height):
        """Initialise pygame, open the window and clear it once.

        Parameters:
        ------------------------------------------
        background_colour : list(int, int, int)
            Triple of 0-255 r, g, b values used as the clearing colour
        width : int
            Width of the window
        height : int
            Height of the window
        """
        pygame.init()
        self.background_colour = background_colour
        self.screen_size = (width, height)
        self.screen = pygame.display.set_mode(self.screen_size)
        pygame.display.set_caption('DV1614 Assignment 2: Snake')
        self.screen.fill(background_colour)
    def draw_rect(self, colour, top_left, size):
        """Render a filled rectangle on the screen.

        Parameters:
        ------------------------------------------
        colour : list(int, int, int)
            Triple of 0-255 r, g, b values for the rectangle
        top_left : tuple(int, int)
            Top-left corner of the rectangle (x, y)
        size : tuple(int, int)
            Rectangle width and height
        """
        rect = (top_left, size)
        pygame.draw.rect(self.screen, colour, rect)
    def clear(self):
        """Wipe the screen using the stored background colour."""
        self.screen.fill(self.background_colour)
    def width(self):
        """Return the width of the screen."""
        return self.screen_size[0]
    def height(self):
        """Return the height of the screen."""
        return self.screen_size[1]
class RecognitionConfig(object):
    class AudioEncoding(object):
        """
        Audio encoding of the data sent in the audio message. All encodings
        support only 1 channel (mono) audio. Only ``FLAC`` includes a header
        describing the audio bytes that follow it; the other encodings are raw
        audio bytes with no header.

        For best results, capture and transmit the audio source with a lossless
        encoding (``FLAC`` or ``LINEAR16``). Lossy codecs (such as AMR, AMR_WB
        and MULAW) may reduce recognition accuracy, particularly with
        background noise present.

        Attributes:
          ENCODING_UNSPECIFIED (int): Not specified. Will return result
            ``google.rpc.Code.INVALID_ARGUMENT``.
          LINEAR16 (int): Uncompressed 16-bit signed little-endian samples
            (Linear PCM). The only encoding usable with ``AsyncRecognize``.
          FLAC (int): Recommended for ``SyncRecognize`` and
            ``StreamingRecognize``: lossless compression, so recognition
            accuracy is not compromised. The stream FLAC (Free Lossless Audio
            Codec) encoding is specified at
            http://flac.sourceforge.net/documentation.html.
            16-bit and 24-bit samples are supported.
            Not all fields in STREAMINFO are supported.
          MULAW (int): 8-bit samples companding 14-bit audio samples using
            G.711 PCMU/mu-law.
          AMR (int): Adaptive Multi-Rate Narrowband codec. ``sample_rate``
            must be 8000 Hz.
          AMR_WB (int): Adaptive Multi-Rate Wideband codec. ``sample_rate``
            must be 16000 Hz.
        """
        ENCODING_UNSPECIFIED = 0
        LINEAR16 = 1
        FLAC = 2
        MULAW = 3
        AMR = 4
        AMR_WB = 5
class StreamingRecognizeResponse(object):
class EndpointerType(object):
"""
Indicates the type of endpointer event.
Attributes:
ENDPOINTER_EVENT_UNSPECIFIED (int): No endpointer event specified.
START_OF_SPEECH (int): Speech has been detected in the audio stream, and the service is
beginning to process it.
END_OF_SPEECH (int): Speech has ceased to be detected in the audio stream. (For example, the
user may have paused after speaking.) If ``single_utterance`` is ``false``,
the service will continue to process audio, and if subsequent speech is
detected, will send another START_OF_SPEECH event.
END_OF_AUDIO (int): This event is sent after the client has half-closed the input stream gRPC
connection and the server has received all of the audio. (The server may
still be processing the audio and may subsequently return additional
results.)
END_OF_UTTERANCE (int): This event is only sent when ``single_utterance`` is ``true``. It indicates
that the server has detected the end of the user's speech utterance and
expects no additional speech. Therefore, the server will not process
additional audio (although it may subsequently return additional
results). The client should stop sending additional audio data,
half-close the gRPC connection, and wait for any additional results
until the server closes the gRPC connection.
"""
ENDPOINTER_EVENT_UNSPECIFIED = 0
START_OF_SPEECH = 1
END_OF_SPEECH = 2
END_OF_AUDIO = 3
END_OF_UTTERANCE = 4 | generated/python/gapic-google-cloud-speech-v1beta1/google/cloud/gapic/speech/v1beta1/enums.py | class RecognitionConfig(object):
class AudioEncoding(object):
"""
Audio encoding of the data sent in the audio message. All encodings support
only 1 channel (mono) audio. Only ``FLAC`` includes a header that describes
the bytes of audio that follow the header. The other encodings are raw
audio bytes with no header.
For best results, the audio source should be captured and transmitted using
a lossless encoding (``FLAC`` or ``LINEAR16``). Recognition accuracy may be
reduced if lossy codecs (such as AMR, AMR_WB and MULAW) are used to capture
or transmit the audio, particularly if background noise is present.
Attributes:
ENCODING_UNSPECIFIED (int): Not specified. Will return result ``google.rpc.Code.INVALID_ARGUMENT``.
LINEAR16 (int): Uncompressed 16-bit signed little-endian samples (Linear PCM).
This is the only encoding that may be used by ``AsyncRecognize``.
FLAC (int): This is the recommended encoding for ``SyncRecognize`` and
``StreamingRecognize`` because it uses lossless compression; therefore
recognition accuracy is not compromised by a lossy codec.
The stream FLAC (Free Lossless Audio Codec) encoding is specified at:
http://flac.sourceforge.net/documentation.html.
16-bit and 24-bit samples are supported.
Not all fields in STREAMINFO are supported.
MULAW (int): 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
AMR (int): Adaptive Multi-Rate Narrowband codec. ``sample_rate`` must be 8000 Hz.
AMR_WB (int): Adaptive Multi-Rate Wideband codec. ``sample_rate`` must be 16000 Hz.
"""
ENCODING_UNSPECIFIED = 0
LINEAR16 = 1
FLAC = 2
MULAW = 3
AMR = 4
AMR_WB = 5
class StreamingRecognizeResponse(object):
class EndpointerType(object):
"""
Indicates the type of endpointer event.
Attributes:
ENDPOINTER_EVENT_UNSPECIFIED (int): No endpointer event specified.
START_OF_SPEECH (int): Speech has been detected in the audio stream, and the service is
beginning to process it.
END_OF_SPEECH (int): Speech has ceased to be detected in the audio stream. (For example, the
user may have paused after speaking.) If ``single_utterance`` is ``false``,
the service will continue to process audio, and if subsequent speech is
detected, will send another START_OF_SPEECH event.
END_OF_AUDIO (int): This event is sent after the client has half-closed the input stream gRPC
connection and the server has received all of the audio. (The server may
still be processing the audio and may subsequently return additional
results.)
END_OF_UTTERANCE (int): This event is only sent when ``single_utterance`` is ``true``. It indicates
that the server has detected the end of the user's speech utterance and
expects no additional speech. Therefore, the server will not process
additional audio (although it may subsequently return additional
results). The client should stop sending additional audio data,
half-close the gRPC connection, and wait for any additional results
until the server closes the gRPC connection.
"""
ENDPOINTER_EVENT_UNSPECIFIED = 0
START_OF_SPEECH = 1
END_OF_SPEECH = 2
END_OF_AUDIO = 3
END_OF_UTTERANCE = 4 | 0.901324 | 0.655198 |
import copy
import glob
import jinja2
import jinja2.ext
import os
import shutil
import subprocess
import sys
import yaml
# For list.append in Jinja templates
Jinja2 = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath="."),extensions=['jinja2.ext.do'])
def file_get_contents(filename, encoding=None):
with open(filename, mode='r', encoding=encoding) as fh:
return fh.read()
def file_put_contents(filename, s, encoding=None):
with open(filename, mode='w', encoding=encoding) as fh:
fh.write(s)
def populate(filename, config, delimiter, overwrite=False):
fragments = glob.glob(os.path.join('oqs-template', filename, '*.fragment'))
if overwrite == True:
source_file = os.path.join('oqs-template', filename, os.path.basename(filename)+ '.base')
contents = file_get_contents(source_file)
else:
contents = file_get_contents(filename)
for fragment in fragments:
identifier = os.path.splitext(os.path.basename(fragment))[0]
identifier_start = '{} OQS_TEMPLATE_FRAGMENT_{}_START'.format(delimiter, identifier.upper())
identifier_end = '{} OQS_TEMPLATE_FRAGMENT_{}_END'.format(delimiter, identifier.upper())
preamble = contents[:contents.find(identifier_start)]
postamble = contents[contents.find(identifier_end):]
if overwrite == True:
contents = preamble + Jinja2.get_template(fragment).render({'config': config}) + postamble.replace(identifier_end + '\n', '')
else:
contents = preamble + identifier_start + Jinja2.get_template(fragment).render({'config': config}) + postamble
file_put_contents(filename, contents)
def load_config():
config = file_get_contents(os.path.join('oqs-template', 'generate.yml'), encoding='utf-8')
config = yaml.safe_load(config)
for sig in config['sigs']:
sig['variants'] = [variant for variant in sig['variants'] if variant['enable']]
config['sigs'] = [sig for sig in config['sigs'] if sig['variants']]
return config
config = load_config()
# update build script
populate('configure.ac', config, '#####')
# add kems
populate('kex.c', config, '/////')
populate('kex.h', config, '/////')
populate('kexoqs.c', config, '/////')
populate('myproposal.h', config, '/////')
populate('regress/unittests/kex/test_kex.c', config, '/////')
populate('ssh2.h', config, '/////')
# add sigs
populate('oqs-utils.h', config, '/////')
populate('pathnames.h', config, '/////')
populate('readconf.c', config, '/////')
populate('servconf.c', config, '/////')
populate('ssh-add.c', config, '/////')
populate('ssh-keygen.c', config, '/////')
populate('ssh-keyscan.c', config, '/////')
populate('ssh-keysign.c', config, '/////')
populate('ssh-oqs.c', config, '/////')
populate('ssh.c', config, '/////')
populate('sshconnect.c', config, '/////')
populate('sshkey.c', config, '/////')
populate('sshkey.h', config, '/////')
# update test suite
populate('oqs-test/test_openssh.py', config, '#####') | openssh/oqs-template/generate.py |
import copy
import glob
import jinja2
import jinja2.ext
import os
import shutil
import subprocess
import sys
import yaml
# For list.append in Jinja templates
Jinja2 = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath="."),extensions=['jinja2.ext.do'])
def file_get_contents(filename, encoding=None):
with open(filename, mode='r', encoding=encoding) as fh:
return fh.read()
def file_put_contents(filename, s, encoding=None):
with open(filename, mode='w', encoding=encoding) as fh:
fh.write(s)
def populate(filename, config, delimiter, overwrite=False):
fragments = glob.glob(os.path.join('oqs-template', filename, '*.fragment'))
if overwrite == True:
source_file = os.path.join('oqs-template', filename, os.path.basename(filename)+ '.base')
contents = file_get_contents(source_file)
else:
contents = file_get_contents(filename)
for fragment in fragments:
identifier = os.path.splitext(os.path.basename(fragment))[0]
identifier_start = '{} OQS_TEMPLATE_FRAGMENT_{}_START'.format(delimiter, identifier.upper())
identifier_end = '{} OQS_TEMPLATE_FRAGMENT_{}_END'.format(delimiter, identifier.upper())
preamble = contents[:contents.find(identifier_start)]
postamble = contents[contents.find(identifier_end):]
if overwrite == True:
contents = preamble + Jinja2.get_template(fragment).render({'config': config}) + postamble.replace(identifier_end + '\n', '')
else:
contents = preamble + identifier_start + Jinja2.get_template(fragment).render({'config': config}) + postamble
file_put_contents(filename, contents)
def load_config():
config = file_get_contents(os.path.join('oqs-template', 'generate.yml'), encoding='utf-8')
config = yaml.safe_load(config)
for sig in config['sigs']:
sig['variants'] = [variant for variant in sig['variants'] if variant['enable']]
config['sigs'] = [sig for sig in config['sigs'] if sig['variants']]
return config
config = load_config()
# update build script
populate('configure.ac', config, '#####')
# add kems
populate('kex.c', config, '/////')
populate('kex.h', config, '/////')
populate('kexoqs.c', config, '/////')
populate('myproposal.h', config, '/////')
populate('regress/unittests/kex/test_kex.c', config, '/////')
populate('ssh2.h', config, '/////')
# add sigs
populate('oqs-utils.h', config, '/////')
populate('pathnames.h', config, '/////')
populate('readconf.c', config, '/////')
populate('servconf.c', config, '/////')
populate('ssh-add.c', config, '/////')
populate('ssh-keygen.c', config, '/////')
populate('ssh-keyscan.c', config, '/////')
populate('ssh-keysign.c', config, '/////')
populate('ssh-oqs.c', config, '/////')
populate('ssh.c', config, '/////')
populate('sshconnect.c', config, '/////')
populate('sshkey.c', config, '/////')
populate('sshkey.h', config, '/////')
# update test suite
populate('oqs-test/test_openssh.py', config, '#####') | 0.203708 | 0.057467 |
import traceback
from flask import Flask, Response, request, jsonify
from flask.ext.cors import CORS, cross_origin
from TermSuggestionsAggregator import TermSuggestionsAggregator, Aggregation
from elsearch import ELSearch
from wnsearch import WNSearch
from word2vec import Word2VecSuggester
from precomputed import PrecomputedClusterSuggester
from rocchio import RocchioSuggester
import MakeChart
from config import get_word2vec_model
from rocchio import RocchioSuggester
app = Flask(__name__)
CORS(app)
methodsConfigurationDict = {1: (WNSearch, ()),
2: (ELSearch, ()),
3: (PrecomputedClusterSuggester, ()),
4: (Word2VecSuggester, (get_word2vec_model(), )),
5: (RocchioSuggester, ()),
}
methodsInstances = {}
for mKey in methodsConfigurationDict:
methodsInstances[mKey] = methodsConfigurationDict[mKey][0](*methodsConfigurationDict[mKey][1])
ts = TermSuggestionsAggregator()
@app.route('/')
@cross_origin(supports_credentials=True)
def api_root():
m = {}
for methodKey in sorted(methodsConfigurationDict.keys()):
m[methodKey ] = (methodsConfigurationDict[methodKey][0].__name__, methodsConfigurationDict[methodKey][1])
return jsonify(m)
@app.errorhandler(404)
@cross_origin(supports_credentials=True)
def api_error(error=None):
message = {
'status': 404,
'message': 'Error: ' + error,
}
resp = jsonify(message)
resp.status_code = 404
return resp
@app.route("/suggester", methods = ['GET',])
@cross_origin(supports_credentials=True)
def api_term():
if request.method == 'GET':
if 'term' in request.args:
if 'agg-method' in request.args:
aggMethod = str(request.args['agg-method']).strip()
if aggMethod == 'sum':
aggMethod = Aggregation.Sum
elif aggMethod == 'average':
aggMethod = Aggregation.Average
else:
return api_error('specify correct aggregation method: sum or average')
else:
# Default aggragation method
aggMethod = Aggregation.Sum
if 'methods[]' in request.args:
methods_str = request.values.getlist('methods[]')
methods = [methodsInstances[int(m)] for m in methods_str]
else:
return api_error('Please select one or more query expansion methods.')
# Get the suggestions
data = ts.getSuggestions(str(request.args['term']), methods, aggMethod)
resp = Response(MakeChart.dict2bar(data), status=200, mimetype='application/json')
return resp
else:
return api_error('a term is required')
if __name__ == "__main__":
app.run(debug=True) | webserver/webTermSuggester.py | import traceback
from flask import Flask, Response, request, jsonify
from flask.ext.cors import CORS, cross_origin
from TermSuggestionsAggregator import TermSuggestionsAggregator, Aggregation
from elsearch import ELSearch
from wnsearch import WNSearch
from word2vec import Word2VecSuggester
from precomputed import PrecomputedClusterSuggester
from rocchio import RocchioSuggester
import MakeChart
from config import get_word2vec_model
from rocchio import RocchioSuggester
app = Flask(__name__)
CORS(app)
methodsConfigurationDict = {1: (WNSearch, ()),
2: (ELSearch, ()),
3: (PrecomputedClusterSuggester, ()),
4: (Word2VecSuggester, (get_word2vec_model(), )),
5: (RocchioSuggester, ()),
}
methodsInstances = {}
for mKey in methodsConfigurationDict:
methodsInstances[mKey] = methodsConfigurationDict[mKey][0](*methodsConfigurationDict[mKey][1])
ts = TermSuggestionsAggregator()
@app.route('/')
@cross_origin(supports_credentials=True)
def api_root():
m = {}
for methodKey in sorted(methodsConfigurationDict.keys()):
m[methodKey ] = (methodsConfigurationDict[methodKey][0].__name__, methodsConfigurationDict[methodKey][1])
return jsonify(m)
@app.errorhandler(404)
@cross_origin(supports_credentials=True)
def api_error(error=None):
message = {
'status': 404,
'message': 'Error: ' + error,
}
resp = jsonify(message)
resp.status_code = 404
return resp
@app.route("/suggester", methods = ['GET',])
@cross_origin(supports_credentials=True)
def api_term():
if request.method == 'GET':
if 'term' in request.args:
if 'agg-method' in request.args:
aggMethod = str(request.args['agg-method']).strip()
if aggMethod == 'sum':
aggMethod = Aggregation.Sum
elif aggMethod == 'average':
aggMethod = Aggregation.Average
else:
return api_error('specify correct aggregation method: sum or average')
else:
# Default aggragation method
aggMethod = Aggregation.Sum
if 'methods[]' in request.args:
methods_str = request.values.getlist('methods[]')
methods = [methodsInstances[int(m)] for m in methods_str]
else:
return api_error('Please select one or more query expansion methods.')
# Get the suggestions
data = ts.getSuggestions(str(request.args['term']), methods, aggMethod)
resp = Response(MakeChart.dict2bar(data), status=200, mimetype='application/json')
return resp
else:
return api_error('a term is required')
if __name__ == "__main__":
app.run(debug=True) | 0.32338 | 0.106877 |
import os
import json
from pysls.src.events.generate_event import generete_event
event_sam_file_base = 'event_sam'
event_pysls_file_base = 'event_pysls'
def compare(event_sam_file, event_pysls_file):
with open(event_sam_file, 'r') as event_sam:
event_sam_content = json.load(event_sam)
with open(event_pysls_file, 'r') as event_pysls:
event_pysls_content = json.load(event_pysls)
if event_sam_content == event_pysls_content:
os.remove(event_sam_file)
os.remove(event_pysls_file)
return True
return False
def loop_commands(service_name, event_type, values_to_sub_sam, values_to_sub_pysls):
event_pysls_file = event_pysls_file_base+'_'+service_name+'_'+event_type+'.json'
event_sam_file = event_sam_file_base+'_'+service_name+'_'+event_type+'.json'
os.system('sam local generate-event '+service_name+' '+event_type+' '+values_to_sub_sam+'>> '+event_sam_file)
generete_event(service_name, event_type, values_to_sub_pysls, event_pysls_file)
return compare(event_sam_file, event_pysls_file)
def test_generate_event():
with open(os.path.join('.', 'pysls', 'src', 'events', 'event_mapping.json'), 'r') as event_mapping:
event_mapping_content = json.load(event_mapping)
# some tests are ignored, because my version of sam is a little different, but I confirmed that everything was going well and was
for service_name in event_mapping_content:
for event_type in event_mapping_content[service_name]:
if (service_name != 'apigateway' and event_type != 'authorizer') and (service_name != 'sns' and event_type != 'notification'):
values_to_sub_sam = ''
values_to_sub_pysls = ''
res = loop_commands(service_name, event_type, values_to_sub_sam, values_to_sub_pysls)
assert(res)
def test_generate_event_s3():
values_to_sub_sam = ''
values_to_sub_pysls = ''
res = loop_commands('s3', 'put', '--bucket my_bucket --key arquivo.csv', '--bucket=my_bucket --key=arquivo.csv')
assert(res) | pysls/tests/generate_event_test.py | import os
import json
from pysls.src.events.generate_event import generete_event
event_sam_file_base = 'event_sam'
event_pysls_file_base = 'event_pysls'
def compare(event_sam_file, event_pysls_file):
with open(event_sam_file, 'r') as event_sam:
event_sam_content = json.load(event_sam)
with open(event_pysls_file, 'r') as event_pysls:
event_pysls_content = json.load(event_pysls)
if event_sam_content == event_pysls_content:
os.remove(event_sam_file)
os.remove(event_pysls_file)
return True
return False
def loop_commands(service_name, event_type, values_to_sub_sam, values_to_sub_pysls):
event_pysls_file = event_pysls_file_base+'_'+service_name+'_'+event_type+'.json'
event_sam_file = event_sam_file_base+'_'+service_name+'_'+event_type+'.json'
os.system('sam local generate-event '+service_name+' '+event_type+' '+values_to_sub_sam+'>> '+event_sam_file)
generete_event(service_name, event_type, values_to_sub_pysls, event_pysls_file)
return compare(event_sam_file, event_pysls_file)
def test_generate_event():
with open(os.path.join('.', 'pysls', 'src', 'events', 'event_mapping.json'), 'r') as event_mapping:
event_mapping_content = json.load(event_mapping)
# some tests are ignored, because my version of sam is a little different, but I confirmed that everything was going well and was
for service_name in event_mapping_content:
for event_type in event_mapping_content[service_name]:
if (service_name != 'apigateway' and event_type != 'authorizer') and (service_name != 'sns' and event_type != 'notification'):
values_to_sub_sam = ''
values_to_sub_pysls = ''
res = loop_commands(service_name, event_type, values_to_sub_sam, values_to_sub_pysls)
assert(res)
def test_generate_event_s3():
values_to_sub_sam = ''
values_to_sub_pysls = ''
res = loop_commands('s3', 'put', '--bucket my_bucket --key arquivo.csv', '--bucket=my_bucket --key=arquivo.csv')
assert(res) | 0.222362 | 0.138724 |
import subprocess
from pathlib import Path
# configs
samples = [
"mada_1-19",
"mada_1-8",
"mada_1-6",
"mada_103",
"mada_1-1",
"mada_130",
"mada_132",
"mada_128",
"mada_2-1",
"mada_1-51",
"mada_1-20",
"mada_2-31",
"mada_109",
"mada_112",
"mada_1-5",
"mada_107",
"mada_1-40",
"mada_111",
"mada_1-14",
"mada_1-46",
"mada_136",
"mada_1-54",
"mada_1-25",
"mada_118",
"mada_129",
"mada_1-18",
"mada_151",
"mada_134",
"mada_1-3",
"mada_1-44",
"mada_1-15",
"mada_1-47",
"mada_2-53",
"mada_1-16",
"mada_1-2",
"mada_154",
"mada_104",
"mada_115",
"mada_126",
"mada_1-33",
"mada_102",
"mada_127",
"mada_125",
"mada_1-43",
"mada_137",
"mada_117",
"mada_131",
"mada_2-46",
"mada_1-17",
"mada_124",
"mada_121",
"mada_141",
"mada_2-42",
"mada_1-28",
"mada_152",
"mada_1-48",
"mada_2-34",
"mada_123",
"mada_106",
"mada_140",
"mada_1-30",
"mada_1-50",
"mada_139",
"mada_1-41",
"mada_2-25",
"mada_105",
"mada_144",
"mada_1-32",
"mada_1-22",
"mada_1-11",
"mada_1-10",
"mada_110",
"mada_120",
"mada_113",
"mada_1-38",
"mada_122",
"mada_1-7",
"mada_116",
"mada_142",
"mada_133",
"mada_148",
"mada_1-13",
"mada_1-53",
"mada_135",
"mada_1-12",
"mada_2-50",
"mada_1-21",
"mada_1-39",
"mada_1-36",
"mada_143",
"mada_150",
]
H2H_path = Path(
"/hps/nobackup/iqbal/mbhall/tech_wars/data/QC/filtered/madagascar/nanopore"
)
tbpore_path = Path(
"/hps/nobackup/iqbal/leandro/tbpore2/tbpore/pipelines/snakemake/output_human_decon"
)
sample_to_nb_of_diffs = {}
for sample in samples:
h2h_keep_reads = H2H_path / sample / "keep.reads"
tbpore_keep_reads = (
tbpore_path / sample / ".tbpore" / f"{sample}.decontaminated.filter/keep.reads"
)
diff_out = subprocess.check_output(
f'/bin/bash -c "diff --suppress-common-lines -y <(sort {h2h_keep_reads}) <(sort {tbpore_keep_reads}) | wc -l"',
shell=True,
)
nb_of_diffs = int(diff_out.strip())
sample_to_nb_of_diffs[sample] = nb_of_diffs
print("Differences:")
for sample, nb_of_diffs in sample_to_nb_of_diffs.items():
print(f"{sample} {nb_of_diffs}")
print(f"Total {sum(sample_to_nb_of_diffs.values())}") | pipelines/snakemake/scripts/compare_kept_reads.py | import subprocess
from pathlib import Path
# configs
samples = [
"mada_1-19",
"mada_1-8",
"mada_1-6",
"mada_103",
"mada_1-1",
"mada_130",
"mada_132",
"mada_128",
"mada_2-1",
"mada_1-51",
"mada_1-20",
"mada_2-31",
"mada_109",
"mada_112",
"mada_1-5",
"mada_107",
"mada_1-40",
"mada_111",
"mada_1-14",
"mada_1-46",
"mada_136",
"mada_1-54",
"mada_1-25",
"mada_118",
"mada_129",
"mada_1-18",
"mada_151",
"mada_134",
"mada_1-3",
"mada_1-44",
"mada_1-15",
"mada_1-47",
"mada_2-53",
"mada_1-16",
"mada_1-2",
"mada_154",
"mada_104",
"mada_115",
"mada_126",
"mada_1-33",
"mada_102",
"mada_127",
"mada_125",
"mada_1-43",
"mada_137",
"mada_117",
"mada_131",
"mada_2-46",
"mada_1-17",
"mada_124",
"mada_121",
"mada_141",
"mada_2-42",
"mada_1-28",
"mada_152",
"mada_1-48",
"mada_2-34",
"mada_123",
"mada_106",
"mada_140",
"mada_1-30",
"mada_1-50",
"mada_139",
"mada_1-41",
"mada_2-25",
"mada_105",
"mada_144",
"mada_1-32",
"mada_1-22",
"mada_1-11",
"mada_1-10",
"mada_110",
"mada_120",
"mada_113",
"mada_1-38",
"mada_122",
"mada_1-7",
"mada_116",
"mada_142",
"mada_133",
"mada_148",
"mada_1-13",
"mada_1-53",
"mada_135",
"mada_1-12",
"mada_2-50",
"mada_1-21",
"mada_1-39",
"mada_1-36",
"mada_143",
"mada_150",
]
H2H_path = Path(
"/hps/nobackup/iqbal/mbhall/tech_wars/data/QC/filtered/madagascar/nanopore"
)
tbpore_path = Path(
"/hps/nobackup/iqbal/leandro/tbpore2/tbpore/pipelines/snakemake/output_human_decon"
)
sample_to_nb_of_diffs = {}
for sample in samples:
h2h_keep_reads = H2H_path / sample / "keep.reads"
tbpore_keep_reads = (
tbpore_path / sample / ".tbpore" / f"{sample}.decontaminated.filter/keep.reads"
)
diff_out = subprocess.check_output(
f'/bin/bash -c "diff --suppress-common-lines -y <(sort {h2h_keep_reads}) <(sort {tbpore_keep_reads}) | wc -l"',
shell=True,
)
nb_of_diffs = int(diff_out.strip())
sample_to_nb_of_diffs[sample] = nb_of_diffs
print("Differences:")
for sample, nb_of_diffs in sample_to_nb_of_diffs.items():
print(f"{sample} {nb_of_diffs}")
print(f"Total {sum(sample_to_nb_of_diffs.values())}") | 0.264263 | 0.305011 |
import dash
import dash_html_components as html
import dash_bootstrap_components as dbc
import base64
# Author parameters
bg_color="#506784",
font_color="#F3F6FA"
author = "<NAME>"
emailAuthor = "<EMAIL>"
supervisor = "Prof. <NAME>"
emailSupervisor = "<EMAIL>"
logo1path = "./pictures/1200px-Louvain_School_of_Management_logo.svg.png"
logo1URL = "https://uclouvain.be/en/faculties/lsm"
logo2path = "./pictures/1280px-NovaSBE_Logo.svg.png"
logo2URL = "https://www2.novasbe.unl.pt/en/"
# Creating the app header
def header():
return html.Div(
id='app-page-header',
children=[
html.Div(children=[html.A(id='lsm-logo',
children=[html.Img(style={'height':'6%', 'width':'6%'}, src='data:image/png;base64,{}'.format(base64.b64encode(open(f"{logo1path}", 'rb').read()).decode()))],
href=f"{logo1URL}",
target="_blank", #open link in new tab
style={"margin-left":"10px"}
),
html.Div(children=[html.H5("Derivatives replication strategies central hub"),
#html.H6("Cox-Ross-Rubinstein model")
],
style={"display":"inline-block", "font-family":'sans-serif','transform':'translateY(+32%)', "margin-left":"10px"}),
html.Div(children=[dbc.Button("About", id="popover-target", outline=True, style={"color":"white", 'border': 'solid 1px white'}),
dbc.Popover(children=[dbc.PopoverHeader("About"),
dbc.PopoverBody([f"{author}",
f"\n {emailAuthor}",
html.Hr(),
f"This app was built for my Master's Thesis, under the supervision of {supervisor} ({emailSupervisor})."]),
],
id="popover",
is_open=False,
target="popover-target"),
],
style={"display":"inline-block","font-family":"sans-serif","marginLeft":"50%", "margin-right":"10px"}),
html.A(id="nova-logo",
children=[html.Img(style={"height":"9%","width":"9%"}, src="data:image/png;base64,{}".format(base64.b64encode(open(f"{logo2path}","rb").read()).decode()))],
href=f"{logo2URL}",
target="_blank",
style={}
)
]
,style={"display":"inline-block"}),
],
style={
'background': bg_color,
'color': font_color,
"padding-bottom": "10px",
"padding-top":"-10px"
}
) | appHeader.py | import dash
import dash_html_components as html
import dash_bootstrap_components as dbc
import base64
# Author parameters
bg_color="#506784",
font_color="#F3F6FA"
author = "<NAME>"
emailAuthor = "<EMAIL>"
supervisor = "Prof. <NAME>"
emailSupervisor = "<EMAIL>"
logo1path = "./pictures/1200px-Louvain_School_of_Management_logo.svg.png"
logo1URL = "https://uclouvain.be/en/faculties/lsm"
logo2path = "./pictures/1280px-NovaSBE_Logo.svg.png"
logo2URL = "https://www2.novasbe.unl.pt/en/"
# Creating the app header
def header():
return html.Div(
id='app-page-header',
children=[
html.Div(children=[html.A(id='lsm-logo',
children=[html.Img(style={'height':'6%', 'width':'6%'}, src='data:image/png;base64,{}'.format(base64.b64encode(open(f"{logo1path}", 'rb').read()).decode()))],
href=f"{logo1URL}",
target="_blank", #open link in new tab
style={"margin-left":"10px"}
),
html.Div(children=[html.H5("Derivatives replication strategies central hub"),
#html.H6("Cox-Ross-Rubinstein model")
],
style={"display":"inline-block", "font-family":'sans-serif','transform':'translateY(+32%)', "margin-left":"10px"}),
html.Div(children=[dbc.Button("About", id="popover-target", outline=True, style={"color":"white", 'border': 'solid 1px white'}),
dbc.Popover(children=[dbc.PopoverHeader("About"),
dbc.PopoverBody([f"{author}",
f"\n {emailAuthor}",
html.Hr(),
f"This app was built for my Master's Thesis, under the supervision of {supervisor} ({emailSupervisor})."]),
],
id="popover",
is_open=False,
target="popover-target"),
],
style={"display":"inline-block","font-family":"sans-serif","marginLeft":"50%", "margin-right":"10px"}),
html.A(id="nova-logo",
children=[html.Img(style={"height":"9%","width":"9%"}, src="data:image/png;base64,{}".format(base64.b64encode(open(f"{logo2path}","rb").read()).decode()))],
href=f"{logo2URL}",
target="_blank",
style={}
)
]
,style={"display":"inline-block"}),
],
style={
'background': bg_color,
'color': font_color,
"padding-bottom": "10px",
"padding-top":"-10px"
}
) | 0.376623 | 0.156846 |
import json
import os
import shutil
import sys
from argparse import ArgumentParser
from pathlib import Path
from catalyst.utils import load_ordered_yaml
from jinja2 import Environment, FileSystemLoader
sys.path.insert(0, str(Path(__file__).absolute().parent.parent / "src"))
from data.data_info import TargetMapInfo
from utils import process_class_config
from make_dataset import Constants
parser = ArgumentParser()
parser.add_argument(
'--class_config', type=Path, required=True,
help="path to class config"
)
parser.add_argument(
'--in_template', type=Path, required=True,
help="path to save dataset copy (if omitted no copy will be saved)"
)
parser.add_argument(
'--out_config_dir', type=Path, required=True,
help="path to folder where to save all configs"
)
parser.add_argument(
'--out_config_name', type=str, default="config.yml",
help="name of generated output config"
)
parser.add_argument(
'--all_datasets_json', type=str, required=True,
help="path to json with all prepared datasets information"
)
parser.add_argument(
'--train', type=str, required=True,
help="names of training datasets keys in all_datasets_json, separated by space"
)
parser.add_argument(
'--valid', type=str, required=True,
help="names of training datasets keys in all_datasets_json, separated by space"
)
parser.add_argument(
'--infer', type=str, required=False, nargs='*',
help="names of training datasets keys in all_datasets_json, separated by space"
)
parser.add_argument(
'--batch_size', type=int, default=8
)
parser.add_argument(
'--num_epochs', type=int, default=500,
)
parser.add_argument(
'--num_workers', type=int, default=4
)
parser.add_argument(
'--image_size', type=int, default=512
)
parser.add_argument(
'--max_caching_size', type=int, default=1000
)
parser.add_argument(
'--dilated_model', type=bool, default=False
)
class DatasetInfo:
def __init__(self, json_responce):
self.datapath = json_responce[Constants.DatasetFields.DATA_PATH]
self.csv_path = json_responce[Constants.DatasetFields.CSV_PATH]
self.info_json_path = json_responce[Constants.DatasetFields.INFO_JSON_PATH]
def main(
in_template,
class_config_path,
out_config_dir,
out_config_name,
batch_size,
num_epochs,
num_workers,
image_size,
max_caching_size,
all_datasets_json,
train, valid, infer=None,
dilated_model=False
):
assert os.path.exists(in_template)
assert os.path.exists(class_config_path)
os.makedirs(out_config_dir, exist_ok=True)
shutil.copy(in_template, out_config_dir / "_template.yml")
shutil.copy(class_config_path, out_config_dir / "class_config.yml")
# read class config
with open(class_config_path, "r") as fin:
class_config = load_ordered_yaml(fin)
# set default values
class_config = process_class_config(class_config["class_config"])
target_map_info = TargetMapInfo(class_config)
# all datasets json
with open(all_datasets_json, "r") as json_file:
_json = json.load(json_file)
train_dataset = DatasetInfo(_json[train]) if train else None
valid_dataset = DatasetInfo(_json[valid]) if valid else None
infer_datasets = {name: DatasetInfo(_json[name]) for name in infer}
# processing template
env = Environment(
loader=FileSystemLoader(str(in_template.absolute().parent)),
trim_blocks=True,
lstrip_blocks=True
)
env.globals.update(zip=zip) # enable zip command inside jinja2 template
template = env.get_template(in_template.name)
out_config = out_config_dir / out_config_name
out_config.write_text(
template.render(
tm=target_map_info,
train_dataset=train_dataset,
valid_dataset=valid_dataset,
infer_datasets=infer_datasets,
num_epochs=num_epochs,
batch_size=batch_size,
num_workers=num_workers,
image_size=image_size,
max_caching_size=max_caching_size,
dilated_model=dilated_model
)
)
def _main():
args = parser.parse_args()
main(
in_template=args.in_template,
class_config_path=args.class_config,
out_config_dir=args.out_config_dir,
out_config_name=args.out_config_name,
all_datasets_json=args.all_datasets_json,
train=args.train,
valid=args.valid,
infer=args.infer,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
num_workers=args.num_workers,
image_size=args.image_size,
max_caching_size=args.max_caching_size,
dilated_model=args.dilated_model
)
if __name__ == '__main__':
_main() | data_preparation/make_config.py | import json
import os
import shutil
import sys
from argparse import ArgumentParser
from pathlib import Path
from catalyst.utils import load_ordered_yaml
from jinja2 import Environment, FileSystemLoader
sys.path.insert(0, str(Path(__file__).absolute().parent.parent / "src"))
from data.data_info import TargetMapInfo
from utils import process_class_config
from make_dataset import Constants
parser = ArgumentParser()
parser.add_argument(
'--class_config', type=Path, required=True,
help="path to class config"
)
parser.add_argument(
'--in_template', type=Path, required=True,
help="path to save dataset copy (if omitted no copy will be saved)"
)
parser.add_argument(
'--out_config_dir', type=Path, required=True,
help="path to folder where to save all configs"
)
parser.add_argument(
'--out_config_name', type=str, default="config.yml",
help="name of generated output config"
)
parser.add_argument(
'--all_datasets_json', type=str, required=True,
help="path to json with all prepared datasets information"
)
parser.add_argument(
'--train', type=str, required=True,
help="names of training datasets keys in all_datasets_json, separated by space"
)
parser.add_argument(
'--valid', type=str, required=True,
help="names of training datasets keys in all_datasets_json, separated by space"
)
parser.add_argument(
'--infer', type=str, required=False, nargs='*',
help="names of training datasets keys in all_datasets_json, separated by space"
)
parser.add_argument(
'--batch_size', type=int, default=8
)
parser.add_argument(
'--num_epochs', type=int, default=500,
)
parser.add_argument(
'--num_workers', type=int, default=4
)
parser.add_argument(
'--image_size', type=int, default=512
)
parser.add_argument(
'--max_caching_size', type=int, default=1000
)
parser.add_argument(
'--dilated_model', type=bool, default=False
)
class DatasetInfo:
def __init__(self, json_responce):
self.datapath = json_responce[Constants.DatasetFields.DATA_PATH]
self.csv_path = json_responce[Constants.DatasetFields.CSV_PATH]
self.info_json_path = json_responce[Constants.DatasetFields.INFO_JSON_PATH]
def main(
in_template,
class_config_path,
out_config_dir,
out_config_name,
batch_size,
num_epochs,
num_workers,
image_size,
max_caching_size,
all_datasets_json,
train, valid, infer=None,
dilated_model=False
):
assert os.path.exists(in_template)
assert os.path.exists(class_config_path)
os.makedirs(out_config_dir, exist_ok=True)
shutil.copy(in_template, out_config_dir / "_template.yml")
shutil.copy(class_config_path, out_config_dir / "class_config.yml")
# read class config
with open(class_config_path, "r") as fin:
class_config = load_ordered_yaml(fin)
# set default values
class_config = process_class_config(class_config["class_config"])
target_map_info = TargetMapInfo(class_config)
# all datasets json
with open(all_datasets_json, "r") as json_file:
_json = json.load(json_file)
train_dataset = DatasetInfo(_json[train]) if train else None
valid_dataset = DatasetInfo(_json[valid]) if valid else None
infer_datasets = {name: DatasetInfo(_json[name]) for name in infer}
# processing template
env = Environment(
loader=FileSystemLoader(str(in_template.absolute().parent)),
trim_blocks=True,
lstrip_blocks=True
)
env.globals.update(zip=zip) # enable zip command inside jinja2 template
template = env.get_template(in_template.name)
out_config = out_config_dir / out_config_name
out_config.write_text(
template.render(
tm=target_map_info,
train_dataset=train_dataset,
valid_dataset=valid_dataset,
infer_datasets=infer_datasets,
num_epochs=num_epochs,
batch_size=batch_size,
num_workers=num_workers,
image_size=image_size,
max_caching_size=max_caching_size,
dilated_model=dilated_model
)
)
def _main():
args = parser.parse_args()
main(
in_template=args.in_template,
class_config_path=args.class_config,
out_config_dir=args.out_config_dir,
out_config_name=args.out_config_name,
all_datasets_json=args.all_datasets_json,
train=args.train,
valid=args.valid,
infer=args.infer,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
num_workers=args.num_workers,
image_size=args.image_size,
max_caching_size=args.max_caching_size,
dilated_model=args.dilated_model
)
if __name__ == '__main__':
_main() | 0.391755 | 0.119974 |
import unittest
from numpy import array, linspace, ones
from numpy.testing import assert_array_equal
from chaco.api import DataRange1D
from chaco.function_data_source import FunctionDataSource
from traits.testing.unittest_tools import UnittestTools
class FunctionDataSourceTestCase(UnittestTools, unittest.TestCase):
def setUp(self):
self.myfunc = lambda low, high: linspace(low, high, 101)**2
self.data_source = FunctionDataSource(func=self.myfunc)
def test_init_defaults(self):
data_source = FunctionDataSource()
assert_array_equal(data_source._data, [])
self.assertEqual(data_source.value_dimension, "scalar")
self.assertEqual(data_source.sort_order, "ascending")
self.assertFalse(data_source.is_masked())
def test_basic_setup(self):
assert_array_equal(self.myfunc, self.data_source.func)
self.assertEqual(self.data_source.value_dimension, "scalar")
self.assertEqual(self.data_source.sort_order, "ascending")
self.assertFalse(self.data_source.is_masked())
def test_set_data(self):
with self.assertRaises(RuntimeError):
self.data_source.set_data(
lambda low, high: linspace(low, high, 101))
def test_range_high_changed(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0)
with self.assertTraitChanges(
self.data_source, 'data_changed', count=1):
self.data_source.data_range.high_setting = 2.0
assert_array_equal(
linspace(0.0, 2.0, 101)**2, self.data_source.get_data())
def test_range_low_changed(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0)
with self.assertTraitChanges(
self.data_source, 'data_changed', count=1):
self.data_source.data_range.low_setting = -1.0
assert_array_equal(
linspace(-1.0, 1.0, 101)**2, self.data_source.get_data())
def test_range_data_range_changed(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0)
with self.assertTraitChanges(
self.data_source, 'data_changed', count=1):
self.data_source.data_range = DataRange1D(
low_setting=-2.0, high_setting=2.0)
assert_array_equal(
linspace(-2.0, 2.0, 101)**2, self.data_source.get_data())
def test_set_mask(self):
mymask = array([i % 2 for i in range(101)], dtype=bool)
with self.assertRaises(NotImplementedError):
self.data_source.set_mask(mymask)
def test_remove_mask(self):
with self.assertRaises(NotImplementedError):
self.data_source.remove_mask()
def test_get_data(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0)
assert_array_equal(
linspace(0.0, 1.0, 101)**2, self.data_source.get_data())
def test_get_data_no_data(self):
self.data_source = FunctionDataSource()
assert_array_equal(self.data_source.get_data(), array([], dtype=float))
def test_get_data_mask(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0)
data, mask = self.data_source.get_data_mask()
assert_array_equal(data, linspace(0.0, 1.0, 101)**2)
assert_array_equal(mask, ones(shape=101, dtype=bool))
def test_bounds(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=2.0)
bounds = self.data_source.get_bounds()
self.assertEqual(bounds, (0.0, 4.0))
@unittest.skip("default sort_order is ascending, which isn't right")
def test_bounds_non_monotone(self):
self.data_source.data_range = DataRange1D(
low_setting=-2.0, high_setting=2.0)
bounds = self.data_source.get_bounds()
self.assertEqual(bounds, (0.0, 4.0))
def test_data_size(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=2.0)
self.assertEqual(101, self.data_source.get_size()) | chaco/tests/function_data_source_test_case.py | import unittest
from numpy import array, linspace, ones
from numpy.testing import assert_array_equal
from chaco.api import DataRange1D
from chaco.function_data_source import FunctionDataSource
from traits.testing.unittest_tools import UnittestTools
class FunctionDataSourceTestCase(UnittestTools, unittest.TestCase):
def setUp(self):
self.myfunc = lambda low, high: linspace(low, high, 101)**2
self.data_source = FunctionDataSource(func=self.myfunc)
def test_init_defaults(self):
data_source = FunctionDataSource()
assert_array_equal(data_source._data, [])
self.assertEqual(data_source.value_dimension, "scalar")
self.assertEqual(data_source.sort_order, "ascending")
self.assertFalse(data_source.is_masked())
def test_basic_setup(self):
assert_array_equal(self.myfunc, self.data_source.func)
self.assertEqual(self.data_source.value_dimension, "scalar")
self.assertEqual(self.data_source.sort_order, "ascending")
self.assertFalse(self.data_source.is_masked())
def test_set_data(self):
with self.assertRaises(RuntimeError):
self.data_source.set_data(
lambda low, high: linspace(low, high, 101))
def test_range_high_changed(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0)
with self.assertTraitChanges(
self.data_source, 'data_changed', count=1):
self.data_source.data_range.high_setting = 2.0
assert_array_equal(
linspace(0.0, 2.0, 101)**2, self.data_source.get_data())
def test_range_low_changed(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0)
with self.assertTraitChanges(
self.data_source, 'data_changed', count=1):
self.data_source.data_range.low_setting = -1.0
assert_array_equal(
linspace(-1.0, 1.0, 101)**2, self.data_source.get_data())
def test_range_data_range_changed(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0)
with self.assertTraitChanges(
self.data_source, 'data_changed', count=1):
self.data_source.data_range = DataRange1D(
low_setting=-2.0, high_setting=2.0)
assert_array_equal(
linspace(-2.0, 2.0, 101)**2, self.data_source.get_data())
def test_set_mask(self):
mymask = array([i % 2 for i in range(101)], dtype=bool)
with self.assertRaises(NotImplementedError):
self.data_source.set_mask(mymask)
def test_remove_mask(self):
with self.assertRaises(NotImplementedError):
self.data_source.remove_mask()
def test_get_data(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0)
assert_array_equal(
linspace(0.0, 1.0, 101)**2, self.data_source.get_data())
def test_get_data_no_data(self):
self.data_source = FunctionDataSource()
assert_array_equal(self.data_source.get_data(), array([], dtype=float))
def test_get_data_mask(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0)
data, mask = self.data_source.get_data_mask()
assert_array_equal(data, linspace(0.0, 1.0, 101)**2)
assert_array_equal(mask, ones(shape=101, dtype=bool))
def test_bounds(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=2.0)
bounds = self.data_source.get_bounds()
self.assertEqual(bounds, (0.0, 4.0))
@unittest.skip("default sort_order is ascending, which isn't right")
def test_bounds_non_monotone(self):
self.data_source.data_range = DataRange1D(
low_setting=-2.0, high_setting=2.0)
bounds = self.data_source.get_bounds()
self.assertEqual(bounds, (0.0, 4.0))
def test_data_size(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=2.0)
self.assertEqual(101, self.data_source.get_size()) | 0.587943 | 0.695648 |
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
import logging
import numpy as np
import os.path as osp
import pandas as pd
from torch_geometric.data import Data
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.data import DataLoader
class DNN(nn.Module):
def __init__(self):
super(DNN, self).__init__()
self.fc1 = Linear(2048, 1024)
self.fc2 = Linear(1024, 516)
self.fc3 = Linear(516, 256)
self.fc4 = Linear(256, 128)
self.fc5 = Linear(128, 1)
def forward(self, data):
x = F.relu(self.fc1(data.x))
x = F.dropout(x, p=0.2, training=self.training)
x = F.relu(self.fc2(x))
x = F.dropout(x, p=0.2, training=self.training)
x = F.relu(self.fc3(x))
x = F.dropout(x, p=0.2, training=self.training)
x = F.relu(self.fc4(x))
x = F.dropout(x, p=0.2, training=self.training)
x = self.fc5(x)
return x
def read_datasets(path):
data = np.load(path)
fps = data["fp"].astype(np.float32)
targets = data["pka"].reshape(-1, 1).astype(np.float32)
return fps, targets
def numpy_to_tensor(X, y):
datas = []
for idx in range(X.shape[0]):
fp = X[idx].reshape(1, 2048)
pka = y[idx].reshape(1, 1)
data = Data(x=torch.tensor(fp, dtype=torch.float32),
y=torch.tensor(pka, dtype=torch.float32))
datas.append(data)
return datas
def gen_data(X, y):
data = numpy_to_tensor(X, y)
train_data, valid_data = train_test_split(data, test_size=0.1)
train_loader = DataLoader(train_data, batch_size=128, shuffle=True, drop_last=True)
valid_loader = DataLoader(valid_data, batch_size=128, shuffle=True, drop_last=True)
return train_loader, valid_loader
def train_step(loader, model, optimizer, device):
model.train()
loss_all = 0
i = 0
for data in loader:
data = data.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.mse_loss(output, data.y)
loss.backward()
loss_all += loss.item()
optimizer.step()
i += 1
return loss_all / i
def test_step(loader, model, device):
model.eval()
MSE, MAE = 0, 0
trues, preds = [], []
with torch.no_grad():
for data in loader:
data = data.to(device)
output = model(data)
pred = output.cpu().numpy()[0][0]
true = data.y.cpu().numpy()[0][0]
trues.append(true)
preds.append(pred)
MAE = mean_absolute_error(trues, preds)
MSE = mean_squared_error(trues, preds)
R2 = r2_score(trues, preds)
return MAE, MSE, R2
def train(train_loader, test_loader, epochs):
model = DNN()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
hist = {"train-loss":[], "test-mae":[], "test-mse":[], "test-r2":[]}
for epoch in range(epochs):
weight_path = "models/weight_ap_{}.pth".format(epoch)
train_loss = train_step(train_loader, model, optimizer, device)
test_mae, test_mse, test_r2 = test_step(test_loader, model, device)
hist["train-loss"].append(train_loss)
hist["test-mae"].append(test_mae)
hist["test-mse"].append(test_mse)
hist["test-r2"].append(test_r2)
if test_mae <= min(hist["test-mae"]):
torch.save(model.state_dict(), weight_path)
print(f'Epoch: {epoch}, Train loss: {train_loss:.3}, Test mae: {test_mae:.3}, Test mse: {test_mse:.3}, Test r2: {test_r2:.3}')
print("---------------------------------\nmin mae: {}\n---------------------------------\n".format(min(hist["test-mae"])))
return
if __name__=="__main__":
path_data = "datasets/datasets_ap.npz"
fps, pkas = read_datasets(path_data)
train_loader, valid_loader = gen_data(fps, pkas)
train(train_loader, valid_loader, epochs=50) | src/baseline/train_ap.py | from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
import logging
import numpy as np
import os.path as osp
import pandas as pd
from torch_geometric.data import Data
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.data import DataLoader
class DNN(nn.Module):
def __init__(self):
super(DNN, self).__init__()
self.fc1 = Linear(2048, 1024)
self.fc2 = Linear(1024, 516)
self.fc3 = Linear(516, 256)
self.fc4 = Linear(256, 128)
self.fc5 = Linear(128, 1)
def forward(self, data):
x = F.relu(self.fc1(data.x))
x = F.dropout(x, p=0.2, training=self.training)
x = F.relu(self.fc2(x))
x = F.dropout(x, p=0.2, training=self.training)
x = F.relu(self.fc3(x))
x = F.dropout(x, p=0.2, training=self.training)
x = F.relu(self.fc4(x))
x = F.dropout(x, p=0.2, training=self.training)
x = self.fc5(x)
return x
def read_datasets(path):
data = np.load(path)
fps = data["fp"].astype(np.float32)
targets = data["pka"].reshape(-1, 1).astype(np.float32)
return fps, targets
def numpy_to_tensor(X, y):
datas = []
for idx in range(X.shape[0]):
fp = X[idx].reshape(1, 2048)
pka = y[idx].reshape(1, 1)
data = Data(x=torch.tensor(fp, dtype=torch.float32),
y=torch.tensor(pka, dtype=torch.float32))
datas.append(data)
return datas
def gen_data(X, y):
data = numpy_to_tensor(X, y)
train_data, valid_data = train_test_split(data, test_size=0.1)
train_loader = DataLoader(train_data, batch_size=128, shuffle=True, drop_last=True)
valid_loader = DataLoader(valid_data, batch_size=128, shuffle=True, drop_last=True)
return train_loader, valid_loader
def train_step(loader, model, optimizer, device):
model.train()
loss_all = 0
i = 0
for data in loader:
data = data.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.mse_loss(output, data.y)
loss.backward()
loss_all += loss.item()
optimizer.step()
i += 1
return loss_all / i
def test_step(loader, model, device):
model.eval()
MSE, MAE = 0, 0
trues, preds = [], []
with torch.no_grad():
for data in loader:
data = data.to(device)
output = model(data)
pred = output.cpu().numpy()[0][0]
true = data.y.cpu().numpy()[0][0]
trues.append(true)
preds.append(pred)
MAE = mean_absolute_error(trues, preds)
MSE = mean_squared_error(trues, preds)
R2 = r2_score(trues, preds)
return MAE, MSE, R2
def train(train_loader, test_loader, epochs):
model = DNN()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
hist = {"train-loss":[], "test-mae":[], "test-mse":[], "test-r2":[]}
for epoch in range(epochs):
weight_path = "models/weight_ap_{}.pth".format(epoch)
train_loss = train_step(train_loader, model, optimizer, device)
test_mae, test_mse, test_r2 = test_step(test_loader, model, device)
hist["train-loss"].append(train_loss)
hist["test-mae"].append(test_mae)
hist["test-mse"].append(test_mse)
hist["test-r2"].append(test_r2)
if test_mae <= min(hist["test-mae"]):
torch.save(model.state_dict(), weight_path)
print(f'Epoch: {epoch}, Train loss: {train_loss:.3}, Test mae: {test_mae:.3}, Test mse: {test_mse:.3}, Test r2: {test_r2:.3}')
print("---------------------------------\nmin mae: {}\n---------------------------------\n".format(min(hist["test-mae"])))
return
if __name__=="__main__":
path_data = "datasets/datasets_ap.npz"
fps, pkas = read_datasets(path_data)
train_loader, valid_loader = gen_data(fps, pkas)
train(train_loader, valid_loader, epochs=50) | 0.879574 | 0.528229 |
import os
import os.path
import pickle
import traceback
from collections import deque, Counter
import numpy as np
import pandas as pd
from state import Molecule, Atom, Bond, length, distance
from util import mp_map_parititons
def main():
mp_map_parititons(process_partition)
def process_partition(index):
try:
with open(f'data/partitions/molecules/{index}.p', 'rb') as fp:
molecules = pickle.load(fp)
molecules_by_name = {m.name: m for m in molecules}
compute_features('train', index, molecules_by_name)
compute_features('test', index, molecules_by_name)
except Exception:
traceback.print_exc()
raise
def compute_features(name, index, molecules_by_name):
data = pd.read_pickle(f'data/partitions/{name}/{index}.p')
for coupling_type, type_df in data.groupby('type'):
type_df = type_df.sort_values('id')
acc_features = []
for _, row in type_df.iterrows():
molecule: Molecule = molecules_by_name[row['molecule_name']]
features = compute_pair_features(row, molecule)
features['id'] = row['id']
try:
features['scalar_coupling_constant'] = row['scalar_coupling_constant']
except KeyError:
pass
acc_features.append(features)
acc_features = pd.DataFrame(acc_features)
path = f'data/partitions/features/{name}/{coupling_type}/{index}.p'
dr = os.path.dirname(path)
if not os.path.exists(dr):
os.makedirs(dr, exist_ok=True)
acc_features.to_pickle(path)
def compute_pair_features(row: pd.Series,
molecule: Molecule) -> dict:
a0: Atom = molecule.atoms[row['atom_index_0']]
a1: Atom = molecule.atoms[row['atom_index_1']]
features = {'distance': distance(a0.position, a1.position),
'molecular_weight': molecule.molecular_weight}
for prefix, atom in [('a0', a0), ('a1', a1)]:
features[prefix + '_bonds'] = atom.n_bonds
features[prefix + '_partial_charge'] = atom.partial_charge
cycles = Counter(','.join([a.hybridized_symbol for a in cycle])
for cycle in atom.cycles())
for cycle, n in cycles.items():
features[prefix + '_cyc_' + cycle] = n
for prefix, bonds in [('b0', a0.bonded_neighbors_count),
('b1', a1.bonded_neighbors_count)]:
for bond_type, bond_count in bonds.items():
features[prefix + '_' + bond_type] = bond_count
for prefix, bonds in [('bs0', a0.secondary_bonded_neighbors_count),
('bs1', a1.secondary_bonded_neighbors_count)]:
for (ba1t, ba2t), bond_count in bonds.items():
features[prefix + '_' + ba1t + '-' + ba2t] = bond_count
bond_path = find_shortest_path_between_atoms(a0, a1)
if not bond_path:
print("couldn't find bond path between atoms")
else:
b0: Bond = bond_path[0]
b1: Bond = bond_path[-1]
v0 = bond_vector(a0, b0)
v1 = bond_vector(a1, b1)
l0 = length(v0)
l1 = length(v1)
dot = np.sum(v0 * v1)
features.update({
'a0_bond_length': l0,
'a1_bond_length': l1,
'bond_vector_dot': dot,
'bond_vector_dot_norm': dot / (l0 * l1),
'bond_path_length': sum(b.length for b in bond_path)
})
return features
def find_shortest_path_between_atoms(atom_source: Atom,
atom_target: Atom) -> tuple:
bonds_to_explore = deque((atom_source, (b,))
for b in atom_source.bonds)
while bonds_to_explore:
prev_atom, prev_bonds = bonds_to_explore.popleft()
next_atom = prev_bonds[-1].other(prev_atom)
if next_atom is atom_target:
return prev_bonds
elif len(prev_bonds) < 5: # don't recurse too deeply
for next_bond in next_atom.bonds:
if next_bond.other(next_atom) is not prev_atom:
bonds_to_explore.append((next_atom,
prev_bonds + (next_bond,)))
return None
def bond_vector(src: Atom, bond: Bond) -> np.ndarray:
return src.position - bond.other(src).position
__name__ == '__main__' and main() | compute_features.py | import os
import os.path
import pickle
import traceback
from collections import deque, Counter
import numpy as np
import pandas as pd
from state import Molecule, Atom, Bond, length, distance
from util import mp_map_parititons
def main():
mp_map_parititons(process_partition)
def process_partition(index):
try:
with open(f'data/partitions/molecules/{index}.p', 'rb') as fp:
molecules = pickle.load(fp)
molecules_by_name = {m.name: m for m in molecules}
compute_features('train', index, molecules_by_name)
compute_features('test', index, molecules_by_name)
except Exception:
traceback.print_exc()
raise
def compute_features(name, index, molecules_by_name):
data = pd.read_pickle(f'data/partitions/{name}/{index}.p')
for coupling_type, type_df in data.groupby('type'):
type_df = type_df.sort_values('id')
acc_features = []
for _, row in type_df.iterrows():
molecule: Molecule = molecules_by_name[row['molecule_name']]
features = compute_pair_features(row, molecule)
features['id'] = row['id']
try:
features['scalar_coupling_constant'] = row['scalar_coupling_constant']
except KeyError:
pass
acc_features.append(features)
acc_features = pd.DataFrame(acc_features)
path = f'data/partitions/features/{name}/{coupling_type}/{index}.p'
dr = os.path.dirname(path)
if not os.path.exists(dr):
os.makedirs(dr, exist_ok=True)
acc_features.to_pickle(path)
def compute_pair_features(row: pd.Series,
molecule: Molecule) -> dict:
a0: Atom = molecule.atoms[row['atom_index_0']]
a1: Atom = molecule.atoms[row['atom_index_1']]
features = {'distance': distance(a0.position, a1.position),
'molecular_weight': molecule.molecular_weight}
for prefix, atom in [('a0', a0), ('a1', a1)]:
features[prefix + '_bonds'] = atom.n_bonds
features[prefix + '_partial_charge'] = atom.partial_charge
cycles = Counter(','.join([a.hybridized_symbol for a in cycle])
for cycle in atom.cycles())
for cycle, n in cycles.items():
features[prefix + '_cyc_' + cycle] = n
for prefix, bonds in [('b0', a0.bonded_neighbors_count),
('b1', a1.bonded_neighbors_count)]:
for bond_type, bond_count in bonds.items():
features[prefix + '_' + bond_type] = bond_count
for prefix, bonds in [('bs0', a0.secondary_bonded_neighbors_count),
('bs1', a1.secondary_bonded_neighbors_count)]:
for (ba1t, ba2t), bond_count in bonds.items():
features[prefix + '_' + ba1t + '-' + ba2t] = bond_count
bond_path = find_shortest_path_between_atoms(a0, a1)
if not bond_path:
print("couldn't find bond path between atoms")
else:
b0: Bond = bond_path[0]
b1: Bond = bond_path[-1]
v0 = bond_vector(a0, b0)
v1 = bond_vector(a1, b1)
l0 = length(v0)
l1 = length(v1)
dot = np.sum(v0 * v1)
features.update({
'a0_bond_length': l0,
'a1_bond_length': l1,
'bond_vector_dot': dot,
'bond_vector_dot_norm': dot / (l0 * l1),
'bond_path_length': sum(b.length for b in bond_path)
})
return features
def find_shortest_path_between_atoms(atom_source: Atom,
atom_target: Atom) -> tuple:
bonds_to_explore = deque((atom_source, (b,))
for b in atom_source.bonds)
while bonds_to_explore:
prev_atom, prev_bonds = bonds_to_explore.popleft()
next_atom = prev_bonds[-1].other(prev_atom)
if next_atom is atom_target:
return prev_bonds
elif len(prev_bonds) < 5: # don't recurse too deeply
for next_bond in next_atom.bonds:
if next_bond.other(next_atom) is not prev_atom:
bonds_to_explore.append((next_atom,
prev_bonds + (next_bond,)))
return None
def bond_vector(src: Atom, bond: Bond) -> np.ndarray:
return src.position - bond.other(src).position
__name__ == '__main__' and main() | 0.390592 | 0.242873 |
from __future__ import unicode_literals
import flask
from flask import Blueprint, request
import flask_restplus as restplus
# Add a dummy Resource to verify that the app is properly set.
class HelloWorld(restplus.Resource):
def get(self):
return {}
class GoodbyeWorld(restplus.Resource):
def __init__(self, err):
self.err = err
def get(self):
flask.abort(self.err)
class APIWithBlueprintTest(object):
def test_api_base(self, app):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
app.register_blueprint(blueprint)
assert api.urls == {}
assert api.prefix == ''
assert api.default_mediatype == 'application/json'
def test_api_delayed_initialization(self, app):
blueprint = Blueprint('test', __name__)
api = restplus.Api()
api.init_app(blueprint)
app.register_blueprint(blueprint)
api.add_resource(HelloWorld, '/', endpoint="hello")
def test_add_resource_endpoint(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
view = mocker.Mock(**{'as_view.return_value.__name__': str('test_view')})
api.add_resource(view, '/foo', endpoint='bar')
app.register_blueprint(blueprint)
view.as_view.assert_called_with('bar', api)
def test_add_resource_endpoint_after_registration(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
app.register_blueprint(blueprint)
view = mocker.Mock(**{'as_view.return_value.__name__': str('test_view')})
api.add_resource(view, '/foo', endpoint='bar')
view.as_view.assert_called_with('bar', api)
def test_url_with_api_prefix(self, app):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint, prefix='/api')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint)
with app.test_request_context('/api/hi'):
assert request.endpoint == 'test.hello'
def test_url_with_blueprint_prefix(self, app):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint)
with app.test_request_context('/bp/hi'):
assert request.endpoint == 'test.hello'
def test_url_with_registration_prefix(self, app):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint, url_prefix='/reg')
with app.test_request_context('/reg/hi'):
assert request.endpoint == 'test.hello'
def test_registration_prefix_overrides_blueprint_prefix(self, app):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint, url_prefix='/reg')
with app.test_request_context('/reg/hi'):
assert request.endpoint == 'test.hello'
def test_url_with_api_and_blueprint_prefix(self, app):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restplus.Api(blueprint, prefix='/api')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint)
with app.test_request_context('/bp/api/hi'):
assert request.endpoint == 'test.hello'
def test_error_routing(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app.register_blueprint(blueprint)
with app.test_request_context('/hi', method='POST'):
assert api._should_use_fr_error_handler() is True
assert api._has_fr_route() is True
with app.test_request_context('/bye'):
api._should_use_fr_error_handler = mocker.Mock(return_value=False)
assert api._has_fr_route() is True
def test_non_blueprint_rest_error_routing(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app.register_blueprint(blueprint, url_prefix='/blueprint')
api2 = restplus.Api(app)
api2.add_resource(HelloWorld(api), '/hi', endpoint="hello")
api2.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
with app.test_request_context('/hi', method='POST'):
assert api._should_use_fr_error_handler() is False
assert api2._should_use_fr_error_handler() is True
assert api._has_fr_route() is False
assert api2._has_fr_route() is True
with app.test_request_context('/blueprint/hi', method='POST'):
assert api._should_use_fr_error_handler() is True
assert api2._should_use_fr_error_handler() is False
assert api._has_fr_route() is True
assert api2._has_fr_route() is False
api._should_use_fr_error_handler = mocker.Mock(return_value=False)
api2._should_use_fr_error_handler = mocker.Mock(return_value=False)
with app.test_request_context('/bye'):
assert api._has_fr_route() is False
assert api2._has_fr_route() is True
with app.test_request_context('/blueprint/bye'):
assert api._has_fr_route() is True
assert api2._has_fr_route() is False
def test_non_blueprint_non_rest_error_routing(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app.register_blueprint(blueprint, url_prefix='/blueprint')
@app.route('/hi')
def hi():
return 'hi'
@app.route('/bye')
def bye():
flask.abort(404)
with app.test_request_context('/hi', method='POST'):
assert api._should_use_fr_error_handler() is False
assert api._has_fr_route() is False
with app.test_request_context('/blueprint/hi', method='POST'):
assert api._should_use_fr_error_handler() is True
assert api._has_fr_route() is True
api._should_use_fr_error_handler = mocker.Mock(return_value=False)
with app.test_request_context('/bye'):
assert api._has_fr_route() is False
with app.test_request_context('/blueprint/bye'):
assert api._has_fr_route() is True | tests/legacy/test_api_with_blueprint.py | from __future__ import unicode_literals
import flask
from flask import Blueprint, request
import flask_restplus as restplus
# Add a dummy Resource to verify that the app is properly set.
class HelloWorld(restplus.Resource):
def get(self):
return {}
class GoodbyeWorld(restplus.Resource):
def __init__(self, err):
self.err = err
def get(self):
flask.abort(self.err)
class APIWithBlueprintTest(object):
def test_api_base(self, app):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
app.register_blueprint(blueprint)
assert api.urls == {}
assert api.prefix == ''
assert api.default_mediatype == 'application/json'
def test_api_delayed_initialization(self, app):
blueprint = Blueprint('test', __name__)
api = restplus.Api()
api.init_app(blueprint)
app.register_blueprint(blueprint)
api.add_resource(HelloWorld, '/', endpoint="hello")
def test_add_resource_endpoint(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
view = mocker.Mock(**{'as_view.return_value.__name__': str('test_view')})
api.add_resource(view, '/foo', endpoint='bar')
app.register_blueprint(blueprint)
view.as_view.assert_called_with('bar', api)
def test_add_resource_endpoint_after_registration(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
app.register_blueprint(blueprint)
view = mocker.Mock(**{'as_view.return_value.__name__': str('test_view')})
api.add_resource(view, '/foo', endpoint='bar')
view.as_view.assert_called_with('bar', api)
def test_url_with_api_prefix(self, app):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint, prefix='/api')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint)
with app.test_request_context('/api/hi'):
assert request.endpoint == 'test.hello'
def test_url_with_blueprint_prefix(self, app):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint)
with app.test_request_context('/bp/hi'):
assert request.endpoint == 'test.hello'
def test_url_with_registration_prefix(self, app):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint, url_prefix='/reg')
with app.test_request_context('/reg/hi'):
assert request.endpoint == 'test.hello'
def test_registration_prefix_overrides_blueprint_prefix(self, app):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint, url_prefix='/reg')
with app.test_request_context('/reg/hi'):
assert request.endpoint == 'test.hello'
def test_url_with_api_and_blueprint_prefix(self, app):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restplus.Api(blueprint, prefix='/api')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint)
with app.test_request_context('/bp/api/hi'):
assert request.endpoint == 'test.hello'
def test_error_routing(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app.register_blueprint(blueprint)
with app.test_request_context('/hi', method='POST'):
assert api._should_use_fr_error_handler() is True
assert api._has_fr_route() is True
with app.test_request_context('/bye'):
api._should_use_fr_error_handler = mocker.Mock(return_value=False)
assert api._has_fr_route() is True
def test_non_blueprint_rest_error_routing(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app.register_blueprint(blueprint, url_prefix='/blueprint')
api2 = restplus.Api(app)
api2.add_resource(HelloWorld(api), '/hi', endpoint="hello")
api2.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
with app.test_request_context('/hi', method='POST'):
assert api._should_use_fr_error_handler() is False
assert api2._should_use_fr_error_handler() is True
assert api._has_fr_route() is False
assert api2._has_fr_route() is True
with app.test_request_context('/blueprint/hi', method='POST'):
assert api._should_use_fr_error_handler() is True
assert api2._should_use_fr_error_handler() is False
assert api._has_fr_route() is True
assert api2._has_fr_route() is False
api._should_use_fr_error_handler = mocker.Mock(return_value=False)
api2._should_use_fr_error_handler = mocker.Mock(return_value=False)
with app.test_request_context('/bye'):
assert api._has_fr_route() is False
assert api2._has_fr_route() is True
with app.test_request_context('/blueprint/bye'):
assert api._has_fr_route() is True
assert api2._has_fr_route() is False
def test_non_blueprint_non_rest_error_routing(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app.register_blueprint(blueprint, url_prefix='/blueprint')
@app.route('/hi')
def hi():
return 'hi'
@app.route('/bye')
def bye():
flask.abort(404)
with app.test_request_context('/hi', method='POST'):
assert api._should_use_fr_error_handler() is False
assert api._has_fr_route() is False
with app.test_request_context('/blueprint/hi', method='POST'):
assert api._should_use_fr_error_handler() is True
assert api._has_fr_route() is True
api._should_use_fr_error_handler = mocker.Mock(return_value=False)
with app.test_request_context('/bye'):
assert api._has_fr_route() is False
with app.test_request_context('/blueprint/bye'):
assert api._has_fr_route() is True | 0.568775 | 0.126353 |
from esper.prelude import *
from .queries import query
@query("Conversations")
def conversations_for_display():
from query.models import FaceCharacterActor, Shot
from rekall.video_interval_collection import VideoIntervalCollection
from rekall.parsers import in_array, bbox_payload_parser, merge_dict_parsers, dict_payload_parser
from rekall.merge_ops import payload_plus
from rekall.payload_predicates import payload_satisfies
from rekall.spatial_predicates import scene_graph
from esper.rekall import intrvllists_to_result_bbox
from query.models import Face
from rekall.video_interval_collection import VideoIntervalCollection
from rekall.parsers import in_array, bbox_payload_parser
from rekall.merge_ops import payload_plus, merge_named_payload, payload_second
from esper.rekall import intrvllists_to_result_bbox
from rekall.payload_predicates import payload_satisfies
from rekall.list_predicates import length_at_most
from rekall.logical_predicates import and_pred, or_pred, true_pred
from rekall.spatial_predicates import scene_graph, make_region
from rekall.temporal_predicates import before, after, overlaps, equal
from rekall.bbox_predicates import height_at_least
from esper.rekall import intrvllists_to_result, intrvllists_to_result_with_objects, add_intrvllists_to_result
from esper.prelude import esper_widget
from rekall.interval_list import Interval, IntervalList
import esper.face_embeddings as face_embeddings
video_id=15
EMBEDDING_EQUALITY_THRESHOLD = 1.
ONE_FRAME = 1
faces_qs = Face.objects.annotate(
min_frame=F('frame__number'),
max_frame=F('frame__number'),
video_id=F('frame__video_id')
).filter(frame__video_id=video_id, frame__regularly_sampled=True)
faces_per_frame = VideoIntervalCollection.from_django_qs(
faces_qs,
with_payload=in_array(merge_dict_parsers([
bbox_payload_parser(VideoIntervalCollection.django_accessor),
dict_payload_parser(VideoIntervalCollection.django_accessor, { 'face_id': 'id' }),
]))
).coalesce(payload_merge_op=payload_plus)
shots_qs = Shot.objects.filter(cinematic=True)
shots = VideoIntervalCollection.from_django_qs(shots_qs)
shots_with_faces = shots.merge(
faces_per_frame,
predicate=overlaps(),
payload_merge_op=lambda shot_id, faces_in_frame: (shot_id, [faces_in_frame])
).coalesce(payload_merge_op=lambda p1, p2: (p1[0], p1[1] + p2[1]))
def cluster_center(face_ids):
# print("About to compute mean")
mean_embedding = face_embeddings.mean(face_ids)
# print("About to compute dist", face_ids)
dists = face_embeddings.dist(face_ids, [mean_embedding])
# print("Done computing dist")
return min(zip(dists, face_ids))[1]
def cluster_and_compute_centers(faces_in_frame_list, shot_id):
num_people = max(len(faces_in_frame) for faces_in_frame in faces_in_frame_list)
face_ids = [face['face_id'] for faces_in_frame in faces_in_frame_list for face in faces_in_frame]
face_heights = [face['y2'] - face['y1']
for faces_in_frame in faces_in_frame_list for face in faces_in_frame]
print(num_people)
if num_people == 1:
clusters = [(fid, 0) for fid in face_ids]
else:
clusters = face_embeddings.kmeans(face_ids, num_people)
# print("Done clustering")
centers = [
(
cluster_center([
face_id
for face_id, cluster_id in clusters
if cluster_id == i
]), [
face_id
for face_id, cluster_id in clusters
if cluster_id == i
],
shot_id,
max([
face_heights[face_ids.index(face_id)]
for face_id, cluster_id in clusters if cluster_id == i
])
)
for i in range(num_people)
]
# print("Done computing the center")
return centers
# print("About to compute clusters")
shots_with_centers = shots_with_faces.map(
lambda intrvl: (intrvl.start, intrvl.end,
(intrvl.payload[0],
cluster_and_compute_centers(intrvl.payload[1], intrvl.payload[0]))
)
)
# print("Clusters computed")
def same_face(center1, center2):
return face_embeddings.dist([center1], target_ids=[center2])[0] < EMBEDDING_EQUALITY_THRESHOLD
def cross_product_faces(intrvl1, intrvl2):
payload1 = intrvl1.get_payload()
payload2 = intrvl2.get_payload()
payload = []
for cluster1 in payload1[1]:
for cluster2 in payload2[1]:
if not same_face(cluster1[0], cluster2[0]):
new_payload = {'A': cluster1, 'B': cluster2}
payload.append(new_payload)
return [(min(intrvl1.get_start(), intrvl2.get_start()),
max(intrvl1.get_end(), intrvl2.get_end()), {
'chrs': payload,
'shots': [payload1[0], payload2[0]]
})]
two_shots = shots_with_centers.join(
shots_with_centers,
predicate=after(max_dist=ONE_FRAME, min_dist=ONE_FRAME),
merge_op=cross_product_faces
)
# print("Cross product done")
def faces_equal(payload1, payload2):
for face_pair1 in payload1['chrs']:
for face_pair2 in payload2['chrs']:
if (same_face(face_pair1['A'][0], face_pair2['A'][0]) and
same_face(face_pair1['B'][0], face_pair2['B'][0])):
return True
if (same_face(face_pair1['A'][0], face_pair2['B'][0]) and
same_face(face_pair1['B'][0], face_pair2['A'][0])):
return True
return False
convs = two_shots.coalesce(
predicate=payload_satisfies(faces_equal, arity=2),
payload_merge_op = lambda payload1, payload2: {
'chrs': payload1['chrs'] + payload2['chrs'],
'shots': payload1['shots'] + payload2['shots']
}
)
# print("Coalesce done")
adjacent_seq = convs.merge(
convs,
predicate=and_pred(
after(max_dist=ONE_FRAME, min_dist=ONE_FRAME),
payload_satisfies(faces_equal, arity=2),
arity=2),
payload_merge_op = lambda payload1, payload2: {
'chrs': payload1['chrs'] + payload2['chrs'],
'shots': payload1['shots'] + payload2['shots']
},
working_window=1
)
convs = convs.set_union(adjacent_seq)
# convs = convs.coalesce(predicate=times_equal, payload_merge_op=shots_equal)
# print("Two-shot adjacencies done")
def filter_fn(intvl):
payload = intvl.get_payload()
if type(payload) is dict and 'shots' in payload:
return len(set(payload['shots'])) >= 3
return False
convs = convs.filter(filter_fn)
convs = convs.coalesce()
# print("Final filter done")
# for video_id in convs.intervals.keys():
# print(video_id)
# intvllist = convs.get_intervallist(video_id)
# for intvl in intvllist.get_intervals():
# print(intvl.payload)
# print(str(intvl.start) + ':' + str(intvl.end))
return intervallists_to_result_with_objects(convs, lambda a, b: []) | app/esper/queries/conversations.py | from esper.prelude import *
from .queries import query
@query("Conversations")
def conversations_for_display():
from query.models import FaceCharacterActor, Shot
from rekall.video_interval_collection import VideoIntervalCollection
from rekall.parsers import in_array, bbox_payload_parser, merge_dict_parsers, dict_payload_parser
from rekall.merge_ops import payload_plus
from rekall.payload_predicates import payload_satisfies
from rekall.spatial_predicates import scene_graph
from esper.rekall import intrvllists_to_result_bbox
from query.models import Face
from rekall.video_interval_collection import VideoIntervalCollection
from rekall.parsers import in_array, bbox_payload_parser
from rekall.merge_ops import payload_plus, merge_named_payload, payload_second
from esper.rekall import intrvllists_to_result_bbox
from rekall.payload_predicates import payload_satisfies
from rekall.list_predicates import length_at_most
from rekall.logical_predicates import and_pred, or_pred, true_pred
from rekall.spatial_predicates import scene_graph, make_region
from rekall.temporal_predicates import before, after, overlaps, equal
from rekall.bbox_predicates import height_at_least
from esper.rekall import intrvllists_to_result, intrvllists_to_result_with_objects, add_intrvllists_to_result
from esper.prelude import esper_widget
from rekall.interval_list import Interval, IntervalList
import esper.face_embeddings as face_embeddings
video_id=15
EMBEDDING_EQUALITY_THRESHOLD = 1.
ONE_FRAME = 1
faces_qs = Face.objects.annotate(
min_frame=F('frame__number'),
max_frame=F('frame__number'),
video_id=F('frame__video_id')
).filter(frame__video_id=video_id, frame__regularly_sampled=True)
faces_per_frame = VideoIntervalCollection.from_django_qs(
faces_qs,
with_payload=in_array(merge_dict_parsers([
bbox_payload_parser(VideoIntervalCollection.django_accessor),
dict_payload_parser(VideoIntervalCollection.django_accessor, { 'face_id': 'id' }),
]))
).coalesce(payload_merge_op=payload_plus)
shots_qs = Shot.objects.filter(cinematic=True)
shots = VideoIntervalCollection.from_django_qs(shots_qs)
shots_with_faces = shots.merge(
faces_per_frame,
predicate=overlaps(),
payload_merge_op=lambda shot_id, faces_in_frame: (shot_id, [faces_in_frame])
).coalesce(payload_merge_op=lambda p1, p2: (p1[0], p1[1] + p2[1]))
def cluster_center(face_ids):
# print("About to compute mean")
mean_embedding = face_embeddings.mean(face_ids)
# print("About to compute dist", face_ids)
dists = face_embeddings.dist(face_ids, [mean_embedding])
# print("Done computing dist")
return min(zip(dists, face_ids))[1]
def cluster_and_compute_centers(faces_in_frame_list, shot_id):
num_people = max(len(faces_in_frame) for faces_in_frame in faces_in_frame_list)
face_ids = [face['face_id'] for faces_in_frame in faces_in_frame_list for face in faces_in_frame]
face_heights = [face['y2'] - face['y1']
for faces_in_frame in faces_in_frame_list for face in faces_in_frame]
print(num_people)
if num_people == 1:
clusters = [(fid, 0) for fid in face_ids]
else:
clusters = face_embeddings.kmeans(face_ids, num_people)
# print("Done clustering")
centers = [
(
cluster_center([
face_id
for face_id, cluster_id in clusters
if cluster_id == i
]), [
face_id
for face_id, cluster_id in clusters
if cluster_id == i
],
shot_id,
max([
face_heights[face_ids.index(face_id)]
for face_id, cluster_id in clusters if cluster_id == i
])
)
for i in range(num_people)
]
# print("Done computing the center")
return centers
# print("About to compute clusters")
shots_with_centers = shots_with_faces.map(
lambda intrvl: (intrvl.start, intrvl.end,
(intrvl.payload[0],
cluster_and_compute_centers(intrvl.payload[1], intrvl.payload[0]))
)
)
# print("Clusters computed")
def same_face(center1, center2):
return face_embeddings.dist([center1], target_ids=[center2])[0] < EMBEDDING_EQUALITY_THRESHOLD
def cross_product_faces(intrvl1, intrvl2):
payload1 = intrvl1.get_payload()
payload2 = intrvl2.get_payload()
payload = []
for cluster1 in payload1[1]:
for cluster2 in payload2[1]:
if not same_face(cluster1[0], cluster2[0]):
new_payload = {'A': cluster1, 'B': cluster2}
payload.append(new_payload)
return [(min(intrvl1.get_start(), intrvl2.get_start()),
max(intrvl1.get_end(), intrvl2.get_end()), {
'chrs': payload,
'shots': [payload1[0], payload2[0]]
})]
two_shots = shots_with_centers.join(
shots_with_centers,
predicate=after(max_dist=ONE_FRAME, min_dist=ONE_FRAME),
merge_op=cross_product_faces
)
# print("Cross product done")
def faces_equal(payload1, payload2):
for face_pair1 in payload1['chrs']:
for face_pair2 in payload2['chrs']:
if (same_face(face_pair1['A'][0], face_pair2['A'][0]) and
same_face(face_pair1['B'][0], face_pair2['B'][0])):
return True
if (same_face(face_pair1['A'][0], face_pair2['B'][0]) and
same_face(face_pair1['B'][0], face_pair2['A'][0])):
return True
return False
convs = two_shots.coalesce(
predicate=payload_satisfies(faces_equal, arity=2),
payload_merge_op = lambda payload1, payload2: {
'chrs': payload1['chrs'] + payload2['chrs'],
'shots': payload1['shots'] + payload2['shots']
}
)
# print("Coalesce done")
adjacent_seq = convs.merge(
convs,
predicate=and_pred(
after(max_dist=ONE_FRAME, min_dist=ONE_FRAME),
payload_satisfies(faces_equal, arity=2),
arity=2),
payload_merge_op = lambda payload1, payload2: {
'chrs': payload1['chrs'] + payload2['chrs'],
'shots': payload1['shots'] + payload2['shots']
},
working_window=1
)
convs = convs.set_union(adjacent_seq)
# convs = convs.coalesce(predicate=times_equal, payload_merge_op=shots_equal)
# print("Two-shot adjacencies done")
def filter_fn(intvl):
payload = intvl.get_payload()
if type(payload) is dict and 'shots' in payload:
return len(set(payload['shots'])) >= 3
return False
convs = convs.filter(filter_fn)
convs = convs.coalesce()
# print("Final filter done")
# for video_id in convs.intervals.keys():
# print(video_id)
# intvllist = convs.get_intervallist(video_id)
# for intvl in intvllist.get_intervals():
# print(intvl.payload)
# print(str(intvl.start) + ':' + str(intvl.end))
return intervallists_to_result_with_objects(convs, lambda a, b: []) | 0.228845 | 0.185394 |
from datetime import datetime
from typing import Callable, List, Optional, Union

import pandas as pd
import pyarrow
import pyarrow.parquet  # "import pyarrow" alone does not load the parquet submodule
import pytz
from pydantic.typing import Literal

from feast import FileSource, OnDemandFeatureView
from feast.data_source import DataSource
from feast.errors import FeastJoinKeysDuringMaterialization
from feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_VAL, FeatureView
from feast.infra.offline_stores.offline_store import OfflineStore, RetrievalJob
from feast.infra.offline_stores.offline_utils import (
    DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL,
)
from feast.infra.provider import (
    _get_requested_feature_views_to_features_dict,
    _run_field_mapping,
)
from feast.registry import Registry
from feast.repo_config import FeastConfigBaseModel, RepoConfig
from feast.usage import log_exceptions_and_usage
class FileOfflineStoreConfig(FeastConfigBaseModel):
    """Offline store config for the local (file-based) offline store."""

    type: Literal["file"] = "file"
    """Offline store type selector; constrained to the literal string "file"."""
class FileRetrievalJob(RetrievalJob):
    """A lazy historical-retrieval job.

    Wraps a zero-argument callable that performs the actual retrieval; the
    callable is only invoked when a concrete result (pandas DataFrame or
    Arrow table) is requested.
    """

    def __init__(
        self,
        evaluation_function: Callable,
        full_feature_names: bool,
        on_demand_feature_views: Optional[List[OnDemandFeatureView]],
    ):
        """Initialize a lazy historical retrieval job."""
        # Deferred computation: nothing is evaluated until a result is needed.
        self.evaluation_function = evaluation_function
        self._full_feature_names = full_feature_names
        self._on_demand_feature_views = on_demand_feature_views

    @property
    def full_feature_names(self) -> bool:
        """Whether output feature columns carry a feature-view prefix."""
        return self._full_feature_names

    @property
    def on_demand_feature_views(self) -> Optional[List[OnDemandFeatureView]]:
        """On-demand feature views attached to this retrieval, if any."""
        return self._on_demand_feature_views

    @log_exceptions_and_usage
    def _to_df_internal(self) -> pd.DataFrame:
        # Execute the stored evaluation only now, at materialization time.
        return self.evaluation_function()

    @log_exceptions_and_usage
    def _to_arrow_internal(self):
        # Evaluate lazily, then convert the resulting DataFrame to Arrow.
        return pyarrow.Table.from_pandas(self.evaluation_function())
class FileOfflineStore(OfflineStore):
    """Offline store backed by parquet files on a local or fsspec filesystem."""

    @staticmethod
    @log_exceptions_and_usage(offline_store="file")
    def get_historical_features(
        config: RepoConfig,
        feature_views: List[FeatureView],
        feature_refs: List[str],
        entity_df: Union[pd.DataFrame, str],
        registry: Registry,
        project: str,
        full_feature_names: bool = False,
    ) -> RetrievalJob:
        """Build a lazy point-in-time join of the requested features onto entity_df.

        Args:
            config: Repo config; ``config.project`` is used to list on-demand
                feature views.
            feature_views: Feature views that may serve the requested features.
            feature_refs: Feature references of the form "<view>:<feature>".
            entity_df: Entity rows to enrich. Only pandas DataFrames are
                supported by this store; any other type raises ValueError.
            registry: Registry used to resolve entities and on-demand views.
            project: Feast project name used for entity lookups.
            full_feature_names: If True, output feature columns are named
                "<view>__<feature>" instead of just "<feature>".

        Returns:
            A FileRetrievalJob that performs the join when materialized.

        Raises:
            ValueError: If entity_df is not a DataFrame, or no usable event
                timestamp column can be determined.
        """
        if not isinstance(entity_df, pd.DataFrame):
            raise ValueError(
                f"Please provide an entity_df of type {type(pd.DataFrame)} instead of type {type(entity_df)}"
            )
        entity_df_event_timestamp_col = DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL  # local modifiable copy of global variable
        if entity_df_event_timestamp_col not in entity_df.columns:
            datetime_columns = entity_df.select_dtypes(
                include=["datetime", "datetimetz"]
            ).columns
            if len(datetime_columns) == 1:
                print(
                    f"Using {datetime_columns[0]} as the event timestamp. To specify a column explicitly, please name it {DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL}."
                )
                entity_df_event_timestamp_col = datetime_columns[0]
            else:
                raise ValueError(
                    f"Please provide an entity_df with a column named {DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL} representing the time of events."
                )
        (
            feature_views_to_features,
            on_demand_feature_views_to_features,
        ) = _get_requested_feature_views_to_features_dict(
            feature_refs,
            feature_views,
            registry.list_on_demand_feature_views(config.project),
        )

        # Create lazy function that is only called from the RetrievalJob object
        def evaluate_historical_retrieval():

            # Work on a copy so the timezone normalization below does not
            # mutate the caller's entity_df. (Bug fix: previously the original
            # frame was modified in place *before* being copied.)
            entity_df_with_features = entity_df.copy()

            # Make sure all event timestamp fields are tz-aware. We default tz-naive fields to UTC
            entity_df_with_features[entity_df_event_timestamp_col] = entity_df_with_features[
                entity_df_event_timestamp_col
            ].apply(lambda x: x if x.tzinfo is not None else x.replace(tzinfo=pytz.utc))

            # Convert event timestamp column to datetime and normalize time zone to UTC
            # This is necessary to avoid issues with pd.merge_asof
            entity_df_with_features[entity_df_event_timestamp_col] = pd.to_datetime(
                entity_df_with_features[entity_df_event_timestamp_col], utc=True
            )

            # Sort event timestamp values (merge_asof requires sorted keys)
            entity_df_with_features = entity_df_with_features.sort_values(
                entity_df_event_timestamp_col
            )

            # Load feature view data from sources and join them incrementally
            for feature_view, features in feature_views_to_features.items():
                event_timestamp_column = (
                    feature_view.batch_source.event_timestamp_column
                )
                created_timestamp_column = (
                    feature_view.batch_source.created_timestamp_column
                )

                # Read offline parquet data in pyarrow format.
                filesystem, path = FileSource.create_filesystem_and_path(
                    feature_view.batch_source.path,
                    feature_view.batch_source.file_options.s3_endpoint_override,
                )
                table = pyarrow.parquet.read_table(path, filesystem=filesystem)

                # Rename columns by the field mapping dictionary if it exists
                if feature_view.batch_source.field_mapping is not None:
                    table = _run_field_mapping(
                        table, feature_view.batch_source.field_mapping
                    )
                # Rename entity columns by the join_key_map dictionary if it exists
                if feature_view.projection.join_key_map:
                    table = _run_field_mapping(
                        table, feature_view.projection.join_key_map
                    )

                # Convert pyarrow table to pandas dataframe. Note, if the underlying data has missing values,
                # pandas will convert those values to np.nan if the dtypes are numerical (floats, ints, etc.) or boolean
                # If the dtype is 'object', then missing values are inferred as python `None`s.
                # More details at:
                # https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html#values-considered-missing
                df_to_join = table.to_pandas()

                # Make sure all timestamp fields are tz-aware. We default tz-naive fields to UTC
                df_to_join[event_timestamp_column] = df_to_join[
                    event_timestamp_column
                ].apply(
                    lambda x: x if x.tzinfo is not None else x.replace(tzinfo=pytz.utc)
                )

                if created_timestamp_column:
                    df_to_join[created_timestamp_column] = df_to_join[
                        created_timestamp_column
                    ].apply(
                        lambda x: x
                        if x.tzinfo is not None
                        else x.replace(tzinfo=pytz.utc)
                    )

                # Sort dataframe by the event timestamp column
                df_to_join = df_to_join.sort_values(event_timestamp_column)

                # Build a list of all the features we should select from this source
                feature_names = []
                for feature in features:
                    # Modify the separator for feature refs in column names to double underscore. We are using
                    # double underscore as separator for consistency with other databases like BigQuery,
                    # where there are very few characters available for use as separators
                    if full_feature_names:
                        formatted_feature_name = (
                            f"{feature_view.projection.name_to_use()}__{feature}"
                        )
                    else:
                        formatted_feature_name = feature
                    # Add the feature name to the list of columns
                    feature_names.append(formatted_feature_name)

                    # Ensure that the source dataframe feature column includes the feature view name as a prefix
                    df_to_join.rename(
                        columns={feature: formatted_feature_name}, inplace=True,
                    )

                # Build a list of entity columns to join on (from the right table)
                join_keys = []
                for entity_name in feature_view.entities:
                    entity = registry.get_entity(entity_name, project)
                    # Respect any join-key remapping declared on the projection
                    join_key = feature_view.projection.join_key_map.get(
                        entity.join_key, entity.join_key
                    )
                    join_keys.append(join_key)
                right_entity_columns = join_keys
                right_entity_key_columns = [
                    event_timestamp_column
                ] + right_entity_columns

                # Remove all duplicate entity keys (using created timestamp)
                right_entity_key_sort_columns = right_entity_key_columns
                if created_timestamp_column:
                    # If created_timestamp is available, use it to dedupe deterministically
                    right_entity_key_sort_columns = right_entity_key_sort_columns + [
                        created_timestamp_column
                    ]

                df_to_join.sort_values(by=right_entity_key_sort_columns, inplace=True)
                df_to_join.drop_duplicates(
                    right_entity_key_sort_columns,
                    keep="last",
                    ignore_index=True,
                    inplace=True,
                )

                # Select only the columns we need to join from the feature dataframe
                df_to_join = df_to_join[right_entity_key_columns + feature_names]

                # Do point in-time-join between entity_df and feature dataframe
                entity_df_with_features = pd.merge_asof(
                    entity_df_with_features,
                    df_to_join,
                    left_on=entity_df_event_timestamp_col,
                    right_on=event_timestamp_column,
                    by=right_entity_columns or None,
                    tolerance=feature_view.ttl,
                )

                # Remove right (feature table/view) event_timestamp column.
                if event_timestamp_column != entity_df_event_timestamp_col:
                    entity_df_with_features.drop(
                        columns=[event_timestamp_column], inplace=True
                    )

                # Ensure that we delete dataframes to free up memory
                del df_to_join

            # Move "event_timestamp" column to front
            current_cols = entity_df_with_features.columns.tolist()
            current_cols.remove(entity_df_event_timestamp_col)
            entity_df_with_features = entity_df_with_features[
                [entity_df_event_timestamp_col] + current_cols
            ]

            return entity_df_with_features

        job = FileRetrievalJob(
            evaluation_function=evaluate_historical_retrieval,
            full_feature_names=full_feature_names,
            on_demand_feature_views=OnDemandFeatureView.get_requested_odfvs(
                feature_refs, project, registry
            ),
        )
        return job

    @staticmethod
    @log_exceptions_and_usage(offline_store="file")
    def pull_latest_from_table_or_query(
        config: RepoConfig,
        data_source: DataSource,
        join_key_columns: List[str],
        feature_name_columns: List[str],
        event_timestamp_column: str,
        created_timestamp_column: Optional[str],
        start_date: datetime,
        end_date: datetime,
    ) -> RetrievalJob:
        """Return a lazy job yielding the latest row per entity key in [start_date, end_date).

        Args:
            config: Repo config (unused by this store; part of the interface).
            data_source: Must be a FileSource pointing at the parquet data.
            join_key_columns: Entity join key columns; if empty, a dummy
                entity column is added and all filtered rows are returned.
            feature_name_columns: Feature columns to include in the result.
            event_timestamp_column: Column holding the event timestamp.
            created_timestamp_column: Optional column used as a tie-breaker
                when deduplicating rows per entity key.
            start_date: Inclusive lower bound on the event timestamp.
            end_date: Exclusive upper bound on the event timestamp.

        Returns:
            A FileRetrievalJob producing the deduplicated latest rows.

        Raises:
            FeastJoinKeysDuringMaterialization: If the source is missing any
                of the requested join key columns (raised on evaluation).
        """
        assert isinstance(data_source, FileSource)

        # Create lazy function that is only called from the RetrievalJob object
        def evaluate_offline_job():
            filesystem, path = FileSource.create_filesystem_and_path(
                data_source.path, data_source.file_options.s3_endpoint_override
            )
            source_df = pd.read_parquet(path, filesystem=filesystem)
            # Make sure all timestamp fields are tz-aware. We default tz-naive fields to UTC
            source_df[event_timestamp_column] = source_df[event_timestamp_column].apply(
                lambda x: x if x.tzinfo is not None else x.replace(tzinfo=pytz.utc)
            )

            if created_timestamp_column:
                source_df[created_timestamp_column] = source_df[
                    created_timestamp_column
                ].apply(
                    lambda x: x if x.tzinfo is not None else x.replace(tzinfo=pytz.utc)
                )

            source_columns = set(source_df.columns)
            if not set(join_key_columns).issubset(source_columns):
                raise FeastJoinKeysDuringMaterialization(
                    data_source.path, set(join_key_columns), source_columns
                )

            ts_columns = (
                [event_timestamp_column, created_timestamp_column]
                if created_timestamp_column
                else [event_timestamp_column]
            )

            source_df.sort_values(by=ts_columns, inplace=True)

            filtered_df = source_df[
                (source_df[event_timestamp_column] >= start_date)
                & (source_df[event_timestamp_column] < end_date)
            ]
            columns_to_extract = set(
                join_key_columns + feature_name_columns + ts_columns
            )
            if join_key_columns:
                last_values_df = filtered_df.drop_duplicates(
                    join_key_columns, keep="last", ignore_index=True
                )
            else:
                # Copy the filtered slice before adding the dummy column, to
                # avoid chained-assignment writes into a view of source_df.
                last_values_df = filtered_df.copy()
                last_values_df[DUMMY_ENTITY_ID] = DUMMY_ENTITY_VAL
                columns_to_extract.add(DUMMY_ENTITY_ID)

            # Pass a list: indexing a DataFrame with a raw set is rejected by
            # newer pandas versions.
            return last_values_df[list(columns_to_extract)]

        # When materializing a single feature view, we don't need full feature names. On demand transforms aren't materialized
        return FileRetrievalJob(
            evaluation_function=evaluate_offline_job,
            full_feature_names=False,
            on_demand_feature_views=None,
        )
from typing import Callable, List, Optional, Union
import pandas as pd
import pyarrow
import pytz
from pydantic.typing import Literal
from feast import FileSource, OnDemandFeatureView
from feast.data_source import DataSource
from feast.errors import FeastJoinKeysDuringMaterialization
from feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_VAL, FeatureView
from feast.infra.offline_stores.offline_store import OfflineStore, RetrievalJob
from feast.infra.offline_stores.offline_utils import (
DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL,
)
from feast.infra.provider import (
_get_requested_feature_views_to_features_dict,
_run_field_mapping,
)
from feast.registry import Registry
from feast.repo_config import FeastConfigBaseModel, RepoConfig
from feast.usage import log_exceptions_and_usage
class FileOfflineStoreConfig(FeastConfigBaseModel):
    """Offline store config for the local (file-based) offline store."""

    type: Literal["file"] = "file"
    """Offline store type selector; constrained to the literal string "file"."""
class FileRetrievalJob(RetrievalJob):
    """A lazy historical-retrieval job.

    Wraps a zero-argument callable that performs the actual retrieval; the
    callable is only invoked when a concrete result (pandas DataFrame or
    Arrow table) is requested.
    """

    def __init__(
        self,
        evaluation_function: Callable,
        full_feature_names: bool,
        on_demand_feature_views: Optional[List[OnDemandFeatureView]],
    ):
        """Initialize a lazy historical retrieval job."""
        # Deferred computation: nothing is evaluated until a result is needed.
        self.evaluation_function = evaluation_function
        self._full_feature_names = full_feature_names
        self._on_demand_feature_views = on_demand_feature_views

    @property
    def full_feature_names(self) -> bool:
        """Whether output feature columns carry a feature-view prefix."""
        return self._full_feature_names

    @property
    def on_demand_feature_views(self) -> Optional[List[OnDemandFeatureView]]:
        """On-demand feature views attached to this retrieval, if any."""
        return self._on_demand_feature_views

    @log_exceptions_and_usage
    def _to_df_internal(self) -> pd.DataFrame:
        # Execute the stored evaluation only now, at materialization time.
        return self.evaluation_function()

    @log_exceptions_and_usage
    def _to_arrow_internal(self):
        # Evaluate lazily, then convert the resulting DataFrame to Arrow.
        return pyarrow.Table.from_pandas(self.evaluation_function())
class FileOfflineStore(OfflineStore):
    """Offline store that serves historical features from parquet files via
    pyarrow/pandas. All heavy work is wrapped in lazy evaluation functions
    executed by FileRetrievalJob."""

    @staticmethod
    @log_exceptions_and_usage(offline_store="file")
    def get_historical_features(
        config: RepoConfig,
        feature_views: List[FeatureView],
        feature_refs: List[str],
        entity_df: Union[pd.DataFrame, str],
        registry: Registry,
        project: str,
        full_feature_names: bool = False,
    ) -> RetrievalJob:
        """Build a lazy RetrievalJob that point-in-time joins the requested
        features onto ``entity_df``.

        Raises:
            ValueError: if entity_df is not a DataFrame, or no event
                timestamp column can be determined.
        """
        if not isinstance(entity_df, pd.DataFrame):
            raise ValueError(
                f"Please provide an entity_df of type {type(pd.DataFrame)} instead of type {type(entity_df)}"
            )
        entity_df_event_timestamp_col = DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL  # local modifiable copy of global variable
        if entity_df_event_timestamp_col not in entity_df.columns:
            # Fall back to the single datetime column if there is exactly one.
            datetime_columns = entity_df.select_dtypes(
                include=["datetime", "datetimetz"]
            ).columns
            if len(datetime_columns) == 1:
                print(
                    f"Using {datetime_columns[0]} as the event timestamp. To specify a column explicitly, please name it {DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL}."
                )
                entity_df_event_timestamp_col = datetime_columns[0]
            else:
                raise ValueError(
                    f"Please provide an entity_df with a column named {DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL} representing the time of events."
                )
        (
            feature_views_to_features,
            on_demand_feature_views_to_features,
        ) = _get_requested_feature_views_to_features_dict(
            feature_refs,
            feature_views,
            registry.list_on_demand_feature_views(config.project),
        )

        # Create lazy function that is only called from the RetrievalJob object
        def evaluate_historical_retrieval():
            # Work on a copy so the caller's entity_df is never mutated.
            # FIX: previously the tz normalization below was applied to
            # entity_df itself *before* copying, silently mutating the
            # caller's dataframe despite the copy's stated purpose.
            entity_df_with_features = entity_df.copy()

            # Make sure all event timestamp fields are tz-aware. We default tz-naive fields to UTC
            entity_df_with_features[
                entity_df_event_timestamp_col
            ] = entity_df_with_features[entity_df_event_timestamp_col].apply(
                lambda x: x if x.tzinfo is not None else x.replace(tzinfo=pytz.utc)
            )

            # Convert event timestamp column to datetime and normalize time zone to UTC
            # This is necessary to avoid issues with pd.merge_asof
            entity_df_with_features[entity_df_event_timestamp_col] = pd.to_datetime(
                entity_df_with_features[entity_df_event_timestamp_col], utc=True
            )
            # Sort event timestamp values (merge_asof requires sorted keys)
            entity_df_with_features = entity_df_with_features.sort_values(
                entity_df_event_timestamp_col
            )
            # Load feature view data from sources and join them incrementally
            for feature_view, features in feature_views_to_features.items():
                event_timestamp_column = (
                    feature_view.batch_source.event_timestamp_column
                )
                created_timestamp_column = (
                    feature_view.batch_source.created_timestamp_column
                )
                # Read offline parquet data in pyarrow format.
                filesystem, path = FileSource.create_filesystem_and_path(
                    feature_view.batch_source.path,
                    feature_view.batch_source.file_options.s3_endpoint_override,
                )
                table = pyarrow.parquet.read_table(path, filesystem=filesystem)
                # Rename columns by the field mapping dictionary if it exists
                if feature_view.batch_source.field_mapping is not None:
                    table = _run_field_mapping(
                        table, feature_view.batch_source.field_mapping
                    )
                # Rename entity columns by the join_key_map dictionary if it exists
                if feature_view.projection.join_key_map:
                    table = _run_field_mapping(
                        table, feature_view.projection.join_key_map
                    )
                # Convert pyarrow table to pandas dataframe. Note, if the underlying data has missing values,
                # pandas will convert those values to np.nan if the dtypes are numerical (floats, ints, etc.) or boolean
                # If the dtype is 'object', then missing values are inferred as python `None`s.
                # More details at:
                # https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html#values-considered-missing
                df_to_join = table.to_pandas()
                # Make sure all timestamp fields are tz-aware. We default tz-naive fields to UTC
                df_to_join[event_timestamp_column] = df_to_join[
                    event_timestamp_column
                ].apply(
                    lambda x: x if x.tzinfo is not None else x.replace(tzinfo=pytz.utc)
                )
                if created_timestamp_column:
                    df_to_join[created_timestamp_column] = df_to_join[
                        created_timestamp_column
                    ].apply(
                        lambda x: x
                        if x.tzinfo is not None
                        else x.replace(tzinfo=pytz.utc)
                    )
                # Sort dataframe by the event timestamp column
                df_to_join = df_to_join.sort_values(event_timestamp_column)
                # Build a list of all the features we should select from this source
                feature_names = []
                for feature in features:
                    # Modify the separator for feature refs in column names to double underscore. We are using
                    # double underscore as separator for consistency with other databases like BigQuery,
                    # where there are very few characters available for use as separators
                    if full_feature_names:
                        formatted_feature_name = (
                            f"{feature_view.projection.name_to_use()}__{feature}"
                        )
                    else:
                        formatted_feature_name = feature
                    # Add the feature name to the list of columns
                    feature_names.append(formatted_feature_name)
                    # Ensure that the source dataframe feature column includes the feature view name as a prefix
                    df_to_join.rename(
                        columns={feature: formatted_feature_name}, inplace=True,
                    )
                # Build a list of entity columns to join on (from the right table)
                join_keys = []
                for entity_name in feature_view.entities:
                    entity = registry.get_entity(entity_name, project)
                    join_key = feature_view.projection.join_key_map.get(
                        entity.join_key, entity.join_key
                    )
                    join_keys.append(join_key)
                right_entity_columns = join_keys
                right_entity_key_columns = [
                    event_timestamp_column
                ] + right_entity_columns
                # Remove all duplicate entity keys (using created timestamp)
                right_entity_key_sort_columns = right_entity_key_columns
                if created_timestamp_column:
                    # If created_timestamp is available, use it to dedupe deterministically
                    right_entity_key_sort_columns = right_entity_key_sort_columns + [
                        created_timestamp_column
                    ]
                df_to_join.sort_values(by=right_entity_key_sort_columns, inplace=True)
                df_to_join.drop_duplicates(
                    right_entity_key_sort_columns,
                    keep="last",
                    ignore_index=True,
                    inplace=True,
                )
                # Select only the columns we need to join from the feature dataframe
                df_to_join = df_to_join[right_entity_key_columns + feature_names]
                # Do point in-time-join between entity_df and feature dataframe
                entity_df_with_features = pd.merge_asof(
                    entity_df_with_features,
                    df_to_join,
                    left_on=entity_df_event_timestamp_col,
                    right_on=event_timestamp_column,
                    by=right_entity_columns or None,
                    tolerance=feature_view.ttl,
                )
                # Remove right (feature table/view) event_timestamp column.
                if event_timestamp_column != entity_df_event_timestamp_col:
                    entity_df_with_features.drop(
                        columns=[event_timestamp_column], inplace=True
                    )
                # Ensure that we delete dataframes to free up memory
                del df_to_join
            # Move "event_timestamp" column to front
            current_cols = entity_df_with_features.columns.tolist()
            current_cols.remove(entity_df_event_timestamp_col)
            entity_df_with_features = entity_df_with_features[
                [entity_df_event_timestamp_col] + current_cols
            ]
            return entity_df_with_features

        job = FileRetrievalJob(
            evaluation_function=evaluate_historical_retrieval,
            full_feature_names=full_feature_names,
            on_demand_feature_views=OnDemandFeatureView.get_requested_odfvs(
                feature_refs, project, registry
            ),
        )
        return job

    @staticmethod
    @log_exceptions_and_usage(offline_store="file")
    def pull_latest_from_table_or_query(
        config: RepoConfig,
        data_source: DataSource,
        join_key_columns: List[str],
        feature_name_columns: List[str],
        event_timestamp_column: str,
        created_timestamp_column: Optional[str],
        start_date: datetime,
        end_date: datetime,
    ) -> RetrievalJob:
        """Return a lazy RetrievalJob yielding the latest row per join key
        with event timestamps in [start_date, end_date)."""
        assert isinstance(data_source, FileSource)

        # Create lazy function that is only called from the RetrievalJob object
        def evaluate_offline_job():
            filesystem, path = FileSource.create_filesystem_and_path(
                data_source.path, data_source.file_options.s3_endpoint_override
            )
            source_df = pd.read_parquet(path, filesystem=filesystem)
            # Make sure all timestamp fields are tz-aware. We default tz-naive fields to UTC
            source_df[event_timestamp_column] = source_df[event_timestamp_column].apply(
                lambda x: x if x.tzinfo is not None else x.replace(tzinfo=pytz.utc)
            )
            if created_timestamp_column:
                source_df[created_timestamp_column] = source_df[
                    created_timestamp_column
                ].apply(
                    lambda x: x if x.tzinfo is not None else x.replace(tzinfo=pytz.utc)
                )
            source_columns = set(source_df.columns)
            if not set(join_key_columns).issubset(source_columns):
                raise FeastJoinKeysDuringMaterialization(
                    data_source.path, set(join_key_columns), source_columns
                )
            ts_columns = (
                [event_timestamp_column, created_timestamp_column]
                if created_timestamp_column
                else [event_timestamp_column]
            )
            source_df.sort_values(by=ts_columns, inplace=True)
            filtered_df = source_df[
                (source_df[event_timestamp_column] >= start_date)
                & (source_df[event_timestamp_column] < end_date)
            ]
            columns_to_extract = set(
                join_key_columns + feature_name_columns + ts_columns
            )
            if join_key_columns:
                # Rows are sorted by timestamp above, so keep="last" keeps the
                # most recent row per join key.
                last_values_df = filtered_df.drop_duplicates(
                    join_key_columns, keep="last", ignore_index=True
                )
            else:
                # FIX: copy the slice before adding the dummy column so we do
                # not write into a view of source_df (SettingWithCopyWarning /
                # possible silent no-op).
                last_values_df = filtered_df.copy()
                last_values_df[DUMMY_ENTITY_ID] = DUMMY_ENTITY_VAL
                columns_to_extract.add(DUMMY_ENTITY_ID)
            # FIX: pandas rejects a `set` as a column indexer; use a sorted
            # list for a valid and deterministic column selection.
            return last_values_df[sorted(columns_to_extract)]

        # When materializing a single feature view, we don't need full feature names. On demand transforms aren't materialized
        return FileRetrievalJob(
            evaluation_function=evaluate_offline_job,
            full_feature_names=False,
            on_demand_feature_views=None,
        )
from aioclustermanager.k8s.caller import K8SCaller
from base64 import b64decode
import aiohttp
import asyncio
import logging
import os
import ssl
import tempfile
logger = logging.getLogger('aioclustermanager')
# Standard Kubernetes in-cluster service-account environment variables and
# the well-known mount paths for the token and cluster CA certificate.
SERVICE_HOST_ENV_NAME = "KUBERNETES_SERVICE_HOST"
SERVICE_PORT_ENV_NAME = "KUBERNETES_SERVICE_PORT"
SERVICE_TOKEN_ENV_NAME = "KUBERNETES_SERVICE_TOKEN"
SERVICE_TOKEN_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/token"
SERVICE_CERT_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
def _join_host_port(host, port):
template = "%s:%s"
host_requires_bracketing = ':' in host or '%' in host
if host_requires_bracketing:
template = "[%s]:%s"
return template % (host, port)
class Configuration:
    """Builds the aiohttp session and SSL settings used to talk to the K8s API.

    Attributes:
        file: temp file holding the CA bundle when passed inline via 'ca'
            (deleted by K8SContextManager.close()).
        ssl_context: server-verification context built from 'ca'/'ca_file', if any.
        cert_file: temp file holding the client certificate when passed inline.
        scheme: URL scheme for the API endpoint ('https' unless overridden).

    NOTE(review): ``self.session`` is only created when one of the auth modes
    matches (certificate / basic auth / skip_ssl) -- callers that pass none of
    them get no session; confirm whether a default session should be added.
    """
    file = None
    ssl_context = None
    cert_file = None
    scheme = 'https'

    def __init__(self, environment, loop=None):
        self.headers = {}
        if loop is None:
            self.loop = asyncio.get_event_loop()
        else:
            self.loop = loop
        self.environment = environment
        if environment.get('in_cluster'):
            # In-cluster: authenticate with the service-account bearer token.
            if SERVICE_TOKEN_ENV_NAME in os.environ:
                token = os.environ[SERVICE_TOKEN_ENV_NAME]
            else:
                with open(SERVICE_TOKEN_FILENAME) as fi:
                    token = fi.read()
            self.headers = {
                'Authorization': 'Bearer ' + token
            }
            environment.update({
                'skip_ssl': True,
                'endpoint': _join_host_port(
                    os.environ[SERVICE_HOST_ENV_NAME],
                    os.environ[SERVICE_PORT_ENV_NAME])
            })
        if environment.get('certificate') is not None:
            # Certificate management
            self.load_certificate()
        elif environment.get('certificate_file') is not None:
            self.load_certificate_file()
        elif environment.get('user') and environment.get('credentials'):
            self.load_basic_auth()
        elif environment.get('skip_ssl'):
            self.load_skip_ssl()
        if environment.get('ca') is not None:
            self.file = tempfile.NamedTemporaryFile(delete=False)
            self.file.write(bytes(environment['ca'], encoding='utf-8'))
            self.file.close()
            # NOTE(review): no-arg SSLContext() is deprecated and does not
            # verify by default -- consider ssl.create_default_context().
            self.ssl_context = ssl.SSLContext()
            # FIX: previously called load_verify_locations on a local variable
            # `ssl_context` that was None, raising AttributeError.
            self.ssl_context.load_verify_locations(self.file.name)
        elif environment.get('ca_file') is not None:
            self.ssl_context = ssl.SSLContext()
            self.ssl_context.load_verify_locations(environment['ca_file'])
        # FIX: 'skip_ssl' is optional; environment['skip_ssl'] raised KeyError
        # when callers did not supply it. Absent key means "verify".
        self.verify = not environment.get('skip_ssl')
        if 'http_scheme' in environment:
            self.scheme = environment['http_scheme']

    def load_skip_ssl(self):
        """Session that skips TLS certificate verification entirely."""
        self.session = aiohttp.ClientSession(
            connector=aiohttp.TCPConnector(verify_ssl=False, loop=self.loop),
            headers=self.headers, loop=self.loop)

    def load_basic_auth(self):
        """Session authenticated with HTTP basic auth (user/credentials)."""
        basic_auth = aiohttp.BasicAuth(
            self.environment['user'], self.environment['credentials'])
        self.session = aiohttp.ClientSession(
            auth=basic_auth, headers=self.headers, loop=self.loop)

    def load_certificate_file(self):
        """Session using a client certificate (and optional key) from disk."""
        logger.debug('Loading cert files')
        ssl_client_context = ssl.create_default_context(
            purpose=ssl.Purpose.CLIENT_AUTH)
        if 'key_file' in self.environment:
            ssl_client_context.load_cert_chain(
                certfile=self.environment['certificate_file'],
                keyfile=self.environment['key_file'])
        else:
            ssl_client_context.load_cert_chain(
                certfile=self.environment['certificate_file'])
        conn = aiohttp.TCPConnector(
            ssl_context=ssl_client_context, loop=self.loop)
        self.session = aiohttp.ClientSession(
            connector=conn, headers=self.headers, loop=self.loop)

    def load_certificate(self):
        """Session using a base64-inline client certificate and key.

        The decoded material is written to temp files so the ssl module can
        load it; the cert temp file is tracked in self.cert_file.
        """
        ssl_client_context = ssl.create_default_context(
            purpose=ssl.Purpose.CLIENT_AUTH)
        self.cert_file = tempfile.NamedTemporaryFile(delete=False)
        self.cert_file.write(b64decode(self.environment['certificate']))
        self.cert_file.close()
        self.client_key = tempfile.NamedTemporaryFile(delete=False)
        self.client_key.write(b64decode(self.environment['key']))
        self.client_key.close()
        ssl_client_context.load_cert_chain(
            certfile=self.cert_file.name, keyfile=self.client_key.name)
        conn = aiohttp.TCPConnector(
            ssl_context=ssl_client_context, loop=self.loop)
        self.session = aiohttp.ClientSession(
            connector=conn, loop=self.loop, headers=self.headers)
class K8SContextManager:
    """Async context manager that yields a configured K8SCaller and cleans
    up the underlying session / temp CA file on exit."""

    def __init__(self, environment, loop=None):
        self.environment = environment
        self.loop = asyncio.get_event_loop() if loop is None else loop

    async def __aenter__(self):
        return await self.open()

    async def open(self):
        """Build the Configuration and return a caller bound to it."""
        self.config = Configuration(self.environment, self.loop)
        return K8SCaller(
            self.config.ssl_context,
            self.environment['endpoint'],
            self.config.session,
            verify=self.config.verify,
            scheme=self.config.scheme)

    async def __aexit__(self, exc_type, exc, tb):
        await self.close()

    async def close(self):
        """Remove the temp CA file (if one was created) and close the session."""
        if self.config.file is not None:
            os.unlink(self.config.file.name)
        await self.config.session.close()
async def create_k8s_caller(environment):
    """Build a K8SCaller directly from an environment dict (no context manager).

    FIX: the final line of this function was corrupted by a concatenated-file
    artifact ("scheme=config.scheme) | <path> | from aioclustermanager...");
    restored to valid code.
    """
    config = Configuration(environment)
    return K8SCaller(
        config.ssl_context,
        environment['endpoint'],
        config.session,
        verify=config.verify,
        scheme=config.scheme)
from base64 import b64decode
import aiohttp
import asyncio
import logging
import os
import ssl
import tempfile
logger = logging.getLogger('aioclustermanager')
# Standard Kubernetes in-cluster service-account environment variables and
# the well-known mount paths for the token and cluster CA certificate.
SERVICE_HOST_ENV_NAME = "KUBERNETES_SERVICE_HOST"
SERVICE_PORT_ENV_NAME = "KUBERNETES_SERVICE_PORT"
SERVICE_TOKEN_ENV_NAME = "KUBERNETES_SERVICE_TOKEN"
SERVICE_TOKEN_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/token"
SERVICE_CERT_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
def _join_host_port(host, port):
template = "%s:%s"
host_requires_bracketing = ':' in host or '%' in host
if host_requires_bracketing:
template = "[%s]:%s"
return template % (host, port)
class Configuration:
    """Builds the aiohttp session and SSL settings used to talk to the K8s API.

    Attributes:
        file: temp file holding the CA bundle when passed inline via 'ca'
            (deleted by K8SContextManager.close()).
        ssl_context: server-verification context built from 'ca'/'ca_file', if any.
        cert_file: temp file holding the client certificate when passed inline.
        scheme: URL scheme for the API endpoint ('https' unless overridden).

    NOTE(review): ``self.session`` is only created when one of the auth modes
    matches (certificate / basic auth / skip_ssl) -- callers that pass none of
    them get no session; confirm whether a default session should be added.
    """
    file = None
    ssl_context = None
    cert_file = None
    scheme = 'https'

    def __init__(self, environment, loop=None):
        self.headers = {}
        if loop is None:
            self.loop = asyncio.get_event_loop()
        else:
            self.loop = loop
        self.environment = environment
        if environment.get('in_cluster'):
            # In-cluster: authenticate with the service-account bearer token.
            if SERVICE_TOKEN_ENV_NAME in os.environ:
                token = os.environ[SERVICE_TOKEN_ENV_NAME]
            else:
                with open(SERVICE_TOKEN_FILENAME) as fi:
                    token = fi.read()
            self.headers = {
                'Authorization': 'Bearer ' + token
            }
            environment.update({
                'skip_ssl': True,
                'endpoint': _join_host_port(
                    os.environ[SERVICE_HOST_ENV_NAME],
                    os.environ[SERVICE_PORT_ENV_NAME])
            })
        if environment.get('certificate') is not None:
            # Certificate management
            self.load_certificate()
        elif environment.get('certificate_file') is not None:
            self.load_certificate_file()
        elif environment.get('user') and environment.get('credentials'):
            self.load_basic_auth()
        elif environment.get('skip_ssl'):
            self.load_skip_ssl()
        if environment.get('ca') is not None:
            self.file = tempfile.NamedTemporaryFile(delete=False)
            self.file.write(bytes(environment['ca'], encoding='utf-8'))
            self.file.close()
            # NOTE(review): no-arg SSLContext() is deprecated and does not
            # verify by default -- consider ssl.create_default_context().
            self.ssl_context = ssl.SSLContext()
            # FIX: previously called load_verify_locations on a local variable
            # `ssl_context` that was None, raising AttributeError.
            self.ssl_context.load_verify_locations(self.file.name)
        elif environment.get('ca_file') is not None:
            self.ssl_context = ssl.SSLContext()
            self.ssl_context.load_verify_locations(environment['ca_file'])
        # FIX: 'skip_ssl' is optional; environment['skip_ssl'] raised KeyError
        # when callers did not supply it. Absent key means "verify".
        self.verify = not environment.get('skip_ssl')
        if 'http_scheme' in environment:
            self.scheme = environment['http_scheme']

    def load_skip_ssl(self):
        """Session that skips TLS certificate verification entirely."""
        self.session = aiohttp.ClientSession(
            connector=aiohttp.TCPConnector(verify_ssl=False, loop=self.loop),
            headers=self.headers, loop=self.loop)

    def load_basic_auth(self):
        """Session authenticated with HTTP basic auth (user/credentials)."""
        basic_auth = aiohttp.BasicAuth(
            self.environment['user'], self.environment['credentials'])
        self.session = aiohttp.ClientSession(
            auth=basic_auth, headers=self.headers, loop=self.loop)

    def load_certificate_file(self):
        """Session using a client certificate (and optional key) from disk."""
        logger.debug('Loading cert files')
        ssl_client_context = ssl.create_default_context(
            purpose=ssl.Purpose.CLIENT_AUTH)
        if 'key_file' in self.environment:
            ssl_client_context.load_cert_chain(
                certfile=self.environment['certificate_file'],
                keyfile=self.environment['key_file'])
        else:
            ssl_client_context.load_cert_chain(
                certfile=self.environment['certificate_file'])
        conn = aiohttp.TCPConnector(
            ssl_context=ssl_client_context, loop=self.loop)
        self.session = aiohttp.ClientSession(
            connector=conn, headers=self.headers, loop=self.loop)

    def load_certificate(self):
        """Session using a base64-inline client certificate and key.

        The decoded material is written to temp files so the ssl module can
        load it; the cert temp file is tracked in self.cert_file.
        """
        ssl_client_context = ssl.create_default_context(
            purpose=ssl.Purpose.CLIENT_AUTH)
        self.cert_file = tempfile.NamedTemporaryFile(delete=False)
        self.cert_file.write(b64decode(self.environment['certificate']))
        self.cert_file.close()
        self.client_key = tempfile.NamedTemporaryFile(delete=False)
        self.client_key.write(b64decode(self.environment['key']))
        self.client_key.close()
        ssl_client_context.load_cert_chain(
            certfile=self.cert_file.name, keyfile=self.client_key.name)
        conn = aiohttp.TCPConnector(
            ssl_context=ssl_client_context, loop=self.loop)
        self.session = aiohttp.ClientSession(
            connector=conn, loop=self.loop, headers=self.headers)
class K8SContextManager:
    """Async context manager that yields a configured K8SCaller and cleans
    up the underlying session / temp CA file on exit."""

    def __init__(self, environment, loop=None):
        self.environment = environment
        self.loop = asyncio.get_event_loop() if loop is None else loop

    async def __aenter__(self):
        return await self.open()

    async def open(self):
        """Build the Configuration and return a caller bound to it."""
        self.config = Configuration(self.environment, self.loop)
        return K8SCaller(
            self.config.ssl_context,
            self.environment['endpoint'],
            self.config.session,
            verify=self.config.verify,
            scheme=self.config.scheme)

    async def __aexit__(self, exc_type, exc, tb):
        await self.close()

    async def close(self):
        """Remove the temp CA file (if one was created) and close the session."""
        if self.config.file is not None:
            os.unlink(self.config.file.name)
        await self.config.session.close()
async def create_k8s_caller(environment):
    """Convenience builder: configure and return a K8SCaller directly."""
    cfg = Configuration(environment)
    return K8SCaller(
        cfg.ssl_context,
        environment['endpoint'],
        cfg.session,
        verify=cfg.verify,
        scheme=cfg.scheme)
# Function to find the lowest common ancestor in a BST.
from collections import deque
def LCA(root, n1, n2):
    """Return the lowest common ancestor of values n1 and n2 in a BST.

    Iterative O(h) descent exploiting the BST property: while both target
    values lie strictly on the same side of the current node, move down that
    side; the first node whose value falls between them (inclusive) is the
    LCA. The original recursed into *both* subtrees (O(n)), used 0 as a
    not-found sentinel, and crashed on an empty tree.

    Assumes n1 and n2 are both present in the tree -- TODO confirm with callers.
    Returns None for an empty tree.
    """
    lo, hi = min(n1, n2), max(n1, n2)
    while root is not None:
        if root.data > hi:
            root = root.left
        elif root.data < lo:
            root = root.right
        else:
            return root
    return None
# {
# Driver Code Starts
# Initial Template for Python 3
# Tree Node
class Node:
    """A binary tree node holding a value and left/right child links."""

    def __init__(self, val):
        self.data = val
        self.left = None
        self.right = None
# Function to Build Tree
def buildTree(s):
    """Build a binary tree from a space-separated level-order string.

    The token "N" marks an absent child. Returns the root node, or None for
    an empty/absent tree.
    """
    # Corner case: empty input or missing root.
    if len(s) == 0 or s[0] == "N":
        return None
    values = s.split()
    root = Node(int(values[0]))
    pending = deque([root])
    i = 1
    while pending and i < len(values):
        node = pending.popleft()
        # Attach the left child, if present.
        if values[i] != "N":
            node.left = Node(int(values[i]))
            pending.append(node.left)
        i += 1
        if i >= len(values):
            break
        # Attach the right child, if present.
        if values[i] != "N":
            node.right = Node(int(values[i]))
            pending.append(node.right)
        i += 1
    return root
# Driver: read t test cases; each case is a level-order tree string followed
# by the two target values, then print the value of their LCA.
if __name__ == "__main__":
    t = int(input())
    for _ in range(0, t):
        s = input()
        root = buildTree(s)
        n1, n2 = list(map(int, input().split()))
        print(LCA(root, n1, n2).data)
# } Driver Code Ends
# Function to find the lowest common ancestor in a BST.
from collections import deque
def LCA(root, n1, n2):
    """Return the lowest common ancestor of values n1 and n2 in a BST.

    Iterative O(h) descent exploiting the BST property: while both target
    values lie strictly on the same side of the current node, move down that
    side; the first node whose value falls between them (inclusive) is the
    LCA. The original recursed into *both* subtrees (O(n)), used 0 as a
    not-found sentinel, and crashed on an empty tree.

    Assumes n1 and n2 are both present in the tree -- TODO confirm with callers.
    Returns None for an empty tree.
    """
    lo, hi = min(n1, n2), max(n1, n2)
    while root is not None:
        if root.data > hi:
            root = root.left
        elif root.data < lo:
            root = root.right
        else:
            return root
    return None
# {
# Driver Code Starts
# Initial Template for Python 3
# Tree Node
class Node:
    """A binary tree node holding a value and left/right child links."""

    def __init__(self, val):
        self.data = val
        self.left = None
        self.right = None
# Function to Build Tree
def buildTree(s):
    """Build a binary tree from a space-separated level-order string.

    The token "N" marks an absent child. Returns the root node, or None for
    an empty/absent tree.
    """
    # Corner case: empty input or missing root.
    if len(s) == 0 or s[0] == "N":
        return None
    values = s.split()
    root = Node(int(values[0]))
    pending = deque([root])
    i = 1
    while pending and i < len(values):
        node = pending.popleft()
        # Attach the left child, if present.
        if values[i] != "N":
            node.left = Node(int(values[i]))
            pending.append(node.left)
        i += 1
        if i >= len(values):
            break
        # Attach the right child, if present.
        if values[i] != "N":
            node.right = Node(int(values[i]))
            pending.append(node.right)
        i += 1
    return root
# Driver: read t test cases; each case is a level-order tree string followed
# by the two target values, then print the value of their LCA.
if __name__ == "__main__":
    t = int(input())
    for _ in range(0, t):
        s = input()
        root = buildTree(s)
        n1, n2 = list(map(int, input().split()))
        print(LCA(root, n1, n2).data)
# } Driver Code Ends
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from .registry import register_model
from .helpers import load_pretrained
from .adaptive_avgmax_pool import SelectAdaptivePool2d
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
__all__ = ['Xception65', 'Xception71']

# Pretrained-weight metadata consumed by load_pretrained() and the model
# registry: weight URL, expected input geometry, normalization stats, and the
# names of the first conv / classifier layers for input- and head-adaptation.
default_cfgs = {
    'gluon_xception65': {
        'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth',
        'input_size': (3, 299, 299),
        'crop_pct': 0.875,
        'pool_size': (10, 10),
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'num_classes': 1000,
        'first_conv': 'conv1',
        'classifier': 'fc'
        # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
    },
    'gluon_xception71': {
        'url': '',  # no released pretrained weights for this variant
        'input_size': (3, 299, 299),
        'crop_pct': 0.875,
        'pool_size': (10, 10),
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'num_classes': 1000,
        'first_conv': 'conv1',
        'classifier': 'fc'
        # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
    }
}

""" PADDING NOTES
The original PyTorch and Gluon impl of these models dutifully reproduced the
aligned padding added to Tensorflow models for Deeplab. This padding was compensating
for Tensorflow 'SAME' padding. PyTorch symmetric padding behaves the way we'd want it to.
So, I'm phasing out the 'fixed_padding' ported from TF and replacing with normal
PyTorch padding, some asserts to validate the equivalence for any scenario we'd
care about before removing altogether.
"""
_USE_FIXED_PAD = False
def _pytorch_padding(kernel_size, stride=1, dilation=1, **_):
if _USE_FIXED_PAD:
return 0 # FIXME remove once verified
else:
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
# FIXME remove once verified
fp = _fixed_padding(kernel_size, dilation)
assert all(padding == p for p in fp)
return padding
def _fixed_padding(kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
return [pad_beg, pad_end, pad_beg, pad_end]
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: depthwise conv -> norm -> pointwise 1x1.

    Note: attribute names (conv_dw, bn, conv_pw, fixed_padding) are part of
    the state_dict layout and must not change.
    """

    def __init__(self, inplanes, planes, kernel_size=3, stride=1,
                 dilation=1, bias=False, norm_layer=None, norm_kwargs=None):
        super(SeparableConv2d, self).__init__()
        if norm_kwargs is None:
            norm_kwargs = {}
        self.kernel_size = kernel_size
        self.dilation = dilation
        pad = _fixed_padding(self.kernel_size, self.dilation)
        self.fixed_padding = None
        if _USE_FIXED_PAD and any(p > 0 for p in pad):
            self.fixed_padding = nn.ZeroPad2d(pad)
        # depthwise convolution (groups == channels)
        self.conv_dw = nn.Conv2d(
            inplanes, inplanes, kernel_size, stride=stride,
            padding=_pytorch_padding(kernel_size, stride, dilation),
            dilation=dilation, groups=inplanes, bias=bias)
        self.bn = norm_layer(num_features=inplanes, **norm_kwargs)
        # pointwise convolution
        self.conv_pw = nn.Conv2d(inplanes, planes, kernel_size=1, bias=bias)

    def forward(self, x):
        if self.fixed_padding is not None:
            # FIXME remove once verified
            x = self.fixed_padding(x)
        return self.conv_pw(self.bn(self.conv_dw(x)))
class Block(nn.Module):
    """Xception block: a stack of separable convs with an additive skip path.

    The `rep` stack is assembled into an OrderedDict; the `l` counter numbers
    the act/conv/bn entries so module names line up with pretrained Gluon
    weights -- the exact assembly order below must not change.
    """
    def __init__(self, inplanes, planes, num_reps, stride=1, dilation=1, norm_layer=None,
                 norm_kwargs=None, start_with_relu=True, grow_first=True, is_last=False):
        super(Block, self).__init__()
        norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        if planes != inplanes or stride != 1:
            # 1x1 projection shortcut when channel count or resolution changes.
            self.skip = nn.Sequential()
            self.skip.add_module('conv1', nn.Conv2d(
                inplanes, planes, 1, stride=stride, bias=False)),  # NOTE(review): trailing comma makes this an unused 1-tuple -- harmless but likely unintended
            self.skip.add_module('bn1', norm_layer(num_features=planes, **norm_kwargs))
        else:
            self.skip = None  # identity skip
        rep = OrderedDict()
        l = 1  # running layer index embedded in the module names
        filters = inplanes
        if grow_first:
            # Grow channels with the first conv of the stack.
            if start_with_relu:
                rep['act%d' % l] = nn.ReLU(inplace=False)  # NOTE: silent failure if inplace=True here
            rep['conv%d' % l] = SeparableConv2d(
                inplanes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            filters = planes
            l += 1
        for _ in range(num_reps - 1):
            # Repeated same-width separable convs.
            if grow_first or start_with_relu:
                # FIXME being conservative with inplace here, think it's fine to leave True?
                rep['act%d' % l] = nn.ReLU(inplace=grow_first or not start_with_relu)
            rep['conv%d' % l] = SeparableConv2d(
                filters, filters, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=filters, **norm_kwargs)
            l += 1
        if not grow_first:
            # Grow channels with the *last* conv instead.
            rep['act%d' % l] = nn.ReLU(inplace=True)
            rep['conv%d' % l] = SeparableConv2d(
                inplanes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            l += 1
        if stride != 1:
            # Strided separable conv performs the downsampling at the block end.
            rep['act%d' % l] = nn.ReLU(inplace=True)
            rep['conv%d' % l] = SeparableConv2d(
                planes, planes, 3, stride, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            l += 1
        elif is_last:
            # Extra stride-1 conv keeps the layer count aligned for 'last' blocks.
            rep['act%d' % l] = nn.ReLU(inplace=True)
            rep['conv%d' % l] = SeparableConv2d(
                planes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            l += 1
        self.rep = nn.Sequential(rep)

    def forward(self, x):
        # Residual add of the (possibly projected) skip path.
        skip = x
        if self.skip is not None:
            skip = self.skip(skip)
        x = self.rep(x) + skip
        return x
class Xception65(nn.Module):
    """Modified Aligned Xception (65-layer variant).

    output_stride selects the stride/dilation schedule (32, 16 or 8) used when
    the network serves as a dense-prediction backbone.
    """

    def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None, drop_rate=0., global_pool='avg'):
        super(Xception65, self).__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        if output_stride == 32:
            entry_block3_stride = 2
            exit_block20_stride = 2
            middle_block_dilation = 1
            exit_block_dilations = (1, 1)
        elif output_stride == 16:
            entry_block3_stride = 2
            exit_block20_stride = 1
            middle_block_dilation = 1
            exit_block_dilations = (1, 2)
        elif output_stride == 8:
            entry_block3_stride = 1
            exit_block20_stride = 1
            middle_block_dilation = 2
            exit_block_dilations = (2, 4)
        else:
            raise NotImplementedError

        # Entry flow
        self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = norm_layer(num_features=32, **norm_kwargs)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False)
        # FIX: pass **norm_kwargs -- every other norm layer in this class
        # receives them; bn2 was inconsistently created without them.
        self.bn2 = norm_layer(num_features=64, **norm_kwargs)
        self.block1 = Block(
            64, 128, num_reps=2, stride=2,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=False)
        self.block2 = Block(
            128, 256, num_reps=2, stride=2,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True)
        self.block3 = Block(
            256, 728, num_reps=2, stride=entry_block3_stride,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True, is_last=True)

        # Middle flow: blocks 4..19
        self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block(
            728, 728, num_reps=3, stride=1, dilation=middle_block_dilation,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True))
            for i in range(4, 20)]))

        # Exit flow
        self.block20 = Block(
            728, 1024, num_reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=False, is_last=True)
        self.conv3 = SeparableConv2d(
            1024, 1536, 3, stride=1, dilation=exit_block_dilations[1],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.bn3 = norm_layer(num_features=1536, **norm_kwargs)
        self.conv4 = SeparableConv2d(
            1536, 1536, 3, stride=1, dilation=exit_block_dilations[1],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.bn4 = norm_layer(num_features=1536, **norm_kwargs)
        self.num_features = 2048
        self.conv5 = SeparableConv2d(
            1536, self.num_features, 3, stride=1, dilation=exit_block_dilations[1],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.bn5 = norm_layer(num_features=self.num_features, **norm_kwargs)
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes)

    def get_classifier(self):
        """Return the classifier head module."""
        return self.fc

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the pooling + classifier head for a new class count."""
        self.num_classes = num_classes
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes) if num_classes else None

    def forward_features(self, x):
        """Run the backbone, returning the pre-pooling feature map."""
        # Entry flow
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.block1(x)
        # add relu here
        x = self.relu(x)
        # c1 = x
        x = self.block2(x)
        # c2 = x
        x = self.block3(x)

        # Middle flow
        x = self.mid(x)
        # c3 = x

        # Exit flow
        x = self.block20(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.global_pool(x).flatten(1)
        if self.drop_rate:
            # FIX: F.dropout is not in-place; the result must be assigned or
            # dropout silently does nothing.
            x = F.dropout(x, self.drop_rate, training=self.training)
        x = self.fc(x)
        return x
class Xception71(nn.Module):
"""Modified Aligned Xception
"""
def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d,
norm_kwargs=None, drop_rate=0., global_pool='avg'):
super(Xception71, self).__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
if output_stride == 32:
entry_block3_stride = 2
exit_block20_stride = 2
middle_block_dilation = 1
exit_block_dilations = (1, 1)
elif output_stride == 16:
entry_block3_stride = 2
exit_block20_stride = 1
middle_block_dilation = 1
exit_block_dilations = (1, 2)
elif output_stride == 8:
entry_block3_stride = 1
exit_block20_stride = 1
middle_block_dilation = 2
exit_block_dilations = (2, 4)
else:
raise NotImplementedError
# Entry flow
self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = norm_layer(num_features=32, **norm_kwargs)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = norm_layer(num_features=64)
self.block1 = Block(
64, 128, num_reps=2, stride=2, norm_layer=norm_layer,
norm_kwargs=norm_kwargs, start_with_relu=False)
self.block2 = nn.Sequential(*[
Block(
128, 256, num_reps=2, stride=1, norm_layer=norm_layer,
norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True),
Block(
256, 256, num_reps=2, stride=2, norm_layer=norm_layer,
norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True),
Block(
256, 728, num_reps=2, stride=2, norm_layer=norm_layer,
norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True)])
self.block3 = Block(
728, 728, num_reps=2, stride=entry_block3_stride, norm_layer=norm_layer,
norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True, is_last=True)
# Middle flow
self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block(
728, 728, num_reps=3, stride=1, dilation=middle_block_dilation,
norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True))
for i in range(4, 20)]))
# Exit flow
self.block20 = Block(
728, 1024, num_reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0],
norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=False, is_last=True)
self.conv3 = SeparableConv2d(
1024, 1536, 3, stride=1, dilation=exit_block_dilations[1],
norm_layer=norm_layer, norm_kwargs=norm_kwargs)
self.bn3 = norm_layer(num_features=1536, **norm_kwargs)
self.conv4 = SeparableConv2d(
1536, 1536, 3, stride=1, dilation=exit_block_dilations[1],
norm_layer=norm_layer, norm_kwargs=norm_kwargs)
self.bn4 = norm_layer(num_features=1536, **norm_kwargs)
self.num_features = 2048
self.conv5 = SeparableConv2d(
1536, self.num_features, 3, stride=1, dilation=exit_block_dilations[1],
norm_layer=norm_layer, norm_kwargs=norm_kwargs)
self.bn5 = norm_layer(num_features=self.num_features, **norm_kwargs)
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes)
    def get_classifier(self):
        """Return the classification head (the final fully-connected layer)."""
        return self.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes) if num_classes else None
    def forward_features(self, x):
        """Run the convolutional trunk and return the final feature map
        (channels == ``self.num_features``); classification head not applied."""
        # Entry flow
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.block1(x)
        # block1 was built with start_with_relu=False and its rep ends in a
        # norm layer, so apply the activation explicitly here.
        x = self.relu(x)
        # low_level_feat = x
        x = self.block2(x)
        # c2 = x
        x = self.block3(x)
        # Middle flow
        x = self.mid(x)
        # c3 = x
        # Exit flow
        x = self.block20(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu(x)
        return x
def forward(self, x):
x = self.forward_features(x)
x = self.global_pool(x).flatten(1)
if self.drop_rate:
F.dropout(x, self.drop_rate, training=self.training)
x = self.fc(x)
return x
@register_model
def gluon_xception65(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct a Modified Aligned Xception-65.

    Args:
        pretrained: load the released checkpoint when True.
        num_classes: classifier output size.
        in_chans: number of input image channels.
        **kwargs: forwarded to the Xception65 constructor.
    """
    cfg = default_cfgs['gluon_xception65']
    model = Xception65(num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def gluon_xception71(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct a Modified Aligned Xception-71.

    Args:
        pretrained: load pretrained weights when True (note: no URL is
            configured for this variant in default_cfgs).
        num_classes: classifier output size.
        in_chans: number of input image channels.
        **kwargs: forwarded to the Xception71 constructor.
    """
    default_cfg = default_cfgs['gluon_xception71']
    model = Xception71(num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    # FIX: removed dataset-dump residue ("| ... | import torch") that had been
    # fused onto this return line; it was not valid Python.
    return model
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from .registry import register_model
from .helpers import load_pretrained
from .adaptive_avgmax_pool import SelectAdaptivePool2d
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
__all__ = ['Xception65', 'Xception71']
# Per-model default configuration consumed by load_pretrained() and by
# training/eval scripts: checkpoint URL, expected input geometry,
# preprocessing constants, and the names of the first conv / classifier
# layers (used when adapting in_chans or num_classes).
default_cfgs = {
    'gluon_xception65': {
        'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth',
        'input_size': (3, 299, 299),
        'crop_pct': 0.875,
        'pool_size': (10, 10),
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'num_classes': 1000,
        'first_conv': 'conv1',
        'classifier': 'fc'
        # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
    },
    'gluon_xception71': {
        'url': '',  # empty URL: no pretrained checkpoint wired up for this variant
        'input_size': (3, 299, 299),
        'crop_pct': 0.875,
        'pool_size': (10, 10),
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'num_classes': 1000,
        'first_conv': 'conv1',
        'classifier': 'fc'
        # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
    }
}
""" PADDING NOTES
The original PyTorch and Gluon impl of these models dutifully reproduced the
aligned padding added to Tensorflow models for Deeplab. This padding was compensating
for Tensorflow 'SAME' padding. PyTorch symmetric padding behaves the way we'd want it to.
So, I'm phasing out the 'fixed_padding' ported from TF and replacing with normal
PyTorch padding, some asserts to validate the equivalence for any scenario we'd
care about before removing altogether.
"""
_USE_FIXED_PAD = False
def _pytorch_padding(kernel_size, stride=1, dilation=1, **_):
if _USE_FIXED_PAD:
return 0 # FIXME remove once verified
else:
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
# FIXME remove once verified
fp = _fixed_padding(kernel_size, dilation)
assert all(padding == p for p in fp)
return padding
def _fixed_padding(kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
return [pad_beg, pad_end, pad_beg, pad_end]
class SeparableConv2d(nn.Module):
    """Depthwise-separable conv: depthwise conv -> norm -> pointwise conv.

    The norm layer sits between the depthwise and pointwise convolutions,
    matching the Gluon/Deeplab variant of Xception. Submodule names
    (conv_dw, bn, conv_pw) must stay stable for pretrained state dicts.
    """
    def __init__(self, inplanes, planes, kernel_size=3, stride=1,
                 dilation=1, bias=False, norm_layer=None, norm_kwargs=None):
        super(SeparableConv2d, self).__init__()
        norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        self.kernel_size = kernel_size
        self.dilation = dilation
        padding = _fixed_padding(self.kernel_size, self.dilation)
        if _USE_FIXED_PAD and any(p > 0 for p in padding):
            # Legacy explicit TF 'SAME'-style padding; disabled by default
            # (_USE_FIXED_PAD is False), in which case conv_dw pads instead.
            self.fixed_padding = nn.ZeroPad2d(padding)
        else:
            self.fixed_padding = None
        # depthwise convolution
        # NOTE(review): norm_layer=None default would raise below; every
        # caller in this file passes an explicit norm_layer.
        self.conv_dw = nn.Conv2d(
            inplanes, inplanes, kernel_size, stride=stride,
            padding=_pytorch_padding(kernel_size, stride, dilation), dilation=dilation, groups=inplanes, bias=bias)
        self.bn = norm_layer(num_features=inplanes, **norm_kwargs)
        # pointwise convolution
        self.conv_pw = nn.Conv2d(inplanes, planes, kernel_size=1, bias=bias)
    def forward(self, x):
        if self.fixed_padding is not None:
            # FIXME remove once verified
            x = self.fixed_padding(x)
        x = self.conv_dw(x)
        x = self.bn(x)
        x = self.conv_pw(x)
        return x
class Block(nn.Module):
    """Xception block: ReLU/SeparableConv2d/norm triples plus a skip path.

    The rep submodules are stored in an OrderedDict with names
    'act%d'/'conv%d'/'bn%d'; these names must remain stable so pretrained
    state dicts keep loading.

    Args:
        inplanes: input channel count.
        planes: output channel count.
        num_reps: number of separable-conv repetitions.
        stride: stride of the final (downsampling) separable conv.
        dilation: dilation applied to the separable convs.
        norm_layer: normalization layer factory (e.g. nn.BatchNorm2d).
        norm_kwargs: extra kwargs for norm_layer.
        start_with_relu: prepend a ReLU before the first conv.
        grow_first: widen channels in the first conv (True) or last (False).
        is_last: append an extra stride-1 conv when stride == 1.
    """
    def __init__(self, inplanes, planes, num_reps, stride=1, dilation=1, norm_layer=None,
                 norm_kwargs=None, start_with_relu=True, grow_first=True, is_last=False):
        super(Block, self).__init__()
        norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        # 1x1 projection skip path when shape changes; identity otherwise.
        if planes != inplanes or stride != 1:
            self.skip = nn.Sequential()
            # FIX: removed a stray trailing comma after this call, which made
            # the statement a discarded (None,) tuple expression.
            self.skip.add_module('conv1', nn.Conv2d(
                inplanes, planes, 1, stride=stride, bias=False))
            self.skip.add_module('bn1', norm_layer(num_features=planes, **norm_kwargs))
        else:
            self.skip = None
        rep = OrderedDict()
        l = 1
        filters = inplanes
        if grow_first:
            if start_with_relu:
                rep['act%d' % l] = nn.ReLU(inplace=False)  # NOTE: silent failure if inplace=True here
            rep['conv%d' % l] = SeparableConv2d(
                inplanes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            filters = planes
            l += 1
        for _ in range(num_reps - 1):
            if grow_first or start_with_relu:
                # FIXME being conservative with inplace here, think it's fine to leave True?
                rep['act%d' % l] = nn.ReLU(inplace=grow_first or not start_with_relu)
            rep['conv%d' % l] = SeparableConv2d(
                filters, filters, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=filters, **norm_kwargs)
            l += 1
        if not grow_first:
            # Channel growth deferred to the final conv of the chain.
            rep['act%d' % l] = nn.ReLU(inplace=True)
            rep['conv%d' % l] = SeparableConv2d(
                inplanes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            l += 1
        if stride != 1:
            # Downsampling conv closes the block.
            rep['act%d' % l] = nn.ReLU(inplace=True)
            rep['conv%d' % l] = SeparableConv2d(
                planes, planes, 3, stride, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            l += 1
        elif is_last:
            rep['act%d' % l] = nn.ReLU(inplace=True)
            rep['conv%d' % l] = SeparableConv2d(
                planes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            l += 1
        self.rep = nn.Sequential(rep)
    def forward(self, x):
        skip = x
        if self.skip is not None:
            skip = self.skip(skip)
        x = self.rep(x) + skip
        return x
class Xception65(nn.Module):
    """Modified Aligned Xception-65.

    Args:
        num_classes: classifier output size.
        in_chans: number of input image channels.
        output_stride: total downsampling factor of the trunk (32, 16 or 8);
            smaller strides are traded for dilation in the later blocks.
        norm_layer: normalization layer factory.
        norm_kwargs: extra kwargs passed to every norm layer.
        drop_rate: dropout probability applied before the classifier.
        global_pool: pooling type for SelectAdaptivePool2d.
    """
    def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None, drop_rate=0., global_pool='avg'):
        super(Xception65, self).__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        # Map the requested output stride onto per-stage strides/dilations.
        if output_stride == 32:
            entry_block3_stride = 2
            exit_block20_stride = 2
            middle_block_dilation = 1
            exit_block_dilations = (1, 1)
        elif output_stride == 16:
            entry_block3_stride = 2
            exit_block20_stride = 1
            middle_block_dilation = 1
            exit_block_dilations = (1, 2)
        elif output_stride == 8:
            entry_block3_stride = 1
            exit_block20_stride = 1
            middle_block_dilation = 2
            exit_block_dilations = (2, 4)
        else:
            raise NotImplementedError
        # Entry flow
        self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = norm_layer(num_features=32, **norm_kwargs)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False)
        # FIX: pass **norm_kwargs here like every other norm layer in this
        # model; it was silently dropped for bn2 only.
        self.bn2 = norm_layer(num_features=64, **norm_kwargs)
        self.block1 = Block(
            64, 128, num_reps=2, stride=2,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=False)
        self.block2 = Block(
            128, 256, num_reps=2, stride=2,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True)
        self.block3 = Block(
            256, 728, num_reps=2, stride=entry_block3_stride,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True, is_last=True)
        # Middle flow
        self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block(
            728, 728, num_reps=3, stride=1, dilation=middle_block_dilation,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True))
            for i in range(4, 20)]))
        # Exit flow
        self.block20 = Block(
            728, 1024, num_reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=False, is_last=True)
        self.conv3 = SeparableConv2d(
            1024, 1536, 3, stride=1, dilation=exit_block_dilations[1],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.bn3 = norm_layer(num_features=1536, **norm_kwargs)
        self.conv4 = SeparableConv2d(
            1536, 1536, 3, stride=1, dilation=exit_block_dilations[1],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.bn4 = norm_layer(num_features=1536, **norm_kwargs)
        self.num_features = 2048
        self.conv5 = SeparableConv2d(
            1536, self.num_features, 3, stride=1, dilation=exit_block_dilations[1],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.bn5 = norm_layer(num_features=self.num_features, **norm_kwargs)
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes)
    def get_classifier(self):
        """Return the classification head."""
        return self.fc
    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the pooling + classifier head; num_classes=0 removes it."""
        self.num_classes = num_classes
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes) if num_classes else None
    def forward_features(self, x):
        """Run the trunk; returns the final (num_features-channel) feature map."""
        # Entry flow
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.block1(x)
        # block1 was built with start_with_relu=False and its rep ends in a
        # norm layer, so apply the activation explicitly here.
        x = self.relu(x)
        # c1 = x
        x = self.block2(x)
        # c2 = x
        x = self.block3(x)
        # Middle flow
        x = self.mid(x)
        # c3 = x
        # Exit flow
        x = self.block20(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu(x)
        return x
    def forward(self, x):
        """Classify a batch: features -> global pool -> (dropout) -> fc logits."""
        x = self.forward_features(x)
        x = self.global_pool(x).flatten(1)
        if self.drop_rate:
            # BUG FIX: F.dropout is not in-place; the original discarded the
            # return value, so dropout was silently never applied.
            x = F.dropout(x, self.drop_rate, training=self.training)
        x = self.fc(x)
        return x
class Xception71(nn.Module):
    """Modified Aligned Xception-71.

    Deeper entry flow than Xception-65: block2 is a sequence of three
    Blocks (128->256->256->728) instead of a single one.

    Args:
        num_classes: classifier output size.
        in_chans: number of input image channels.
        output_stride: total downsampling factor of the trunk (32, 16 or 8).
        norm_layer: normalization layer factory.
        norm_kwargs: extra kwargs passed to every norm layer.
        drop_rate: dropout probability applied before the classifier.
        global_pool: pooling type for SelectAdaptivePool2d.
    """
    def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None, drop_rate=0., global_pool='avg'):
        super(Xception71, self).__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        # Map the requested output stride onto per-stage strides/dilations.
        if output_stride == 32:
            entry_block3_stride = 2
            exit_block20_stride = 2
            middle_block_dilation = 1
            exit_block_dilations = (1, 1)
        elif output_stride == 16:
            entry_block3_stride = 2
            exit_block20_stride = 1
            middle_block_dilation = 1
            exit_block_dilations = (1, 2)
        elif output_stride == 8:
            entry_block3_stride = 1
            exit_block20_stride = 1
            middle_block_dilation = 2
            exit_block_dilations = (2, 4)
        else:
            raise NotImplementedError
        # Entry flow
        self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = norm_layer(num_features=32, **norm_kwargs)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False)
        # FIX: pass **norm_kwargs here like every other norm layer in this
        # model; it was silently dropped for bn2 only.
        self.bn2 = norm_layer(num_features=64, **norm_kwargs)
        self.block1 = Block(
            64, 128, num_reps=2, stride=2, norm_layer=norm_layer,
            norm_kwargs=norm_kwargs, start_with_relu=False)
        self.block2 = nn.Sequential(*[
            Block(
                128, 256, num_reps=2, stride=1, norm_layer=norm_layer,
                norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True),
            Block(
                256, 256, num_reps=2, stride=2, norm_layer=norm_layer,
                norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True),
            Block(
                256, 728, num_reps=2, stride=2, norm_layer=norm_layer,
                norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True)])
        self.block3 = Block(
            728, 728, num_reps=2, stride=entry_block3_stride, norm_layer=norm_layer,
            norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True, is_last=True)
        # Middle flow
        self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block(
            728, 728, num_reps=3, stride=1, dilation=middle_block_dilation,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True))
            for i in range(4, 20)]))
        # Exit flow
        self.block20 = Block(
            728, 1024, num_reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=False, is_last=True)
        self.conv3 = SeparableConv2d(
            1024, 1536, 3, stride=1, dilation=exit_block_dilations[1],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.bn3 = norm_layer(num_features=1536, **norm_kwargs)
        self.conv4 = SeparableConv2d(
            1536, 1536, 3, stride=1, dilation=exit_block_dilations[1],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.bn4 = norm_layer(num_features=1536, **norm_kwargs)
        self.num_features = 2048
        self.conv5 = SeparableConv2d(
            1536, self.num_features, 3, stride=1, dilation=exit_block_dilations[1],
            norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.bn5 = norm_layer(num_features=self.num_features, **norm_kwargs)
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes)
    def get_classifier(self):
        """Return the classification head."""
        return self.fc
    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the pooling + classifier head; num_classes=0 removes it."""
        self.num_classes = num_classes
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes) if num_classes else None
    def forward_features(self, x):
        """Run the trunk; returns the final (num_features-channel) feature map."""
        # Entry flow
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.block1(x)
        # block1 was built with start_with_relu=False and its rep ends in a
        # norm layer, so apply the activation explicitly here.
        x = self.relu(x)
        # low_level_feat = x
        x = self.block2(x)
        # c2 = x
        x = self.block3(x)
        # Middle flow
        x = self.mid(x)
        # c3 = x
        # Exit flow
        x = self.block20(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu(x)
        return x
    def forward(self, x):
        """Classify a batch: features -> global pool -> (dropout) -> fc logits."""
        x = self.forward_features(x)
        x = self.global_pool(x).flatten(1)
        if self.drop_rate:
            # BUG FIX: F.dropout is not in-place; the original discarded the
            # return value, so dropout was silently never applied.
            x = F.dropout(x, self.drop_rate, training=self.training)
        x = self.fc(x)
        return x
@register_model
def gluon_xception65(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build a Modified Aligned Xception-65, optionally loading the
    released pretrained weights."""
    cfg = default_cfgs['gluon_xception65']
    model = Xception65(num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def gluon_xception71(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build a Modified Aligned Xception-71.

    Note: default_cfgs has an empty URL for this variant, so
    ``pretrained=True`` has no checkpoint to load from.
    """
    default_cfg = default_cfgs['gluon_xception71']
    model = Xception71(num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    # FIX: removed dataset-dump residue ("| 0.73... | 0.38... |") that had
    # been fused onto this return line; it was not valid Python.
    return model
import argparse
import os
import random
from glob import glob
from pathlib import Path
import click
import numpy as np
import tifffile
from PIL import Image
from tqdm import tqdm
def img_loader(fp):
    """Load an image file as a numpy array.

    JPEG/PNG files go through PIL; anything else (e.g. TIFF) through
    tifffile.
    """
    suffix = Path(fp).suffix.lower()
    if suffix in (".jpg", ".jpeg", ".png"):
        return np.array(Image.open(fp))
    return tifffile.imread(fp)
@click.command(help='calcuate the datasets mean and std value')
@click.option('--root', type=click.Path(exists=True), required=True, help='root dir of image datasets')
@click.option("--percent", default=0.5, type=float, help="percent of images to calcuate")
@click.option("--channels", default=3, type=int, help="datasets image channels")
@click.option("--maxvalue", default=255, type=float, help="max value of all images default: {255}")
@click.option("--extension", type=str, default=('jpg', 'jpeg', 'png', 'tif', 'tiff'), multiple=True,
              help="file suffix to calcuate, default ('jpg', 'jpeg', 'png', 'tif', 'tiff')")
def calcuate_mean_std(root, percent, channels, maxvalue, extension):
    """Estimate per-channel mean/std over a random sample of the dataset.

    Prints both the values scaled to [0, 1] (divided by maxvalue) and the
    original-scale values.
    """
    # '._' filter skips macOS AppleDouble resource-fork files.
    files = [x for x in Path(root).glob('**/*') if x.suffix.lower()[1:] in extension and '._' not in str(x)]
    random.shuffle(files)
    # Keep a random `percent` fraction of the files.
    files = files[0: int(len(files) * percent)]
    if not files:
        print("INFO: No Image Found!")
        return
    pixel_num = 0  # store all pixel number in the dataset
    channel_sum = np.zeros(channels)
    channel_sum_squared = np.zeros(channels)
    for item in tqdm(files):
        # NOTE(review): assumes every sampled image has exactly `channels`
        # channels; a grayscale/alpha image would break the broadcast — verify.
        arr = img_loader(item)
        arr = arr / maxvalue
        pixel_num += arr.shape[0] * arr.shape[1]
        channel_sum += np.sum(arr, axis=(0, 1))
        channel_sum_squared += np.sum(np.square(arr), axis=(0, 1))
    mean = channel_sum / pixel_num
    # std via E[x^2] - E[x]^2 over the sampled pixels.
    std = np.sqrt(channel_sum_squared / pixel_num - np.square(mean))
    print("scaled mean:{} \nscaled std: {} ".format(mean, std))
    print("orginal mean: {} \norginal std: {}".format(mean * maxvalue, std * maxvalue))
if __name__ == "__main__":
    # FIX: removed dataset-dump residue ("| torchsat_imc/... | import argparse")
    # that had been fused onto this line; it was not valid Python.
    calcuate_mean_std()
import os
import random
from glob import glob
from pathlib import Path
import click
import numpy as np
import tifffile
from PIL import Image
from tqdm import tqdm
def img_loader(fp):
    """Read an image from disk into a numpy array (PIL for jpg/jpeg/png,
    tifffile otherwise)."""
    pil_suffixes = [".jpg", ".jpeg", ".png"]
    if Path(fp).suffix.lower() in pil_suffixes:
        loaded = np.array(Image.open(fp))
    else:
        loaded = tifffile.imread(fp)
    return loaded
@click.command(help='calcuate the datasets mean and std value')
@click.option('--root', type=click.Path(exists=True), required=True, help='root dir of image datasets')
@click.option("--percent", default=0.5, type=float, help="percent of images to calcuate")
@click.option("--channels", default=3, type=int, help="datasets image channels")
@click.option("--maxvalue", default=255, type=float, help="max value of all images default: {255}")
@click.option("--extension", type=str, default=('jpg', 'jpeg', 'png', 'tif', 'tiff'), multiple=True,
              help="file suffix to calcuate, default ('jpg', 'jpeg', 'png', 'tif', 'tiff')")
def calcuate_mean_std(root, percent, channels, maxvalue, extension):
    """Estimate per-channel mean/std over a random sample of the dataset.

    Prints values scaled to [0, 1] (divided by maxvalue) and original-scale
    values.
    """
    # '._' filter skips macOS AppleDouble resource-fork files.
    files = [x for x in Path(root).glob('**/*') if x.suffix.lower()[1:] in extension and '._' not in str(x)]
    random.shuffle(files)
    # Keep a random `percent` fraction of the files.
    files = files[0: int(len(files) * percent)]
    if not files:
        print("INFO: No Image Found!")
        return
    pixel_num = 0  # store all pixel number in the dataset
    channel_sum = np.zeros(channels)
    channel_sum_squared = np.zeros(channels)
    for item in tqdm(files):
        # NOTE(review): assumes every sampled image has exactly `channels`
        # channels; a grayscale/alpha image would break the broadcast — verify.
        arr = img_loader(item)
        arr = arr / maxvalue
        pixel_num += arr.shape[0] * arr.shape[1]
        channel_sum += np.sum(arr, axis=(0, 1))
        channel_sum_squared += np.sum(np.square(arr), axis=(0, 1))
    mean = channel_sum / pixel_num
    # std via E[x^2] - E[x]^2 over the sampled pixels.
    std = np.sqrt(channel_sum_squared / pixel_num - np.square(mean))
    print("scaled mean:{} \nscaled std: {} ".format(mean, std))
    print("orginal mean: {} \norginal std: {}".format(mean * maxvalue, std * maxvalue))
if __name__ == "__main__":
    # FIX: removed dataset-dump residue ("| 0.47... | 0.22... |") that had
    # been fused onto this line; it was not valid Python.
    calcuate_mean_std()
import numpy as np
from .. import ccllib as lib
from ..core import check
from ..pk2d import Pk2D
from ..power import linear_matter_power, nonlin_matter_power
from ..background import growth_factor
from .tracers import PTTracer
# FAST-PT is an optional dependency; record availability so PTCalculator can
# raise a clear error at construction time instead of failing at import.
try:
    import fastpt as fpt
    HAVE_FASTPT = True
except ImportError:
    HAVE_FASTPT = False
class PTCalculator(object):
    """ This class implements a set of methods that can be
    used to compute the various components needed to estimate
    perturbation theory correlations. These calculations are
    currently based on FAST-PT
    (https://github.com/JoeMcEwen/FAST-PT).
    Args:
        with_NC (bool): set to True if you'll want to use
            this calculator to compute correlations involving
            number counts.
        with_IA(bool): set to True if you'll want to use
            this calculator to compute correlations involving
            intrinsic alignments.
        with_dd(bool): set to True if you'll want to use
            this calculator to compute the one-loop matter power
            spectrum.
        log10k_min (float): decimal logarithm of the minimum
            Fourier scale (in Mpc^-1) for which you want to
            calculate perturbation theory quantities.
        log10k_max (float): decimal logarithm of the maximum
            Fourier scale (in Mpc^-1) for which you want to
            calculate perturbation theory quantities.
        nk_per_decade (int): number of internal wavenumber
            samples per decade of k.
        pad_factor (float): fraction of the log(k) interval
            you want to add as padding for FFTLog calculations
            within FAST-PT.
        low_extrap (float): decimal logarithm of the minimum
            Fourier scale (in Mpc^-1) for which FAST-PT will
            extrapolate.
        high_extrap (float): decimal logarithm of the maximum
            Fourier scale (in Mpc^-1) for which FAST-PT will
            extrapolate.
        P_window (array_like or None): 2-element array describing
            the tapering window used by FAST-PT. See FAST-PT
            documentation for more details.
        C_window (float): `C_window` parameter used by FAST-PT
            to smooth the edges and avoid ringing. See FAST-PT
            documentation for more details.
    """
    def __init__(self, with_NC=False, with_IA=False, with_dd=True,
                 log10k_min=-4, log10k_max=2, nk_per_decade=20,
                 pad_factor=1, low_extrap=-5, high_extrap=3,
                 P_window=None, C_window=.75):
        assert HAVE_FASTPT, (
            "You must have the `FAST-PT` python package "
            "installed to use CCL to get PT observables! "
            "You can install it with pip install fast-pt.")
        self.with_dd = with_dd
        self.with_NC = with_NC
        self.with_IA = with_IA
        self.P_window = P_window
        self.C_window = C_window
        # Tell FAST-PT which sets of kernels to precompute.
        to_do = ['one_loop_dd']
        if self.with_NC:
            to_do.append('dd_bias')
        if self.with_IA:
            to_do.append('IA')
        nk_total = int((log10k_max - log10k_min) * nk_per_decade)
        self.ks = np.logspace(log10k_min, log10k_max, nk_total)
        n_pad = int(pad_factor * len(self.ks))
        self.pt = fpt.FASTPT(self.ks, to_do=to_do,
                             low_extrap=low_extrap,
                             high_extrap=high_extrap,
                             n_pad=n_pad)
        # Term caches; populated by update_pk(), None until then.
        self.one_loop_dd = None
        self.dd_bias = None
        self.ia_ta = None
        self.ia_tt = None
        self.ia_mix = None
    def update_pk(self, pk):
        """ Update the internal PT arrays.
        Args:
            pk (array_like): linear power spectrum sampled at the
                internal `k` values used by this calculator.
        Raises:
            ValueError: if `pk` does not have the same shape as
                the internal wavenumber array `ks`.
        """
        if pk.shape != self.ks.shape:
            raise ValueError("Input spectrum has wrong shape")
        if self.with_NC:
            # one_loop_dd_bias also yields the one-loop dd term, so the
            # dd quantities come along for free in this branch.
            self._get_dd_bias(pk)
            self.with_dd = True
        elif self.with_dd:
            self._get_one_loop_dd(pk)
        if self.with_IA:
            self._get_ia_bias(pk)
    def _get_one_loop_dd(self, pk):
        # Precompute quantities needed for one-loop dd
        # power spectra. Only needed if dd_bias is not called.
        self.one_loop_dd = self.pt.one_loop_dd(pk,
                                               P_window=self.P_window,
                                               C_window=self.C_window)
    def _get_dd_bias(self, pk):
        # Precompute quantities needed for number counts
        # power spectra.
        self.dd_bias = self.pt.one_loop_dd_bias(pk,
                                                P_window=self.P_window,
                                                C_window=self.C_window)
        # Keep a length-1 slice (not element [0]) so that one_loop_dd[0]
        # indexes the same way as the direct one_loop_dd() output above.
        self.one_loop_dd = self.dd_bias[0:1]
    def _get_ia_bias(self, pk):
        # Precompute quantities needed for intrinsic alignment
        # power spectra.
        self.ia_ta = self.pt.IA_ta(pk,
                                   P_window=self.P_window,
                                   C_window=self.C_window)
        self.ia_tt = self.pt.IA_tt(pk,
                                   P_window=self.P_window,
                                   C_window=self.C_window)
        self.ia_mix = self.pt.IA_mix(pk,
                                     P_window=self.P_window,
                                     C_window=self.C_window)
    def get_pgg(self, Pd1d1, g4,
                b11, b21, bs1, b12, b22, bs2,
                sub_lowk):
        """ Get the number counts auto-spectrum at the internal
        set of wavenumbers (given by this object's `ks` attribute)
        and a number of redshift values.
        Args:
            Pd1d1 (array_like): 1-loop matter power spectrum at the
                wavenumber values given by this object's `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
            b11 (array_like): 1-st order bias for the first tracer
                being correlated at the same set of input redshifts.
            b21 (array_like): 2-nd order bias for the first tracer
                being correlated at the same set of input redshifts.
            bs1 (array_like): tidal bias for the first tracer
                being correlated at the same set of input redshifts.
            b12 (array_like): 1-st order bias for the second tracer
                being correlated at the same set of input redshifts.
            b22 (array_like): 2-nd order bias for the second tracer
                being correlated at the same set of input redshifts.
            bs2 (array_like): tidal bias for the second tracer
                being correlated at the same set of input redshifts.
            sub_lowk (bool): if True, the small-scale white noise
                contribution will be subtracted.
        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        # Growth-scaled bias cross-terms from one_loop_dd_bias output.
        Pd1d2 = g4[None, :] * self.dd_bias[2][:, None]
        Pd2d2 = g4[None, :] * self.dd_bias[3][:, None]
        Pd1s2 = g4[None, :] * self.dd_bias[4][:, None]
        Pd2s2 = g4[None, :] * self.dd_bias[5][:, None]
        Ps2s2 = g4[None, :] * self.dd_bias[6][:, None]
        s4 = 0.
        if sub_lowk:
            s4 = g4 * self.dd_bias[7]
            s4 = s4[None, :]
        pgg = ((b11*b12)[None, :] * Pd1d1 +
               0.5*(b11*b22 + b12*b21)[None, :] * Pd1d2 +
               0.25*(b21*b22)[None, :] * (Pd2d2 - 2.*s4) +
               0.5*(b11*bs2 + b12*bs1)[None, :] * Pd1s2 +
               0.25*(b21*bs2 + b22*bs1)[None, :] * (Pd2s2 - (4./3.)*s4) +
               0.25*(bs1*bs2)[None, :] * (Ps2s2 - (8./9.)*s4))
        return pgg
    def get_pgi(self, Pd1d1, g4, b1, b2, bs, c1, c2, cd):
        """ Get the number counts - IA cross-spectrum at the
        internal set of wavenumbers (given by this object's
        `ks` attribute) and a number of redshift values.
        .. note:: The full non-linear model for the cross-correlation
                  between number counts and intrinsic alignments is
                  still work in progress in FastPT. As a workaround
                  CCL assumes a non-linear treatment of IAs, but only
                  linearly biased number counts.
        Args:
            Pd1d1 (array_like): 1-loop matter power spectrum at the
                wavenumber values given by this object's `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
            b1 (array_like): 1-st order bias for the number counts
                being correlated at the same set of input redshifts.
            b2 (array_like): 2-nd order bias for the number counts
                being correlated at the same set of input redshifts.
                Currently unused (see note above).
            bs (array_like): tidal bias for the number counts
                being correlated at the same set of input redshifts.
                Currently unused (see note above).
            c1 (array_like): 1-st order bias for the IA tracer
                being correlated at the same set of input redshifts.
            c2 (array_like): 2-nd order bias for the IA tracer
                being correlated at the same set of input redshifts.
            cd (array_like): overdensity bias for the IA tracer
                being correlated at the same set of input redshifts.
        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        a00e, c00e, a0e0e, a0b0b = self.ia_ta
        a0e2, b0e2, d0ee2, d0bb2 = self.ia_mix
        pgi = b1[None, :] * (c1[None, :] * Pd1d1 +
                             (g4*cd)[None, :] * (a00e + c00e)[:, None] +
                             (g4*c2)[None, :] * (a0e2 + b0e2)[:, None])
        return pgi
    def get_pgm(self, Pd1d1, g4, b1, b2, bs):
        """ Get the number counts - matter cross-spectrum at the
        internal set of wavenumbers (given by this object's `ks`
        attribute) and a number of redshift values.
        Args:
            Pd1d1 (array_like): 1-loop matter power spectrum at the
                wavenumber values given by this object's `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
            b1 (array_like): 1-st order bias for the number counts
                tracer being correlated at the same set of input
                redshifts.
            b2 (array_like): 2-nd order bias for the number counts
                tracer being correlated at the same set of input
                redshifts.
            bs (array_like): tidal bias for the number counts
                tracer being correlated at the same set of input
                redshifts.
        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        Pd1d2 = g4[None, :] * self.dd_bias[2][:, None]
        Pd1s2 = g4[None, :] * self.dd_bias[4][:, None]
        pgm = (b1[None, :] * Pd1d1 +
               0.5 * b2[None, :] * Pd1d2 +
               0.5 * bs[None, :] * Pd1s2)
        return pgm
    def get_pii(self, Pd1d1, g4, c11, c21, cd1,
                c12, c22, cd2, return_bb=False,
                return_both=False):
        """ Get the intrinsic alignment auto-spectrum at the internal
        set of wavenumbers (given by this object's `ks` attribute)
        and a number of redshift values.
        Args:
            Pd1d1 (array_like): 1-loop matter power spectrum at the
                wavenumber values given by this object's `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
            c11 (array_like): 1-st order bias for the first tracer
                being correlated at the same set of input redshifts.
            c21 (array_like): 2-nd order bias for the first tracer
                being correlated at the same set of input redshifts.
            cd1 (array_like): overdensity bias for the first tracer
                being correlated at the same set of input redshifts.
            c12 (array_like): 1-st order bias for the second tracer
                being correlated at the same set of input redshifts.
            c22 (array_like): 2-nd order bias for the second tracer
                being correlated at the same set of input redshifts.
            cd2 (array_like): overdensity bias for the second tracer
                being correlated at the same set of input redshifts.
            return_bb (bool): if `True`, the B-mode power spectrum
                will be returned.
            return_both (bool): if `True`, both the E- and B-mode
                power spectra will be returned. Supersedes `return_bb`.
        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        a00e, c00e, a0e0e, a0b0b = self.ia_ta
        ae2e2, ab2b2 = self.ia_tt
        a0e2, b0e2, d0ee2, d0bb2 = self.ia_mix
        if return_both:
            return_bb = True
        if return_bb:
            pii_bb = ((cd1*cd2*g4)[None, :] * a0b0b[:, None] +
                      (c21*c22*g4)[None, :] * ab2b2[:, None] +
                      ((cd1*c22 + c21*cd2)*g4)[None, :] * d0bb2[:, None])
            if not return_both:
                pii = pii_bb
        if (not return_bb) or return_both:
            pii = ((c11*c12)[None, :] * Pd1d1 +
                   ((c11*cd2 + c12*cd1)*g4)[None, :] * (a00e + c00e)[:, None] +
                   (cd1*cd2*g4)[None, :] * a0e0e[:, None] +
                   (c21*c22*g4)[None, :] * ae2e2[:, None] +
                   ((c11*c22 + c21*c12)*g4)[None, :] * (a0e2 + b0e2)[:, None] +
                   ((cd1*c22 + cd2*c21)*g4)[None, :] * d0ee2[:, None])
        if return_both:
            return pii, pii_bb
        else:
            return pii
    def get_pim(self, Pd1d1, g4, c1, c2, cd):
        """ Get the intrinsic alignment - matter cross-spectrum at
        the internal set of wavenumbers (given by this object's `ks`
        attribute) and a number of redshift values.
        Args:
            Pd1d1 (array_like): 1-loop matter power spectrum at the
                wavenumber values given by this object's `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
            c1 (array_like): 1-st order bias for the IA
                tracer being correlated at the same set of input
                redshifts.
            c2 (array_like): 2-nd order bias for the IA
                tracer being correlated at the same set of input
                redshifts.
            cd (array_like): overdensity bias for the IA
                tracer being correlated at the same set of input
                redshifts.
        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        a00e, c00e, a0e0e, a0b0b = self.ia_ta
        a0e2, b0e2, d0ee2, d0bb2 = self.ia_mix
        pim = (c1[None, :] * Pd1d1 +
               (g4*cd)[None, :] * (a00e + c00e)[:, None] +
               (g4*c2)[None, :] * (a0e2 + b0e2)[:, None])
        return pim
    def get_pmm(self, Pd1d1_lin, g4):
        """ Get the one-loop matter power spectrum.
        Args:
            Pd1d1_lin (array_like): linear matter power spectrum
                at the wavenumber values given by this object's
                `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        P1loop = g4[None, :] * self.one_loop_dd[0][:, None]
        pmm = (Pd1d1_lin + P1loop)
        return pmm
def get_pt_pk2d(cosmo, tracer1, tracer2=None, ptc=None,
                sub_lowk=False, nonlin_pk_type='nonlinear',
                a_arr=None, extrap_order_lok=1, extrap_order_hik=2,
                return_ia_bb=False, return_ia_ee_and_bb=False):
    """Returns a :class:`~pyccl.pk2d.Pk2D` object containing
    the PT power spectrum for two quantities defined by
    two :class:`~pyccl.nl_pt.tracers.PTTracer` objects.

    .. note:: The full non-linear model for the cross-correlation
              between number counts and intrinsic alignments is
              still work in progress in FastPT. As a workaround
              CCL assumes a non-linear treatment of IAs, but only
              linearly biased number counts.

    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object.
        tracer1 (:class:`~pyccl.nl_pt.tracers.PTTracer`): the first
            tracer being correlated.
        ptc (:class:`PTCalculator`): a perturbation theory
            calculator.
        tracer2 (:class:`~pyccl.nl_pt.tracers.PTTracer`): the second
            tracer being correlated. If `None`, the auto-correlation
            of the first tracer will be returned.
        sub_lowk (bool): if True, the small-scale white noise
            contribution will be subtracted for number counts
            auto-correlations.
        nonlin_pk_type (str): type of 1-loop matter power spectrum
            to use. 'linear' for linear P(k), 'nonlinear' for the internal
            non-linear power spectrum, 'spt' for standard perturbation
            theory power spectrum. Default: 'nonlinear'.
        a_arr (array): an array holding values of the scale factor
            at which the power spectrum should be calculated for
            interpolation. If `None`, the internal values used by
            `cosmo` will be used.
        extrap_order_lok (int): extrapolation order to be used on
            k-values below the minimum of the splines. See
            :class:`~pyccl.pk2d.Pk2D`.
        extrap_order_hik (int): extrapolation order to be used on
            k-values above the maximum of the splines. See
            :class:`~pyccl.pk2d.Pk2D`.
        return_ia_bb (bool): if `True`, the B-mode power spectrum
            for intrinsic alignments will be returned (if both
            input tracers are of type
            :class:`~pyccl.nl_pt.tracers.PTIntrinsicAlignmentTracer`)
            If `False` (default) E-mode power spectrum is returned.
        return_ia_ee_and_bb (bool): if `True`, the E-mode power spectrum
            for intrinsic alignments will be returned in addition to
            the B-mode one (if both input tracers are of type
            :class:`~pyccl.nl_pt.tracers.PTIntrinsicAlignmentTracer`)
            If `False` (default) E-mode power spectrum is returned.
            Supersedes `return_ia_bb`.

    Returns:
        :class:`~pyccl.pk2d.Pk2D`: PT power spectrum.
    """
    if a_arr is None:
        # Fall back to the scale-factor sampling used internally by `cosmo`.
        status = 0
        na = lib.get_pk_spline_na(cosmo.cosmo)
        a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status)
        check(status)
    if tracer2 is None:
        tracer2 = tracer1
    if not isinstance(tracer1, PTTracer):
        raise TypeError("tracer1 must be of type `PTTracer`")
    if not isinstance(tracer2, PTTracer):
        raise TypeError("tracer2 must be of type `PTTracer`")
    if ptc is None:
        # Build a calculator that covers exactly the tracer types requested.
        with_NC = ((tracer1.type == 'NC')
                   or (tracer2.type == 'NC'))
        with_IA = ((tracer1.type == 'IA')
                   or (tracer2.type == 'IA'))
        with_dd = nonlin_pk_type == 'spt'
        ptc = PTCalculator(with_dd=with_dd,
                           with_NC=with_NC,
                           with_IA=with_IA)
    if not isinstance(ptc, PTCalculator):
        raise TypeError("ptc should be of type `PTCalculator`")
    # A user-supplied calculator must have been configured for the
    # tracer combination requested here.
    if (tracer1.type == 'NC') or (tracer2.type == 'NC'):
        if not ptc.with_NC:
            raise ValueError("Need number counts bias, "
                             "but calculator didn't compute it")
    if (tracer1.type == 'IA') or (tracer2.type == 'IA'):
        if not ptc.with_IA:
            raise ValueError("Need intrinsic alignment bias, "
                             "but calculator didn't compute it")
    if nonlin_pk_type == 'spt':
        if not ptc.with_dd:
            raise ValueError("Need 1-loop matter power spectrum, "
                             "but calculator didn't compute it")
    if return_ia_ee_and_bb:
        return_ia_bb = True
    # z
    z_arr = 1. / a_arr - 1
    # P_lin(k) at z=0
    pk_lin_z0 = linear_matter_power(cosmo, ptc.ks, 1.)
    # Linear growth factor
    ga = growth_factor(cosmo, a_arr)
    ga4 = ga**4
    # update the PTC to have the required Pk components
    ptc.update_pk(pk_lin_z0)
    if nonlin_pk_type == 'nonlinear':
        Pd1d1 = np.array([nonlin_matter_power(cosmo, ptc.ks, a)
                          for a in a_arr]).T
    elif nonlin_pk_type == 'linear':
        Pd1d1 = np.array([linear_matter_power(cosmo, ptc.ks, a)
                          for a in a_arr]).T
    elif nonlin_pk_type == 'spt':
        pklin = np.array([linear_matter_power(cosmo, ptc.ks, a)
                          for a in a_arr]).T
        Pd1d1 = ptc.get_pmm(pklin, ga4)
    else:
        raise NotImplementedError("Nonlinear option %s not implemented yet" %
                                  (nonlin_pk_type))
    # Dispatch on the (tracer1, tracer2) type pair.
    if (tracer1.type == 'NC'):
        b11 = tracer1.b1(z_arr)
        b21 = tracer1.b2(z_arr)
        bs1 = tracer1.bs(z_arr)
        if (tracer2.type == 'NC'):
            b12 = tracer2.b1(z_arr)
            b22 = tracer2.b2(z_arr)
            bs2 = tracer2.bs(z_arr)
            p_pt = ptc.get_pgg(Pd1d1, ga4,
                               b11, b21, bs1, b12, b22, bs2,
                               sub_lowk)
        elif (tracer2.type == 'IA'):
            c12 = tracer2.c1(z_arr)
            c22 = tracer2.c2(z_arr)
            cd2 = tracer2.cdelta(z_arr)
            p_pt = ptc.get_pgi(Pd1d1, ga4,
                               b11, b21, bs1, c12, c22, cd2)
        elif (tracer2.type == 'M'):
            p_pt = ptc.get_pgm(Pd1d1, ga4,
                               b11, b21, bs1)
        else:
            raise NotImplementedError("Combination %s-%s not implemented yet" %
                                      (tracer1.type, tracer2.type))
    elif (tracer1.type == 'IA'):
        c11 = tracer1.c1(z_arr)
        c21 = tracer1.c2(z_arr)
        cd1 = tracer1.cdelta(z_arr)
        if (tracer2.type == 'IA'):
            c12 = tracer2.c1(z_arr)
            c22 = tracer2.c2(z_arr)
            cd2 = tracer2.cdelta(z_arr)
            p_pt = ptc.get_pii(Pd1d1, ga4,
                               c11, c21, cd1, c12, c22, cd2,
                               return_bb=return_ia_bb,
                               return_both=return_ia_ee_and_bb)
        elif (tracer2.type == 'NC'):
            b12 = tracer2.b1(z_arr)
            b22 = tracer2.b2(z_arr)
            bs2 = tracer2.bs(z_arr)
            p_pt = ptc.get_pgi(Pd1d1, ga4,
                               b12, b22, bs2, c11, c21, cd1)
        elif (tracer2.type == 'M'):
            p_pt = ptc.get_pim(Pd1d1, ga4,
                               c11, c21, cd1)
        else:
            raise NotImplementedError("Combination %s-%s not implemented yet" %
                                      (tracer1.type, tracer2.type))
    elif (tracer1.type == 'M'):
        if (tracer2.type == 'NC'):
            b12 = tracer2.b1(z_arr)
            b22 = tracer2.b2(z_arr)
            bs2 = tracer2.bs(z_arr)
            p_pt = ptc.get_pgm(Pd1d1, ga4,
                               b12, b22, bs2)
        elif (tracer2.type == 'IA'):
            c12 = tracer2.c1(z_arr)
            c22 = tracer2.c2(z_arr)
            cd2 = tracer2.cdelta(z_arr)
            p_pt = ptc.get_pim(Pd1d1, ga4,
                               c12, c22, cd2)
        elif (tracer2.type == 'M'):
            p_pt = Pd1d1
        else:
            raise NotImplementedError("Combination %s-%s not implemented yet" %
                                      (tracer1.type, tracer2.type))
    else:
        raise NotImplementedError("Combination %s-%s not implemented yet" %
                                  (tracer1.type, tracer2.type))
    # Wrap the 2D P(k, a) array into a Pk2D interpolator.
    # Fix: forward extrap_order_lok/extrap_order_hik to Pk2D; they were
    # previously accepted and documented but silently ignored.
    if return_ia_ee_and_bb:
        pt_pk_ee = Pk2D(a_arr=a_arr,
                        lk_arr=np.log(ptc.ks),
                        pk_arr=p_pt[0].T,
                        extrap_order_lok=extrap_order_lok,
                        extrap_order_hik=extrap_order_hik,
                        is_logp=False)
        pt_pk_bb = Pk2D(a_arr=a_arr,
                        lk_arr=np.log(ptc.ks),
                        pk_arr=p_pt[1].T,
                        extrap_order_lok=extrap_order_lok,
                        extrap_order_hik=extrap_order_hik,
                        is_logp=False)
        return pt_pk_ee, pt_pk_bb
    else:
        pt_pk = Pk2D(a_arr=a_arr,
                     lk_arr=np.log(ptc.ks),
                     pk_arr=p_pt.T,
                     extrap_order_lok=extrap_order_lok,
                     extrap_order_hik=extrap_order_hik,
                     is_logp=False)
        return pt_pk
from .. import ccllib as lib
from ..core import check
from ..pk2d import Pk2D
from ..power import linear_matter_power, nonlin_matter_power
from ..background import growth_factor
from .tracers import PTTracer
try:
import fastpt as fpt
HAVE_FASTPT = True
except ImportError:
HAVE_FASTPT = False
class PTCalculator(object):
    """ This class implements a set of methods that can be
    used to compute the various components needed to estimate
    perturbation theory correlations. These calculations are
    currently based on FAST-PT
    (https://github.com/JoeMcEwen/FAST-PT).

    Args:
        with_NC (bool): set to True if you'll want to use
            this calculator to compute correlations involving
            number counts.
        with_IA(bool): set to True if you'll want to use
            this calculator to compute correlations involving
            intrinsic alignments.
        with_dd(bool): set to True if you'll want to use
            this calculator to compute the one-loop matter power
            spectrum.
        log10k_min (float): decimal logarithm of the minimum
            Fourier scale (in Mpc^-1) for which you want to
            calculate perturbation theory quantities.
        log10k_max (float): decimal logarithm of the maximum
            Fourier scale (in Mpc^-1) for which you want to
            calculate perturbation theory quantities.
        pad_factor (float): fraction of the log(k) interval
            you want to add as padding for FFTLog calculations
            within FAST-PT.
        low_extrap (float): decimal logaritm of the minimum
            Fourier scale (in Mpc^-1) for which FAST-PT will
            extrapolate.
        high_extrap (float): decimal logaritm of the maximum
            Fourier scale (in Mpc^-1) for which FAST-PT will
            extrapolate.
        P_window (array_like or None): 2-element array describing
            the tapering window used by FAST-PT. See FAST-PT
            documentation for more details.
        C_window (float): `C_window` parameter used by FAST-PT
            to smooth the edges and avoid ringing. See FAST-PT
            documentation for more details.
    """
    def __init__(self, with_NC=False, with_IA=False, with_dd=True,
                 log10k_min=-4, log10k_max=2, nk_per_decade=20,
                 pad_factor=1, low_extrap=-5, high_extrap=3,
                 P_window=None, C_window=.75):
        assert HAVE_FASTPT, (
            "You must have the `FAST-PT` python package "
            "installed to use CCL to get PT observables! "
            "You can install it with pip install fast-pt.")
        self.with_dd = with_dd
        self.with_NC = with_NC
        self.with_IA = with_IA
        self.P_window = P_window
        self.C_window = C_window
        # Build the FAST-PT "to_do" list from the requested capabilities.
        to_do = ['one_loop_dd']
        if self.with_NC:
            to_do.append('dd_bias')
        if self.with_IA:
            to_do.append('IA')
        nk_total = int((log10k_max - log10k_min) * nk_per_decade)
        self.ks = np.logspace(log10k_min, log10k_max, nk_total)
        n_pad = int(pad_factor * len(self.ks))
        self.pt = fpt.FASTPT(self.ks, to_do=to_do,
                             low_extrap=low_extrap,
                             high_extrap=high_extrap,
                             n_pad=n_pad)
        # PT templates; all populated lazily by update_pk().
        self.one_loop_dd = None
        self.dd_bias = None
        self.ia_ta = None
        self.ia_tt = None
        self.ia_mix = None
    def update_pk(self, pk):
        """ Update the internal PT arrays.

        Args:
            pk (array_like): linear power spectrum sampled at the
                internal `k` values used by this calculator.

        Raises:
            ValueError: if `pk` is not sampled on this object's `ks` grid.
        """
        if pk.shape != self.ks.shape:
            raise ValueError("Input spectrum has wrong shape")
        if self.with_NC:
            self._get_dd_bias(pk)
            # dd_bias also yields the one-loop dd term, so the dd
            # capability is satisfied as a by-product.
            self.with_dd = True
        elif self.with_dd:
            self._get_one_loop_dd(pk)
        if self.with_IA:
            self._get_ia_bias(pk)
    def _get_one_loop_dd(self, pk):
        # Precompute quantities needed for one-loop dd
        # power spectra. Only needed if dd_bias is not called.
        self.one_loop_dd = self.pt.one_loop_dd(pk,
                                               P_window=self.P_window,
                                               C_window=self.C_window)
    def _get_dd_bias(self, pk):
        # Precompute quantities needed for number counts
        # power spectra.
        self.dd_bias = self.pt.one_loop_dd_bias(pk,
                                                P_window=self.P_window,
                                                C_window=self.C_window)
        # [0:1] keeps a tuple, matching the layout one_loop_dd() returns.
        self.one_loop_dd = self.dd_bias[0:1]
    def _get_ia_bias(self, pk):
        # Precompute quantities needed for intrinsic alignment
        # power spectra (tidal alignment, tidal torquing and mixed terms).
        self.ia_ta = self.pt.IA_ta(pk,
                                   P_window=self.P_window,
                                   C_window=self.C_window)
        self.ia_tt = self.pt.IA_tt(pk,
                                   P_window=self.P_window,
                                   C_window=self.C_window)
        self.ia_mix = self.pt.IA_mix(pk,
                                     P_window=self.P_window,
                                     C_window=self.C_window)
    def get_pgg(self, Pd1d1, g4,
                b11, b21, bs1, b12, b22, bs2,
                sub_lowk):
        """ Get the number counts auto-spectrum at the internal
        set of wavenumbers (given by this object's `ks` attribute)
        and a number of redshift values.

        Args:
            Pd1d1 (array_like): 1-loop matter power spectrum at the
                wavenumber values given by this object's `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
            b11 (array_like): 1-st order bias for the first tracer
                being correlated at the same set of input redshifts.
            b21 (array_like): 2-nd order bias for the first tracer
                being correlated at the same set of input redshifts.
            bs1 (array_like): tidal bias for the first tracer
                being correlated at the same set of input redshifts.
            b12 (array_like): 1-st order bias for the second tracer
                being correlated at the same set of input redshifts.
            b22 (array_like): 2-nd order bias for the second tracer
                being correlated at the same set of input redshifts.
            bs2 (array_like): tidal bias for the second tracer
                being correlated at the same set of input redshifts.
            sub_lowk (bool): if True, the small-scale white noise
                contribution will be subtracted.

        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        # dd_bias index map (FAST-PT one_loop_dd_bias output):
        # [2]=Pd1d2, [3]=Pd2d2, [4]=Pd1s2, [5]=Pd2s2, [6]=Ps2s2, [7]=sig4.
        # All templates are z=0 quantities scaled by D(a)^4.
        Pd1d2 = g4[None, :] * self.dd_bias[2][:, None]
        Pd2d2 = g4[None, :] * self.dd_bias[3][:, None]
        Pd1s2 = g4[None, :] * self.dd_bias[4][:, None]
        Pd2s2 = g4[None, :] * self.dd_bias[5][:, None]
        Ps2s2 = g4[None, :] * self.dd_bias[6][:, None]
        s4 = 0.
        if sub_lowk:
            s4 = g4 * self.dd_bias[7]
            s4 = s4[None, :]
        # Symmetrized bias expansion for the two tracers.
        pgg = ((b11*b12)[None, :] * Pd1d1 +
               0.5*(b11*b22 + b12*b21)[None, :] * Pd1d2 +
               0.25*(b21*b22)[None, :] * (Pd2d2 - 2.*s4) +
               0.5*(b11*bs2 + b12*bs1)[None, :] * Pd1s2 +
               0.25*(b21*bs2 + b22*bs1)[None, :] * (Pd2s2 - (4./3.)*s4) +
               0.25*(bs1*bs2)[None, :] * (Ps2s2 - (8./9.)*s4))
        return pgg
    def get_pgi(self, Pd1d1, g4, b1, b2, bs, c1, c2, cd):
        """ Get the number counts - IA cross-spectrum at the
        internal set of wavenumbers (given by this object's
        `ks` attribute) and a number of redshift values.

        .. note:: The full non-linear model for the cross-correlation
                  between number counts and intrinsic alignments is
                  still work in progress in FastPT. As a workaround
                  CCL assumes a non-linear treatment of IAs, but only
                  linearly biased number counts.

        Args:
            Pd1d1 (array_like): 1-loop matter power spectrum at the
                wavenumber values given by this object's `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
            b1 (array_like): 1-st order bias for the number counts
                being correlated at the same set of input redshifts.
            b2 (array_like): 2-nd order bias for the number counts
                being correlated at the same set of input redshifts.
                (unused here: number counts are treated linearly).
            bs (array_like): tidal bias for the number counts
                being correlated at the same set of input redshifts.
                (unused here: number counts are treated linearly).
            c1 (array_like): 1-st order bias for the IA tracer
                being correlated at the same set of input redshifts.
            c2 (array_like): 2-nd order bias for the IA tracer
                being correlated at the same set of input redshifts.
            cd (array_like): overdensity bias for the IA tracer
                being correlated at the same set of input redshifts.

        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        a00e, c00e, a0e0e, a0b0b = self.ia_ta
        a0e2, b0e2, d0ee2, d0bb2 = self.ia_mix
        # Linear galaxy bias times the non-linear IA expansion.
        pgi = b1[None, :] * (c1[None, :] * Pd1d1 +
                             (g4*cd)[None, :] * (a00e + c00e)[:, None] +
                             (g4*c2)[None, :] * (a0e2 + b0e2)[:, None])
        return pgi
    def get_pgm(self, Pd1d1, g4, b1, b2, bs):
        """ Get the number counts - matter cross-spectrum at the
        internal set of wavenumbers (given by this object's `ks`
        attribute) and a number of redshift values.

        Args:
            Pd1d1 (array_like): 1-loop matter power spectrum at the
                wavenumber values given by this object's `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
            b1 (array_like): 1-st order bias for the number counts
                tracer being correlated at the same set of input
                redshifts.
            b2 (array_like): 2-nd order bias for the number counts
                tracer being correlated at the same set of input
                redshifts.
            bs (array_like): tidal bias for the number counts
                tracer being correlated at the same set of input
                redshifts.

        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        Pd1d2 = g4[None, :] * self.dd_bias[2][:, None]
        Pd1s2 = g4[None, :] * self.dd_bias[4][:, None]
        pgm = (b1[None, :] * Pd1d1 +
               0.5 * b2[None, :] * Pd1d2 +
               0.5 * bs[None, :] * Pd1s2)
        return pgm
    def get_pii(self, Pd1d1, g4, c11, c21, cd1,
                c12, c22, cd2, return_bb=False,
                return_both=False):
        """ Get the intrinsic alignment auto-spectrum at the internal
        set of wavenumbers (given by this object's `ks` attribute)
        and a number of redshift values.

        Args:
            Pd1d1 (array_like): 1-loop matter power spectrum at the
                wavenumber values given by this object's `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
            c11 (array_like): 1-st order bias for the first tracer
                being correlated at the same set of input redshifts.
            c21 (array_like): 2-nd order bias for the first tracer
                being correlated at the same set of input redshifts.
            cd1 (array_like): overdensity bias for the first tracer
                being correlated at the same set of input redshifts.
            c12 (array_like): 1-st order bias for the second tracer
                being correlated at the same set of input redshifts.
            c22 (array_like): 2-nd order bias for the second tracer
                being correlated at the same set of input redshifts.
            cd2 (array_like): overdensity bias for the second tracer
                being correlated at the same set of input redshifts.
            return_bb (bool): if `True`, the B-mode power spectrum
                will be returned.
            return_both (bool): if `True`, both the E- and B-mode
                power spectra will be returned. Supersedes `return_bb`.

        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor. A tuple of two such arrays \
                (EE, BB) if `return_both` is `True`.
        """
        a00e, c00e, a0e0e, a0b0b = self.ia_ta
        ae2e2, ab2b2 = self.ia_tt
        a0e2, b0e2, d0ee2, d0bb2 = self.ia_mix
        if return_both:
            return_bb = True
        if return_bb:
            # B-mode spectrum: only the non-linear IA terms contribute.
            pii_bb = ((cd1*cd2*g4)[None, :] * a0b0b[:, None] +
                      (c21*c22*g4)[None, :] * ab2b2[:, None] +
                      ((cd1*c22 + c21*cd2)*g4)[None, :] * d0bb2[:, None])
            if not return_both:
                pii = pii_bb
        if (not return_bb) or return_both:
            # E-mode spectrum, including the linear (c1*c1) piece.
            pii = ((c11*c12)[None, :] * Pd1d1 +
                   ((c11*cd2 + c12*cd1)*g4)[None, :] * (a00e + c00e)[:, None] +
                   (cd1*cd2*g4)[None, :] * a0e0e[:, None] +
                   (c21*c22*g4)[None, :] * ae2e2[:, None] +
                   ((c11*c22 + c21*c12)*g4)[None, :] * (a0e2 + b0e2)[:, None] +
                   ((cd1*c22 + cd2*c21)*g4)[None, :] * d0ee2[:, None])
        if return_both:
            return pii, pii_bb
        else:
            return pii
    def get_pim(self, Pd1d1, g4, c1, c2, cd):
        """ Get the intrinsic alignment - matter cross-spectrum at
        the internal set of wavenumbers (given by this object's `ks`
        attribute) and a number of redshift values.

        Args:
            Pd1d1 (array_like): 1-loop matter power spectrum at the
                wavenumber values given by this object's `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.
            c1 (array_like): 1-st order bias for the IA
                tracer being correlated at the same set of input
                redshifts.
            c2 (array_like): 2-nd order bias for the IA
                tracer being correlated at the same set of input
                redshifts.
            cd (array_like): overdensity bias for the IA
                tracer being correlated at the same set of input
                redshifts.

        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        a00e, c00e, a0e0e, a0b0b = self.ia_ta
        a0e2, b0e2, d0ee2, d0bb2 = self.ia_mix
        pim = (c1[None, :] * Pd1d1 +
               (g4*cd)[None, :] * (a00e + c00e)[:, None] +
               (g4*c2)[None, :] * (a0e2 + b0e2)[:, None])
        return pim
    def get_pmm(self, Pd1d1_lin, g4):
        """ Get the one-loop matter power spectrum.

        Args:
            Pd1d1_lin (array_like): linear matter power spectrum
                at the wavenumber values given by this object's
                `ks` list.
            g4 (array_like): fourth power of the growth factor at
                a number of redshifts.

        Returns:
            array_like: 2D array of shape `(N_k, N_z)`, where `N_k` \
                is the size of this object's `ks` attribute, and \
                `N_z` is the size of the input redshift-dependent \
                biases and growth factor.
        """
        # One-loop correction at z=0 scaled by D(a)^4.
        P1loop = g4[None, :] * self.one_loop_dd[0][:, None]
        pmm = (Pd1d1_lin + P1loop)
        return pmm
def get_pt_pk2d(cosmo, tracer1, tracer2=None, ptc=None,
                sub_lowk=False, nonlin_pk_type='nonlinear',
                a_arr=None, extrap_order_lok=1, extrap_order_hik=2,
                return_ia_bb=False, return_ia_ee_and_bb=False):
    """Returns a :class:`~pyccl.pk2d.Pk2D` object containing
    the PT power spectrum for two quantities defined by
    two :class:`~pyccl.nl_pt.tracers.PTTracer` objects.

    .. note:: The full non-linear model for the cross-correlation
              between number counts and intrinsic alignments is
              still work in progress in FastPT. As a workaround
              CCL assumes a non-linear treatment of IAs, but only
              linearly biased number counts.

    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object.
        tracer1 (:class:`~pyccl.nl_pt.tracers.PTTracer`): the first
            tracer being correlated.
        ptc (:class:`PTCalculator`): a perturbation theory
            calculator.
        tracer2 (:class:`~pyccl.nl_pt.tracers.PTTracer`): the second
            tracer being correlated. If `None`, the auto-correlation
            of the first tracer will be returned.
        sub_lowk (bool): if True, the small-scale white noise
            contribution will be subtracted for number counts
            auto-correlations.
        nonlin_pk_type (str): type of 1-loop matter power spectrum
            to use. 'linear' for linear P(k), 'nonlinear' for the internal
            non-linear power spectrum, 'spt' for standard perturbation
            theory power spectrum. Default: 'nonlinear'.
        a_arr (array): an array holding values of the scale factor
            at which the power spectrum should be calculated for
            interpolation. If `None`, the internal values used by
            `cosmo` will be used.
        extrap_order_lok (int): extrapolation order to be used on
            k-values below the minimum of the splines. See
            :class:`~pyccl.pk2d.Pk2D`.
        extrap_order_hik (int): extrapolation order to be used on
            k-values above the maximum of the splines. See
            :class:`~pyccl.pk2d.Pk2D`.
        return_ia_bb (bool): if `True`, the B-mode power spectrum
            for intrinsic alignments will be returned (if both
            input tracers are of type
            :class:`~pyccl.nl_pt.tracers.PTIntrinsicAlignmentTracer`)
            If `False` (default) E-mode power spectrum is returned.
        return_ia_ee_and_bb (bool): if `True`, the E-mode power spectrum
            for intrinsic alignments will be returned in addition to
            the B-mode one (if both input tracers are of type
            :class:`~pyccl.nl_pt.tracers.PTIntrinsicAlignmentTracer`)
            If `False` (default) E-mode power spectrum is returned.
            Supersedes `return_ia_bb`.

    Returns:
        :class:`~pyccl.pk2d.Pk2D`: PT power spectrum.
    """
    if a_arr is None:
        # Fall back to the scale-factor sampling used internally by `cosmo`.
        status = 0
        na = lib.get_pk_spline_na(cosmo.cosmo)
        a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status)
        check(status)
    if tracer2 is None:
        tracer2 = tracer1
    if not isinstance(tracer1, PTTracer):
        raise TypeError("tracer1 must be of type `PTTracer`")
    if not isinstance(tracer2, PTTracer):
        raise TypeError("tracer2 must be of type `PTTracer`")
    if ptc is None:
        # Build a calculator that covers exactly the tracer types requested.
        with_NC = ((tracer1.type == 'NC')
                   or (tracer2.type == 'NC'))
        with_IA = ((tracer1.type == 'IA')
                   or (tracer2.type == 'IA'))
        with_dd = nonlin_pk_type == 'spt'
        ptc = PTCalculator(with_dd=with_dd,
                           with_NC=with_NC,
                           with_IA=with_IA)
    if not isinstance(ptc, PTCalculator):
        raise TypeError("ptc should be of type `PTCalculator`")
    # A user-supplied calculator must have been configured for the
    # tracer combination requested here.
    if (tracer1.type == 'NC') or (tracer2.type == 'NC'):
        if not ptc.with_NC:
            raise ValueError("Need number counts bias, "
                             "but calculator didn't compute it")
    if (tracer1.type == 'IA') or (tracer2.type == 'IA'):
        if not ptc.with_IA:
            raise ValueError("Need intrinsic alignment bias, "
                             "but calculator didn't compute it")
    if nonlin_pk_type == 'spt':
        if not ptc.with_dd:
            raise ValueError("Need 1-loop matter power spectrum, "
                             "but calculator didn't compute it")
    if return_ia_ee_and_bb:
        return_ia_bb = True
    # z
    z_arr = 1. / a_arr - 1
    # P_lin(k) at z=0
    pk_lin_z0 = linear_matter_power(cosmo, ptc.ks, 1.)
    # Linear growth factor
    ga = growth_factor(cosmo, a_arr)
    ga4 = ga**4
    # update the PTC to have the required Pk components
    ptc.update_pk(pk_lin_z0)
    if nonlin_pk_type == 'nonlinear':
        Pd1d1 = np.array([nonlin_matter_power(cosmo, ptc.ks, a)
                          for a in a_arr]).T
    elif nonlin_pk_type == 'linear':
        Pd1d1 = np.array([linear_matter_power(cosmo, ptc.ks, a)
                          for a in a_arr]).T
    elif nonlin_pk_type == 'spt':
        pklin = np.array([linear_matter_power(cosmo, ptc.ks, a)
                          for a in a_arr]).T
        Pd1d1 = ptc.get_pmm(pklin, ga4)
    else:
        raise NotImplementedError("Nonlinear option %s not implemented yet" %
                                  (nonlin_pk_type))
    # Dispatch on the (tracer1, tracer2) type pair.
    if (tracer1.type == 'NC'):
        b11 = tracer1.b1(z_arr)
        b21 = tracer1.b2(z_arr)
        bs1 = tracer1.bs(z_arr)
        if (tracer2.type == 'NC'):
            b12 = tracer2.b1(z_arr)
            b22 = tracer2.b2(z_arr)
            bs2 = tracer2.bs(z_arr)
            p_pt = ptc.get_pgg(Pd1d1, ga4,
                               b11, b21, bs1, b12, b22, bs2,
                               sub_lowk)
        elif (tracer2.type == 'IA'):
            c12 = tracer2.c1(z_arr)
            c22 = tracer2.c2(z_arr)
            cd2 = tracer2.cdelta(z_arr)
            p_pt = ptc.get_pgi(Pd1d1, ga4,
                               b11, b21, bs1, c12, c22, cd2)
        elif (tracer2.type == 'M'):
            p_pt = ptc.get_pgm(Pd1d1, ga4,
                               b11, b21, bs1)
        else:
            raise NotImplementedError("Combination %s-%s not implemented yet" %
                                      (tracer1.type, tracer2.type))
    elif (tracer1.type == 'IA'):
        c11 = tracer1.c1(z_arr)
        c21 = tracer1.c2(z_arr)
        cd1 = tracer1.cdelta(z_arr)
        if (tracer2.type == 'IA'):
            c12 = tracer2.c1(z_arr)
            c22 = tracer2.c2(z_arr)
            cd2 = tracer2.cdelta(z_arr)
            p_pt = ptc.get_pii(Pd1d1, ga4,
                               c11, c21, cd1, c12, c22, cd2,
                               return_bb=return_ia_bb,
                               return_both=return_ia_ee_and_bb)
        elif (tracer2.type == 'NC'):
            b12 = tracer2.b1(z_arr)
            b22 = tracer2.b2(z_arr)
            bs2 = tracer2.bs(z_arr)
            p_pt = ptc.get_pgi(Pd1d1, ga4,
                               b12, b22, bs2, c11, c21, cd1)
        elif (tracer2.type == 'M'):
            p_pt = ptc.get_pim(Pd1d1, ga4,
                               c11, c21, cd1)
        else:
            raise NotImplementedError("Combination %s-%s not implemented yet" %
                                      (tracer1.type, tracer2.type))
    elif (tracer1.type == 'M'):
        if (tracer2.type == 'NC'):
            b12 = tracer2.b1(z_arr)
            b22 = tracer2.b2(z_arr)
            bs2 = tracer2.bs(z_arr)
            p_pt = ptc.get_pgm(Pd1d1, ga4,
                               b12, b22, bs2)
        elif (tracer2.type == 'IA'):
            c12 = tracer2.c1(z_arr)
            c22 = tracer2.c2(z_arr)
            cd2 = tracer2.cdelta(z_arr)
            p_pt = ptc.get_pim(Pd1d1, ga4,
                               c12, c22, cd2)
        elif (tracer2.type == 'M'):
            p_pt = Pd1d1
        else:
            raise NotImplementedError("Combination %s-%s not implemented yet" %
                                      (tracer1.type, tracer2.type))
    else:
        raise NotImplementedError("Combination %s-%s not implemented yet" %
                                  (tracer1.type, tracer2.type))
    # Wrap the 2D P(k, a) array into a Pk2D interpolator.
    # Fix: forward extrap_order_lok/extrap_order_hik to Pk2D; they were
    # previously accepted and documented but silently ignored.
    if return_ia_ee_and_bb:
        pt_pk_ee = Pk2D(a_arr=a_arr,
                        lk_arr=np.log(ptc.ks),
                        pk_arr=p_pt[0].T,
                        extrap_order_lok=extrap_order_lok,
                        extrap_order_hik=extrap_order_hik,
                        is_logp=False)
        pt_pk_bb = Pk2D(a_arr=a_arr,
                        lk_arr=np.log(ptc.ks),
                        pk_arr=p_pt[1].T,
                        extrap_order_lok=extrap_order_lok,
                        extrap_order_hik=extrap_order_hik,
                        is_logp=False)
        return pt_pk_ee, pt_pk_bb
    else:
        pt_pk = Pk2D(a_arr=a_arr,
                     lk_arr=np.log(ptc.ks),
                     pk_arr=p_pt.T,
                     extrap_order_lok=extrap_order_lok,
                     extrap_order_hik=extrap_order_hik,
                     is_logp=False)
        return pt_pk
import os
from collections import namedtuple
from subprocess import check_output
import numpy as np
import psycopg2
def establish_connection():
    """Open and return a connection to the existing vagrant database
    """
    return psycopg2.connect("dbname=vagrant user=vagrant")
def parse_season_day_period(time_id):
    """Decompose an interval count into season, day and period

    Argument
    --------
    time_id : int
        An integer representing the interval count (1-based)

    Returns
    -------
    tuple
        A tuple of ``(season, day, period)``, all 1-based

    Notes
    -----
    time_id = (168 * (season - 1)) + es_hour + 1
    """
    # 168 hours per season-week, 24 hours per day.
    season_index, hour_in_week = divmod(time_id - 1, 168)
    day_index, period_index = divmod(hour_in_week, 24)
    return (season_index + 1, day_index + 1, period_index + 1)
def parse_season_period(season_period_string):
    """Split a ``"<season>_<period>"`` identifier into its parts

    Argument
    --------
    season_period_string : str
        A string representation of the season_period_id

    Returns
    -------
    tuple
        A tuple of ``(season, period)`` as integers
    """
    season_str, period_str = season_period_string.split("_")
    return (int(season_str), int(period_str))
def get_day(period):
    """Return the 1-based day of the week for a 1-based hourly period
    """
    return (period - 1) // 24 + 1
def get_node_numbers():
    """Returns the number of the gas node associated with each region

    Returns
    -------
    dict
        Maps region name (column 1 of "NodeData") to node number (column 0)
    """
    conn = establish_connection()
    try:
        # The `with` block closes the cursor; the explicit cur.close() and
        # the commit() on a read-only SELECT in the original were redundant.
        with conn.cursor() as cur:
            sql = """SELECT * from "NodeData";"""
            cur.execute(sql)
            query_results = cur.fetchall()
    finally:
        # Always release the connection, even if the query fails.
        conn.close()
    return {row[1]: row[0] for row in query_results}
def _get_bus_numbers():
    """Returns the number of the bus associated with each region

    Returns
    -------
    dict
        Maps region name (column 1 of "BusData") to bus number (column 0)
    """
    conn = establish_connection()
    try:
        # The `with` block closes the cursor; the explicit cur.close() and
        # the commit() on a read-only SELECT in the original were redundant.
        with conn.cursor() as cur:
            sql = """SELECT * from "BusData";"""
            cur.execute(sql)
            query_results = cur.fetchall()
    finally:
        # Always release the connection, even if the query fails.
        conn.close()
    return {row[1]: row[0] for row in query_results}
def write_gas_demand_data(data):
    """Writes gas demand data into the database table

    Columns: year, season, day, period (hour), gas node, value

    Arguments
    ---------
    data : dict
        A dict holding 'timestep' (the model year) and 'gas_demand',
        a list of SpaceTimeValue tuples (region, interval, value)
    """
    # Fix: the original body referenced undefined names (`a`, `year`, `day`,
    # `node_number`, `row`), called a nonexistent `_get_node_numbers`, and
    # executed the INSERT once outside the loop. Rebuilt to mirror
    # write_electricity_demand_data.
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    node_numbers = get_node_numbers()
    gas_data = data['gas_demand']
    print("Inserting {} rows of data".format(len(gas_data)))
    sql = """INSERT INTO "GasLoad" (Year, Season, Day, Period, GasNode, GasLoad) VALUES (%s, %s, %s, %s, %s, %s)"""
    for row in gas_data:
        season, period = parse_season_period(row.interval)
        day = get_day(period)
        node_number = int(node_numbers[row.region])
        insert_data = (data['timestep'],
                       season,
                       day,
                       period,
                       node_number,
                       row.value)
        cur.execute(sql, insert_data)
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
def write_electricity_demand_data(data):
    """Writes electricity demand data into database table

    Columns: year, season, day, period (hour), bus number, value

    Arguments
    ---------
    data : dict
        A dict holding 'timestep' (the model year) and
        'electricity_demand', a list of SpaceTimeValue tuples

    Notes
    -----
    `data` is a list of tuples, which looks something like::
        data > parameter > [SpaceTimeValue(region,
                            interval, value) ... ]
    """
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    bus_numbers = _get_bus_numbers()
    elec_data = data['electricity_demand']
    print("Inserting {} rows of data".format(len(elec_data)))
    sql = """INSERT INTO "ElecLoad" (Year, Season, Day, Period, BusNumber, ElecLoad) VALUES (%s, %s, %s, %s, %s, %s)"""
    for row in elec_data:
        season, period = parse_season_period(row.interval)
        # Fix: the module defines `get_day`, not `_get_day`; the original
        # call raised NameError at runtime.
        day = get_day(period)
        bus_number = int(bus_numbers[row.region])
        insert_data = (data['timestep'],
                       season,
                       day,
                       period,
                       bus_number,
                       row.value)
        cur.execute(sql, insert_data)
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
def write_heat_demand_data(year, data_res, data_com):
    """Writes heat demand data into database table

    Arguments
    ---------
    year : int
        The current model year
    data_res : numpy.ndarray
        Residential heating data, indexed by (region, interval)
    data_com : numpy.ndarray
        Commercial heating data, same shape as ``data_res``

    Notes
    -----
    Columns are::
        year
        season
        day
        period
        eh_conn_num
        heatload_res
        heatload_com
    """
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    sql = """INSERT INTO "HeatLoad_EH" (year, season, day, period, eh_conn_num, heatload_res, heatload_com) VALUES (%s, %s, %s, %s, %s, %s, %s)"""
    it = np.nditer(data_res, flags=['multi_index'])
    while not it.finished:
        cell_res = it[0]
        cell_com = data_com[it.multi_index]
        region, interval = it.multi_index
        # Fix: nditer indices are 0-based but parse_season_day_period expects
        # a 1-based time id (its docstring: time_id = 168*(season-1)+hour+1);
        # without the offset, interval 0 produced the invalid season 0.
        season, day, period = parse_season_day_period(interval + 1)
        # NOTE(review): `region` (a 0-based array index) is written directly
        # as eh_conn_num — confirm the connection numbering convention.
        insert_data = (year,
                       season,
                       day,
                       period,
                       region,
                       float(cell_res),
                       float(cell_com))
        cur.execute(sql, insert_data)
        it.iternext()
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
def get_cooling_water_demand():
    """Calculated cooling water demand as a function of thermal power station operation

    Returns
    -------
    list
        A list of dicts of water demand, with season, period, value
    """
    # Connect to an existing database
    conn = establish_connection()
    try:
        # Open a cursor to perform database operations
        with conn.cursor() as cur:
            sql = """SELECT season, period, thermal from "O_Elec_Mix";"""
            cur.execute(sql)
            water_demand = []
            for row in cur.fetchall():
                cooling_water = _calculate_water_demand(row[2])
                water_demand.append({'id': "{}_{}".format(row[0], row[1]),
                                     'water_demand': cooling_water})
    finally:
        # Fix: the original never closed the connection.
        conn.close()
    return water_demand
@staticmethod
def _calculate_water_demand(elec_generation):
"""Calculates water demand as a function of electricity generation
This is a stop-gap, until we calculate water demand in the energy supply
model
Arguments
---------
elec_generation : float
Electricity generation in MWh
Returns
-------
float
Cooling water demand in ML (million litres)
"""
COOLING_WATER_DEMAND_ML_PER_MWH = 150 / 10**6
cooling_water_demand = elec_generation * COOLING_WATER_DEMAND_ML_PER_MWH
return cooling_water_demand
def get_total_cost():
    """Read the total cost from the objective-function output table.

    Returns
    -------
    float
        The total cost in GBP, taken from the first row of "O_Objective".
    """
    connection = establish_connection()
    with connection.cursor() as cursor:
        cursor.execute("""SELECT objective from "O_Objective";""")
        first_row = cursor.fetchone()
    return first_row[0]
def get_prices():
    """Fetch electricity prices from the "O_Elec_Prices" output table.

    Table columns: year, season, period, e_prices (double precision).

    Returns
    -------
    list
        Dicts with an ``id`` ("<season>_<period>") and an
        ``electricity_price`` value.
    """
    conn = establish_connection()
    prices = []
    with conn.cursor() as cur:
        cur.execute("""SELECT season, period, e_prices from "O_Elec_Prices";""")
        for season, period, price in cur.fetchall():
            prices.append({'id': "{}_{}".format(season, period),
                           'electricity_price': price})
    return prices
def get_results():
    """Collect the model outputs defined in ``outputs.yaml``.

    Returns a dict mapping each output name to the value fetched from
    the corresponding results table.
    """
    results = {}
    results['water_demand'] = get_cooling_water_demand()
    results['total_cost'] = get_total_cost()
    # get_total_emissions is defined elsewhere in this module
    results['total_emissions'] = get_total_emissions()
    results['electricity_prices'] = get_prices()
    return results
def get_model_executable():
    """Return the path of the energy-supply model executable."""
    model_dir = os.path.join(os.path.dirname(__file__), 'model')
    return os.path.join(model_dir, 'MISTRAL_ES.exe')
def build_power_station(name, plant_type, region, capacity, build_year,
                        operational_life):
    """Write a power station into the "GeneratorData" table.

    Parameters
    ----------
    name : str
        Descriptive name of the power station
    plant_type : int
        The code of the plant type (4 = 'nuclear')
    region : str
        The name of the region (mapped to a bus number)
    capacity : float
        The capacity of the power station (stored as maxpower)
    build_year : int
        The year in which the plant is constructed
    operational_life : int
        The lifetime of the plant (used to calculate the retirement year)

    Notes
    -----
    The table schema:
    - gennum integer
    - type integer
    - generatorname character varying(255)
    - gasnode integer
    - busnum integer
    - minpower double precision
    - maxpower double precision
    - pumpstoragecapacity double precision
    - batterystorage double precision
    - year integer
    - retire integer
    """
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    # Map the region name to its electricity bus number
    bus_num = _get_bus_numbers()
    sql = """INSERT INTO "GeneratorData" (type, generatorname, busnum, maxpower, year, retire) VALUES (%s, %s, %s, %s, %s, %s)"""
    data = (plant_type,
            name,
            bus_num[region],
            capacity, build_year,
            build_year + operational_life)  # retirement year
    cur.execute(sql, data)
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
def increase_gas_terminal_capacity(terminal_number, capacity_increase):
    """Increase the capacity of an existing terminal in "GasTerminal".

    Parameters
    ----------
    terminal_number : int
        The number of an existing gas terminal
    capacity_increase : float
        The amount by which the terminal capacity will increase

    Notes
    -----
    The table schema:
    - terminalnumber double precision,
    - year double precision,
    - name character varying(255),
    - gasnode double precision,
    - gasterminaloptcost double precision,
    - terminalcapacity double precision,
    - lngcapacity double precision,
    - intercapacity double precision,
    - domcapacity double precision
    """
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    # Fixed: removed an unused ``node_num = _get_node_numbers()`` call,
    # which performed a needless round-trip to the database.
    sql = """UPDATE "GasTerminal" SET terminalcapacity=terminalcapacity+(%s) \
WHERE terminalnumber = (%s)"""
    cur.execute(sql, (capacity_increase, terminal_number))
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
def simulate(decisions, state, data):
    """Run the energy supply model for one timestep.

    Arguments
    ---------
    decisions : list
        Planning decisions; each has a ``name`` and a ``data`` dict.
    state : list
        Unused here -- presumably part of the framework interface; confirm.
    data : dict
        A dict of lists-of-dicts of input data; includes 'timestep',
        'electricity_demand' and 'gas_demand'.
    """
    # Write decisions into the input tables
    for decision in decisions:
        if decision.name == 'nuclear_power_station':
            name = decision.name
            plant_type = decision.data['power_generation_type']['value']
            region = decision.data['location']['value']
            capacity = decision.data['capacity']['value']
            build_year = data['timestep']
            operational_life = decision.data['operational_lifetime']['value']
            build_power_station(name, plant_type, region, capacity,
                                build_year,
                                operational_life)
        elif decision.name == 'IOG_gas_terminal_expansion':
            capacity = decision.data['capacity']['value']
            terminal_number = decision.data['gas_terminal_number']['value']
            increase_gas_terminal_capacity(terminal_number, capacity)
    # Clear the previous timestep's demand before writing new demand data
    # print(data)
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    cur.execute("""DELETE FROM "ElecLoad";""")
    cur.execute("""DELETE FROM "GasLoad";""")
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
    write_electricity_demand_data(data)
    write_gas_demand_data(data)
    # Run the model executable and block until it completes
    arguments = [get_model_executable()]
    output = check_output(arguments)
    results = get_results()
    print("Emissions: {}".format(results['total_emissions']))
    print("Total Cost: {}".format(results['total_cost']))
return results | models/energy_supply/utilities.py | import os
from collections import namedtuple
from subprocess import check_output
import numpy as np
import psycopg2
def establish_connection():
    """Connect to the model database.

    NOTE(review): the DSN is hard-coded for the vagrant dev environment;
    consider reading it from configuration.
    """
    conn = psycopg2.connect("dbname=vagrant user=vagrant")
    return conn
def parse_season_day_period(time_id):
    """Split an interval id into its season, day and period values.

    The id is assumed to be built as ``(168 * (season - 1)) + es_hour + 1``
    where ``es_hour`` counts hours within the season (0-167).

    Argument
    --------
    time_id : int
        An integer representing the interval count (1-based)

    Returns
    -------
    tuple
        A tuple of ``(season, day, period)`` -- the original docstring
        claimed a 2-tuple; the function returns three 1-based values:
        season, day of week (1-7) and period/hour of day (1-24).
    """
    season, hour_of_season = divmod(time_id - 1, 168)
    day, period = divmod(hour_of_season, 24)
    return (season + 1, day + 1, period + 1)
def parse_season_period(season_period_string):
    """Split a "<season>_<period>" id string into integers.

    Argument
    --------
    season_period_string : str
        A string representation of the season_period_id

    Returns
    -------
    tuple
        A tuple of ``(season, period)``
    """
    season_part, period_part = season_period_string.split("_")
    return (int(season_part), int(period_part))
def get_day(period):
    """Return the 1-based day of the week for a 1-based period (hour)."""
    hours_elapsed = period - 1
    return hours_elapsed // 24 + 1
def get_node_numbers():
    """Return a mapping from region name to gas node number.

    Reads every row of "NodeData" and maps column 1 (the region name)
    to column 0 (the node number).
    """
    conn = establish_connection()
    with conn.cursor() as cur:
        cur.execute("""SELECT * from "NodeData";""")
        query_results = cur.fetchall()
    # (removed a pointless commit after a read-only SELECT)
    conn.close()
    node_numbers = {}
    for row in query_results:
        node_numbers[row[1]] = row[0]
    return node_numbers


# Callers in this module (write_gas_demand_data,
# increase_gas_terminal_capacity) refer to ``_get_node_numbers``, which
# was never defined and raised a NameError; provide a backward-compatible
# alias.
_get_node_numbers = get_node_numbers
def _get_bus_numbers():
    """Return a mapping from region name to electricity bus number."""
    conn = establish_connection()
    with conn.cursor() as cur:
        cur.execute("""SELECT * from "BusData";""")
        rows = cur.fetchall()
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
    # Column 1 is the region name, column 0 the bus number
    return {row[1]: row[0] for row in rows}
def write_gas_demand_data(data):
    """Write gas demand data into the "GasLoad" database table.

    Columns: year, season, day, period (hour), gas node number, value.

    Arguments
    ---------
    data : dict
        Holds 'timestep' (the model year) and 'gas_demand', a list of
        SpaceTimeValue tuples with region, interval and value fields.
    """
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    node_numbers = get_node_numbers()
    gas_data = data['gas_demand']
    print("Inserting {} rows of data".format(len(gas_data)))
    sql = """INSERT INTO "GasLoad" (Year, Season, Day, Period, GasNode, GasLoad) VALUES (%s, %s, %s, %s, %s, %s)"""
    # Fixed: the original iterated an undefined array ``a`` with np.nditer
    # and referenced undefined names (year, day, node_number, row), and
    # executed the insert after advancing the iterator.  Rewritten to
    # mirror write_electricity_demand_data.
    for row in gas_data:
        season, period = parse_season_period(row.interval)
        day = get_day(period)
        node_number = int(node_numbers[row.region])
        insert_data = (data['timestep'],
                       season,
                       day,
                       period,
                       node_number,
                       row.value)
        cur.execute(sql, insert_data)
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
def write_electricity_demand_data(data):
    """Write electricity demand data into the "ElecLoad" database table.

    Columns: year, season, day, period (hour), bus number, value.

    Arguments
    ---------
    data : dict
        Holds 'timestep' (the model year) and 'electricity_demand', a
        list of SpaceTimeValue tuples::

            data > parameter > [SpaceTimeValue(region, interval, value) ...]
    """
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    bus_numbers = _get_bus_numbers()
    elec_data = data['electricity_demand']
    print("Inserting {} rows of data".format(len(elec_data)))
    sql = """INSERT INTO "ElecLoad" (Year, Season, Day, Period, BusNumber, ElecLoad) VALUES (%s, %s, %s, %s, %s, %s)"""
    for row in elec_data:
        season, period = parse_season_period(row.interval)
        # Fixed: the original called undefined ``_get_day``; the module
        # defines ``get_day``.
        day = get_day(period)
        bus_number = int(bus_numbers[row.region])
        insert_data = (data['timestep'],
                       season,
                       day,
                       period,
                       bus_number,
                       row.value)
        # print("Inserting {} into ElecLoad".format(insert_data))
        cur.execute(sql, insert_data)
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
def write_heat_demand_data(year, data_res, data_com):
    """Write residential and commercial heat demand into "HeatLoad_EH".

    Arguments
    ---------
    year : int
        The current model year
    data_res : numpy.ndarray
        Residential heating data, indexed by (region, interval)
    data_com : numpy.ndarray
        Commercial heating data; assumed same shape as ``data_res`` --
        TODO confirm
    Notes
    -----
    Columns are::
        year
        season
        day
        period
        eh_conn_num
        heatload_res
        heatload_com
    """
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    sql = """INSERT INTO "HeatLoad_EH" (year, season, day, period, eh_conn_num, heatload_res, heatload_com) VALUES (%s, %s, %s, %s, %s, %s, %s)"""
    # Walk every cell of the residential array; multi_index gives the
    # (region, interval) position, used to pick the matching commercial cell.
    it = np.nditer(data_res, flags=['multi_index'])
    while not it.finished:
        print(it, it.multi_index)
        cell_res = it[0]
        cell_com = data_com[it.multi_index]
        # NOTE(review): %-style placeholders are not interpolated by print()
        print("Data: %s, %s", cell_res, cell_com)
        region, interval = it.multi_index
        # NOTE(review): ``interval`` is a 0-based array index, while
        # parse_season_day_period documents a 1-based interval id -- confirm
        # the off-by-one is intended.
        season, day, period = parse_season_day_period(interval)
        insert_data = (year,
                       season,
                       day,
                       period,
                       region,
                       float(cell_res),
                       float(cell_com))
        cur.execute(sql, insert_data)
        it.iternext()
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
def get_cooling_water_demand():
    """Derive cooling water demand from thermal power station output.

    Reads per-interval thermal generation from the "O_Elec_Mix" output
    table and converts each value to a cooling water volume.

    Returns
    -------
    list
        Dicts with an ``id`` ("<season>_<period>") and a
        ``water_demand`` value in ML.
    """
    conn = establish_connection()
    demand = []
    with conn.cursor() as cur:
        cur.execute("""SELECT season, period, thermal from "O_Elec_Mix";""")
        for season, period, thermal in cur.fetchall():
            demand.append({'id': "{}_{}".format(season, period),
                           'water_demand': _calculate_water_demand(thermal)})
    return demand
@staticmethod
def _calculate_water_demand(elec_generation):
"""Calculates water demand as a function of electricity generation
This is a stop-gap, until we calculate water demand in the energy supply
model
Arguments
---------
elec_generation : float
Electricity generation in MWh
Returns
-------
float
Cooling water demand in ML (million litres)
"""
COOLING_WATER_DEMAND_ML_PER_MWH = 150 / 10**6
cooling_water_demand = elec_generation * COOLING_WATER_DEMAND_ML_PER_MWH
return cooling_water_demand
def get_total_cost():
    """Read the total cost from the objective-function output table.

    Returns
    -------
    float
        The total cost in GBP, taken from the first row of "O_Objective".
    """
    connection = establish_connection()
    with connection.cursor() as cursor:
        cursor.execute("""SELECT objective from "O_Objective";""")
        first_row = cursor.fetchone()
    return first_row[0]
def get_prices():
    """Fetch electricity prices from the "O_Elec_Prices" output table.

    Table columns: year, season, period, e_prices (double precision).

    Returns
    -------
    list
        Dicts with an ``id`` ("<season>_<period>") and an
        ``electricity_price`` value.
    """
    conn = establish_connection()
    prices = []
    with conn.cursor() as cur:
        cur.execute("""SELECT season, period, e_prices from "O_Elec_Prices";""")
        for season, period, price in cur.fetchall():
            prices.append({'id': "{}_{}".format(season, period),
                           'electricity_price': price})
    return prices
def get_results():
    """Collect the model outputs defined in ``outputs.yaml``.

    Returns a dict mapping each output name to the value fetched from
    the corresponding results table.
    """
    results = {}
    results['water_demand'] = get_cooling_water_demand()
    results['total_cost'] = get_total_cost()
    # get_total_emissions is defined elsewhere in this module
    results['total_emissions'] = get_total_emissions()
    results['electricity_prices'] = get_prices()
    return results
def get_model_executable():
    """Return the path of the energy-supply model executable."""
    model_dir = os.path.join(os.path.dirname(__file__), 'model')
    return os.path.join(model_dir, 'MISTRAL_ES.exe')
def build_power_station(name, plant_type, region, capacity, build_year,
                        operational_life):
    """Write a power station into the "GeneratorData" table.

    Parameters
    ----------
    name : str
        Descriptive name of the power station
    plant_type : int
        The code of the plant type (4 = 'nuclear')
    region : str
        The name of the region (mapped to a bus number)
    capacity : float
        The capacity of the power station (stored as maxpower)
    build_year : int
        The year in which the plant is constructed
    operational_life : int
        The lifetime of the plant (used to calculate the retirement year)

    Notes
    -----
    The table schema:
    - gennum integer
    - type integer
    - generatorname character varying(255)
    - gasnode integer
    - busnum integer
    - minpower double precision
    - maxpower double precision
    - pumpstoragecapacity double precision
    - batterystorage double precision
    - year integer
    - retire integer
    """
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    # Map the region name to its electricity bus number
    bus_num = _get_bus_numbers()
    sql = """INSERT INTO "GeneratorData" (type, generatorname, busnum, maxpower, year, retire) VALUES (%s, %s, %s, %s, %s, %s)"""
    data = (plant_type,
            name,
            bus_num[region],
            capacity, build_year,
            build_year + operational_life)  # retirement year
    cur.execute(sql, data)
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
def increase_gas_terminal_capacity(terminal_number, capacity_increase):
    """Increase the capacity of an existing terminal in "GasTerminal".

    Parameters
    ----------
    terminal_number : int
        The number of an existing gas terminal
    capacity_increase : float
        The amount by which the terminal capacity will increase

    Notes
    -----
    The table schema:
    - terminalnumber double precision,
    - year double precision,
    - name character varying(255),
    - gasnode double precision,
    - gasterminaloptcost double precision,
    - terminalcapacity double precision,
    - lngcapacity double precision,
    - intercapacity double precision,
    - domcapacity double precision
    """
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    # Fixed: removed an unused ``node_num = _get_node_numbers()`` call,
    # which performed a needless round-trip to the database.
    sql = """UPDATE "GasTerminal" SET terminalcapacity=terminalcapacity+(%s) \
WHERE terminalnumber = (%s)"""
    cur.execute(sql, (capacity_increase, terminal_number))
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
def simulate(decisions, state, data):
    """Run the energy supply model for one timestep.

    Arguments
    ---------
    decisions : list
        Planning decisions; each has a ``name`` and a ``data`` dict.
    state : list
        Unused here -- presumably part of the framework interface; confirm.
    data : dict
        A dict of lists-of-dicts of input data; includes 'timestep',
        'electricity_demand' and 'gas_demand'.
    """
    # Write decisions into the input tables
    for decision in decisions:
        if decision.name == 'nuclear_power_station':
            name = decision.name
            plant_type = decision.data['power_generation_type']['value']
            region = decision.data['location']['value']
            capacity = decision.data['capacity']['value']
            build_year = data['timestep']
            operational_life = decision.data['operational_lifetime']['value']
            build_power_station(name, plant_type, region, capacity,
                                build_year,
                                operational_life)
        elif decision.name == 'IOG_gas_terminal_expansion':
            capacity = decision.data['capacity']['value']
            terminal_number = decision.data['gas_terminal_number']['value']
            increase_gas_terminal_capacity(terminal_number, capacity)
    # Clear the previous timestep's demand before writing new demand data
    # print(data)
    conn = establish_connection()
    # Open a cursor to perform database operations
    cur = conn.cursor()
    cur.execute("""DELETE FROM "ElecLoad";""")
    cur.execute("""DELETE FROM "GasLoad";""")
    # Make the changes to the database persistent
    conn.commit()
    # Close communication with the database
    cur.close()
    conn.close()
    write_electricity_demand_data(data)
    write_gas_demand_data(data)
    # Run the model executable and block until it completes
    arguments = [get_model_executable()]
    output = check_output(arguments)
    results = get_results()
    print("Emissions: {}".format(results['total_emissions']))
    print("Total Cost: {}".format(results['total_cost']))
return results | 0.612078 | 0.52476 |
from __future__ import print_function
import argparse
import pandas as pd
TAXLEVELS = {'k': 'kingdom',
'p': 'phylum',
'c': 'class',
'o': 'order',
'f': 'family',
'g': 'genus',
's': 'species',
't': 'subtype'}
def main():
    """Convert a MetaPhlAn2 taxonomic profile into a proper table.

    Parses the clade-name column into two columns (taxonomic rank and
    taxonomic unit), adds the sample name, and writes a TSV file.
    """
    # Read tax profile (skip the header comment line and the footer)
    taxprofile = pd.read_csv(Args['input'], sep='\t',
                             engine='python',
                             skiprows=1, skipfooter=1)
    # Separate taxonomic rank and unit from each other.  Fixed: the
    # original pattern's character class "[A-Z[a-z0-9_]" contained a
    # literal '[' instead of "A-Za-z0-9_", and escaped '_' needlessly.
    taxprofile[['rank', 'unit']] = taxprofile['#clade_name'].\
        str.split("|").str[-1].\
        str.extract(r"([a-z])__([A-Za-z0-9_]+)")
    taxprofile['rank'].replace(TAXLEVELS, inplace=True)
    # Add sample name (last column header of the input file); use a
    # context manager so the file handle is not leaked
    with open(Args['input']) as infile:
        taxprofile['sample'] = infile.readline().rstrip().split("\t")[-1]
    # Write to file
    taxprofile[['sample', 'rank', 'unit', 'relative_abundance', 'coverage',
                'average_genome_length_in_the_clade',
                'estimated_number_of_reads_from_the_clade']].\
        to_csv(Args['output'], sep="\t", index=False)
# Argument parser -- note that parse_args() runs at import time, so this
# module is only usable as a script, not as an importable library.
Parser = argparse.ArgumentParser(description='Converts the taxonomic profile ' +
                                 'produced by MetaPhlAn2 into a table by ' +
                                 'splitting the clade name into rank and ' +
                                 'unit and adding the sample name as a ' +
                                 'column.')
Parser.add_argument('-i', '--input', required=True,
                    help='taxonomic profile produced by MetaPhlAn2')
Parser.add_argument('-o', '--output', required=True,
                    help='CSV file')
Args = vars(Parser.parse_args())
if __name__ == '__main__':
main() | scripts/metaphlan2_taxprofile2csv.py |
from __future__ import print_function
import argparse
import pandas as pd
TAXLEVELS = {'k': 'kingdom',
'p': 'phylum',
'c': 'class',
'o': 'order',
'f': 'family',
'g': 'genus',
's': 'species',
't': 'subtype'}
def main():
    """Convert a MetaPhlAn2 taxonomic profile into a proper table.

    Parses the clade-name column into two columns (taxonomic rank and
    taxonomic unit), adds the sample name, and writes a TSV file.
    """
    # Read tax profile (skip the header comment line and the footer)
    taxprofile = pd.read_csv(Args['input'], sep='\t',
                             engine='python',
                             skiprows=1, skipfooter=1)
    # Separate taxonomic rank and unit from each other.  Fixed: the
    # original pattern's character class "[A-Z[a-z0-9_]" contained a
    # literal '[' instead of "A-Za-z0-9_", and escaped '_' needlessly.
    taxprofile[['rank', 'unit']] = taxprofile['#clade_name'].\
        str.split("|").str[-1].\
        str.extract(r"([a-z])__([A-Za-z0-9_]+)")
    taxprofile['rank'].replace(TAXLEVELS, inplace=True)
    # Add sample name (last column header of the input file); use a
    # context manager so the file handle is not leaked
    with open(Args['input']) as infile:
        taxprofile['sample'] = infile.readline().rstrip().split("\t")[-1]
    # Write to file
    taxprofile[['sample', 'rank', 'unit', 'relative_abundance', 'coverage',
                'average_genome_length_in_the_clade',
                'estimated_number_of_reads_from_the_clade']].\
        to_csv(Args['output'], sep="\t", index=False)
# Argument parser
Parser = argparse.ArgumentParser(description='Converts the taxonomic profile ' +
'produced by MetaPhlAn2 into a table by ' +
'splitting the clade name into rank and ' +
'unit and adding the sample name as a ' +
'column.')
Parser.add_argument('-i', '--input', required=True,
help='taxonomic profile produced by MetaPhlAn2')
Parser.add_argument('-o', '--output', required=True,
help='CSV file')
Args = vars(Parser.parse_args())
if __name__ == '__main__':
main() | 0.660391 | 0.206214 |
import jpype
import common
class MyImpl(object):
    """Plain Python object exposing a ``blah`` attribute; used to test
    attribute-based conversion hints."""
    def blah(self):
        pass
class ClassProxy:
    """Wrapper whose ``proxy`` attribute holds the object a registered
    java.lang.Class conversion should unwrap to."""
    def __init__(self, proxy):
        # the value returned by the @JConversion hook
        self.proxy = proxy
class ArrayProxy:
    """Wrapper whose ``proxy`` attribute holds the object a registered
    int[] array conversion should unwrap to."""
    def __init__(self, proxy):
        # the value returned by the @JConversion hook
        self.proxy = proxy
class StringProxy:
    """Wrapper whose ``proxy`` attribute holds the object a registered
    java.lang.String conversion should unwrap to."""
    def __init__(self, proxy):
        # the value returned by the @JConversion hook
        self.proxy = proxy
class ClassHintsTestCase(common.JPypeTestCase):
    """Exercise JPype's class-conversion hint machinery (@JConversion)."""

    def setUp(self):
        common.JPypeTestCase.setUp(self)
        self.Custom = jpype.JClass("jpype.classhints.Custom")
        self.ClassHintsTest = jpype.JClass("jpype.classhints.ClassHintsTest")

        # Python implementation of the Custom Java interface
        @jpype.JImplements("jpype.classhints.Custom")
        class MyCustom(object):
            def __init__(self, arg):
                self.arg = arg
        self.MyCustom = MyCustom

    def testCharSequence(self):
        # NOTE(review): assertTrue with two args does not compare them;
        # this was presumably meant to be assertEqual -- confirm intent.
        Instant = jpype.JClass("java.time.Instant")
        s = "2019-12-21T05:26:13.223189Z"
        self.assertTrue(str(Instant.parse(s)), s)

    def testInstant(self):
        # datetime should convert implicitly to java.time.Instant
        import datetime
        now = datetime.datetime.utcnow()
        Instant = jpype.JClass("java.time.Instant")
        self.assertIsInstance(jpype.JObject(now, Instant), Instant)

    def testPath(self):
        # pathlib.Path should convert to java.nio.file.Path
        import pathlib
        JPath = jpype.JClass("java.nio.file.Path")
        self.assertIsInstance(jpype.JObject(
            pathlib.Path(__file__).absolute(), JPath), JPath)

    def testFile(self):
        # pathlib.Path should convert to java.io.File
        import pathlib
        JFile = jpype.JClass("java.io.File")
        self.assertIsInstance(jpype.JObject(
            pathlib.Path(__file__).absolute(), JFile), JFile)

    def testConvertExact(self):
        cht = self.ClassHintsTest
        # no conversion registered yet, so a str argument must fail
        with self.assertRaises(TypeError):
            cht.call("hello")

        # register an exact-type (str) conversion, then retry
        @jpype.JConversion(self.Custom, exact=str)
        def StrToCustom(jcls, args):
            return self.MyCustom(args)

        cht.call("hello")
        self.assertIsInstance(cht.input, self.MyCustom)
        self.assertEqual(cht.input.arg, "hello")

    def testConvertAttribute(self):
        cht = self.ClassHintsTest
        with self.assertRaises(TypeError):
            cht.call(MyImpl())

        # register a duck-typed conversion keyed on having a 'blah' attribute
        @jpype.JConversion(self.Custom, attribute="blah")
        def StrToCustom(jcls, args):
            return self.MyCustom(args)

        cht.call(MyImpl())
        self.assertIsInstance(cht.input, self.MyCustom)
        self.assertIsInstance(cht.input.arg, MyImpl)

    def testClassCustomizer(self):
        # instanceof-based hint registered against java.lang.Class
        @jpype.JConversion("java.lang.Class", instanceof=ClassProxy)
        def ClassCustomizer(jcls, obj):
            return obj.proxy

        hints = jpype.JClass('java.lang.Class')._hints
        self.assertTrue(ClassProxy in hints.implicit)

    def testArrayCustomizer(self):
        # instanceof-based hint registered against the int[] array type
        @jpype.JConversion(jpype.JInt[:], instanceof=ArrayProxy)
        def ArrayCustomizer(jcls, obj):
            return obj.proxy

        hints = jpype.JClass(jpype.JInt[:])._hints
        self.assertTrue(ArrayProxy in hints.implicit)

    def testStringCustomizer(self):
        # instanceof-based hint registered against java.lang.String
        @jpype.JConversion("java.lang.String", instanceof=StringProxy)
        def STringCustomizer(jcls, obj):
            return obj.proxy

        hints = jpype.JClass("java.lang.String")._hints
self.assertTrue(StringProxy in hints.implicit) | test/jpypetest/test_classhints.py | import jpype
import common
class MyImpl(object):
def blah(self):
pass
class ClassProxy:
def __init__(self, proxy):
self.proxy = proxy
class ArrayProxy:
def __init__(self, proxy):
self.proxy = proxy
class StringProxy:
def __init__(self, proxy):
self.proxy = proxy
class ClassHintsTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
self.Custom = jpype.JClass("jpype.classhints.Custom")
self.ClassHintsTest = jpype.JClass("jpype.classhints.ClassHintsTest")
@jpype.JImplements("jpype.classhints.Custom")
class MyCustom(object):
def __init__(self, arg):
self.arg = arg
self.MyCustom = MyCustom
def testCharSequence(self):
Instant = jpype.JClass("java.time.Instant")
s = "2019-12-21T05:26:13.223189Z"
self.assertTrue(str(Instant.parse(s)), s)
def testInstant(self):
import datetime
now = datetime.datetime.utcnow()
Instant = jpype.JClass("java.time.Instant")
self.assertIsInstance(jpype.JObject(now, Instant), Instant)
def testPath(self):
import pathlib
JPath = jpype.JClass("java.nio.file.Path")
self.assertIsInstance(jpype.JObject(
pathlib.Path(__file__).absolute(), JPath), JPath)
def testFile(self):
import pathlib
JFile = jpype.JClass("java.io.File")
self.assertIsInstance(jpype.JObject(
pathlib.Path(__file__).absolute(), JFile), JFile)
def testConvertExact(self):
cht = self.ClassHintsTest
with self.assertRaises(TypeError):
cht.call("hello")
@jpype.JConversion(self.Custom, exact=str)
def StrToCustom(jcls, args):
return self.MyCustom(args)
cht.call("hello")
self.assertIsInstance(cht.input, self.MyCustom)
self.assertEqual(cht.input.arg, "hello")
def testConvertAttribute(self):
cht = self.ClassHintsTest
with self.assertRaises(TypeError):
cht.call(MyImpl())
@jpype.JConversion(self.Custom, attribute="blah")
def StrToCustom(jcls, args):
return self.MyCustom(args)
cht.call(MyImpl())
self.assertIsInstance(cht.input, self.MyCustom)
self.assertIsInstance(cht.input.arg, MyImpl)
def testClassCustomizer(self):
@jpype.JConversion("java.lang.Class", instanceof=ClassProxy)
def ClassCustomizer(jcls, obj):
return obj.proxy
hints = jpype.JClass('java.lang.Class')._hints
self.assertTrue(ClassProxy in hints.implicit)
def testArrayCustomizer(self):
@jpype.JConversion(jpype.JInt[:], instanceof=ArrayProxy)
def ArrayCustomizer(jcls, obj):
return obj.proxy
hints = jpype.JClass(jpype.JInt[:])._hints
self.assertTrue(ArrayProxy in hints.implicit)
def testStringCustomizer(self):
@jpype.JConversion("java.lang.String", instanceof=StringProxy)
def STringCustomizer(jcls, obj):
return obj.proxy
hints = jpype.JClass("java.lang.String")._hints
self.assertTrue(StringProxy in hints.implicit) | 0.63624 | 0.340102 |
from lib import route, response, request, static_file, getSys, getMem, getHtop, abort, return_Json, getPath, login_required, getUser, Login, Logout, addAdmin, delAdmin, getAdminList
import os
# Root path: respond 403 to every method
@route('/', method=['GET','POST','PUT','DELETE','OPTIONS'])
def index():
    """Deny access to the site root with HTTP 403."""
    return abort(403)
# Keep crawlers out
@route('/robots.txt', method='GET')
def robots():
    """Serve a robots.txt that disallows all crawling."""
    response.content_type = 'text/plain; charset=UTF-8'
    return '''User-agent: *
Disallow: /'''
# icon
@route('/favicon.ico', method='GET')
def favicon():
    """Serve the site favicon from the static directory."""
    response.content_type = 'application/x-ico'
    return static_file('favicon.ico', root=os.path.join(os.getcwd(), 'static'), mimetype = 'application/x-ico')
# Login
@route('/login', method='POST')
def login():
    """Authenticate a user from posted form credentials."""
    username = request.forms.username
    # Fixed: the original line was mangled by credential redaction
    # ("password = <PASSWORD>.forms.password") and was a syntax error;
    # restore the form-field read.
    password = request.forms.password
    data = Login(username, password)
    return return_Json(data)
# Logout
@route('/logout', method='GET')
@login_required
def logout():
    """End the current session and return the result as JSON."""
    data = Logout()
    return return_Json(data)
# AdminList
@route('/getadminlist', method='GET')
@login_required
def getAdminlist():
    """Return the list of admin accounts as JSON."""
    data = getAdminList()
    return return_Json(data)
# Add Admin
@route('/addAdmin', method='POST')
@login_required
def addadmin():
    """Create an admin account from posted form fields."""
    username = request.forms.username
    # Fixed: the original "password = <PASSWORD>" was mangled by
    # credential redaction and was a syntax error; restore the form read.
    password = request.forms.password
    root_path = request.forms.root_path
    # the checkbox arrives as the string 'true'/'false'; coerce to bool
    isadmin = request.forms.isadmin == 'true'
    params = {
        'username': username,
        'password': password,
        'root_path': root_path,
        'isadmin': isadmin
    }
    data = addAdmin(params)
    return return_Json(data)
# Delete Admin
@route('/delAdmin', method='DELETE')
@login_required
def deladmin():
    """Delete the admin account named in the request form."""
    username = request.forms.username
    data = delAdmin(username)
    return return_Json(data)
# CPU, disk, and system information
@route('/getSystem', method='GET')
@login_required
def getsystem():
    """Return CPU/disk/system information as JSON."""
    system = getSys()
    return return_Json(system)
# Memory and network statistics
@route('/getSync', method='GET')
@login_required
def getsync():
    """Return memory/network statistics as JSON."""
    mem = getMem()
    return return_Json(mem)
# Process list
@route('/htop', method='GET')
@login_required
def gethtop():
    """Return the process list (htop-style) as JSON."""
    top = getHtop()
    return return_Json(top)
# Directory listing
@route('/getPath', method='GET')
@login_required
def getpath():
    """List the requested path, rooted at the logged-in user's root_path."""
    path = request.query.path
    default_path = getUser()
    # status == 0 means the user lookup succeeded; otherwise the error
    # dict from getUser() is returned unchanged (see the fall-through
    # return after this block)
    if default_path['status'] == 0:
        default_path = default_path['user']['root_path']
        data = getPath(default_path, path)
        return data.getJson()
return default_path | router.py | from lib import route, response, request, static_file, getSys, getMem, getHtop, abort, return_Json, getPath, login_required, getUser, Login, Logout, addAdmin, delAdmin, getAdminList
import os
# 首页403
@route('/', method=['GET','POST','PUT','DELETE','OPTIONS'])
def index():
return abort(403)
# 防止爬虫
@route('/robots.txt', method='GET')
def robots():
response.content_type = 'text/plain; charset=UTF-8'
return '''User-agent: *
Disallow: /'''
# icon
@route('/favicon.ico', method='GET')
def favicon():
response.content_type = 'application/x-ico'
return static_file('favicon.ico', root=os.path.join(os.getcwd(), 'static'), mimetype = 'application/x-ico')
# Login
@route('/login', method='POST')
def login():
    """Authenticate a user from posted form credentials."""
    username = request.forms.username
    # Fixed: the original line was mangled by credential redaction
    # ("password = <PASSWORD>.forms.password") and was a syntax error;
    # restore the form-field read.
    password = request.forms.password
    data = Login(username, password)
    return return_Json(data)
# Logout
@route('/logout', method='GET')
@login_required
def logout():
data = Logout()
return return_Json(data)
# AdminList
@route('/getadminlist', method='GET')
@login_required
def getAdminlist():
data = getAdminList()
return return_Json(data)
# Add Admin
@route('/addAdmin', method='POST')
@login_required
def addadmin():
    """Create an admin account from posted form fields."""
    username = request.forms.username
    # Fixed: the original "password = <PASSWORD>" was mangled by
    # credential redaction and was a syntax error; restore the form read.
    password = request.forms.password
    root_path = request.forms.root_path
    # the checkbox arrives as the string 'true'/'false'; coerce to bool
    isadmin = request.forms.isadmin == 'true'
    params = {
        'username': username,
        'password': password,
        'root_path': root_path,
        'isadmin': isadmin
    }
    data = addAdmin(params)
    return return_Json(data)
# Delete Admin
@route('/delAdmin', method='DELETE')
@login_required
def deladmin():
username = request.forms.username
data = delAdmin(username)
return return_Json(data)
# CPU,硬盘,系统信息
@route('/getSystem', method='GET')
@login_required
def getsystem():
system = getSys()
return return_Json(system)
# 内存,网络
@route('/getSync', method='GET')
@login_required
def getsync():
mem = getMem()
return return_Json(mem)
# 进程列表
@route('/htop', method='GET')
@login_required
def gethtop():
top = getHtop()
return return_Json(top)
# 目录获取
@route('/getPath', method='GET')
@login_required
def getpath():
path = request.query.path
default_path = getUser()
if default_path['status'] == 0:
default_path = default_path['user']['root_path']
data = getPath(default_path, path)
return data.getJson()
return default_path | 0.189334 | 0.036494 |
import unittest
from touchdown.core import errors
from touchdown.core.workspace import Workspace
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.fixtures.aws import VpcFixture
from touchdown.tests.stubs.aws import NetworkAclStubber
class TestNetworkAclCreation(StubberTestCase):
    """Apply-goal behaviour for the network ACL resource."""

    def test_create_network_acl(self):
        goal = self.create_goal("apply")
        vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
        network_acl = self.fixtures.enter_context(
            NetworkAclStubber(
                goal.get_service(
                    vpcf.vpc.add_network_acl(name="test-network-acl"), "apply"
                )
            )
        )
        # The ACL does not exist yet, so the plan should create and tag it
        network_acl.add_describe_network_acls_empty_response_by_name()
        network_acl.add_describe_network_acls_empty_response_by_name()
        network_acl.add_create_network_acl()
        network_acl.add_create_tags(Name="test-network-acl.1")
        # Wait for it to exist...
        network_acl.add_describe_network_acls_empty_response_by_name()
        network_acl.add_describe_network_acls_empty_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        # Update local cache of remote state
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        goal.execute()

    def test_create_network_acl_idempotent(self):
        # An ACL that already exists should yield an empty plan / no changes
        goal = self.create_goal("apply")
        vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
        network_acl = self.fixtures.enter_context(
            NetworkAclStubber(
                goal.get_service(
                    vpcf.vpc.add_network_acl(name="test-network-acl"), "apply"
                )
            )
        )
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(network_acl.resource)), 0)
class TestNetworkAclDestroy(StubberTestCase):
    """Destroy-goal behaviour for the network ACL resource."""

    def test_destroy_network_acl(self):
        goal = self.create_goal("destroy")
        vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
        network_acl = self.fixtures.enter_context(
            NetworkAclStubber(
                goal.get_service(
                    vpcf.vpc.add_network_acl(name="test-network-acl"), "destroy"
                )
            )
        )
        # The ACL exists, so destroy should issue a delete call
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_delete_network_acl()
        goal.execute()

    def test_destroy_network_acl_idempotent(self):
        # A missing ACL should yield an empty plan / no changes
        goal = self.create_goal("destroy")
        vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
        network_acl = self.fixtures.enter_context(
            NetworkAclStubber(
                goal.get_service(
                    vpcf.vpc.add_network_acl(name="test-network-acl"), "destroy"
                )
            )
        )
        network_acl.add_describe_network_acls_empty_response_by_name()
        network_acl.add_describe_network_acls_empty_response_by_name()
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(network_acl.resource)), 0)
class TestNetworkAclRules(unittest.TestCase):
    """Unit tests for how network ACL rule port specifications are parsed."""

    def setUp(self):
        self.workspace = Workspace()
        self.aws = self.workspace.add_aws(region="eu-west-1")
        self.vpc = self.aws.add_vpc(name="test-vpc")

    # The positive tests below use self.assertEqual rather than bare
    # ``assert`` so the checks survive ``python -O`` and report both the
    # expected and actual values on failure, consistent with the
    # assertRaises-based tests in this class.

    def test_simple_rule_with_all_ports(self):
        """A wildcard '*' port expands to the full 1-65535 range."""
        acl = self.vpc.add_network_acl(
            name="test-acl",
            inbound=[dict(network="10.0.0.0/20", protocol="tcp", port="*")],
        )
        self.assertEqual(acl.inbound[0].port.start, 1)
        self.assertEqual(acl.inbound[0].port.end, 65535)

    def test_simple_rule_with_single_port(self):
        """A single integer port becomes a degenerate start == end range."""
        acl = self.vpc.add_network_acl(
            name="test-acl",
            inbound=[dict(network="10.0.0.0/20", protocol="tcp", port=20)],
        )
        self.assertEqual(acl.inbound[0].port.start, 20)
        self.assertEqual(acl.inbound[0].port.end, 20)

    def test_simple_rule_with_port_range(self):
        """Explicit port__start / port__end values are preserved as given."""
        acl = self.vpc.add_network_acl(
            name="test-acl",
            inbound=[
                dict(
                    network="10.0.0.0/20", protocol="tcp", port__start=20, port__end=40
                )
            ],
        )
        self.assertEqual(acl.inbound[0].port.start, 20)
        self.assertEqual(acl.inbound[0].port.end, 40)

    def test_mixing_port_and_port__start(self):
        """Supplying both ``port`` and ``port__start`` is rejected."""
        self.assertRaises(
            errors.InvalidParameter,
            self.vpc.add_network_acl,
            name="test-acl",
            inbound=[
                dict(network="10.0.0.0/20", protocol="tcp", port=20, port__start=20)
            ],
        )

    def test_mixing_port_and_port__end(self):
        """Supplying both ``port`` and ``port__end`` is rejected."""
        self.assertRaises(
            errors.InvalidParameter,
            self.vpc.add_network_acl,
            name="test-acl",
            inbound=[
                dict(network="10.0.0.0/20", protocol="tcp", port=20, port__end=20)
            ],
        )
import unittest
from touchdown.core import errors
from touchdown.core.workspace import Workspace
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.fixtures.aws import VpcFixture
from touchdown.tests.stubs.aws import NetworkAclStubber
class TestNetworkAclCreation(StubberTestCase):
    """Apply-goal tests for creating an EC2 network ACL against the stubbed AWS API.

    Each ``add_*`` call queues one expected API request/response on the
    stubber, so the order of calls below is the exact protocol the plan
    is expected to follow.
    """

    def test_create_network_acl(self):
        """An ACL that does not exist yet gets created and tagged."""
        goal = self.create_goal("apply")
        vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
        network_acl = self.fixtures.enter_context(
            NetworkAclStubber(
                goal.get_service(
                    vpcf.vpc.add_network_acl(name="test-network-acl"), "apply"
                )
            )
        )
        # Plan phase: two describes find nothing, so create + tag are queued.
        network_acl.add_describe_network_acls_empty_response_by_name()
        network_acl.add_describe_network_acls_empty_response_by_name()
        network_acl.add_create_network_acl()
        network_acl.add_create_tags(Name="test-network-acl.1")
        # Wait for it to exist...
        network_acl.add_describe_network_acls_empty_response_by_name()
        network_acl.add_describe_network_acls_empty_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        # Update local cache of remote state
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        goal.execute()

    def test_create_network_acl_idempotent(self):
        """Applying when the ACL already exists plans zero changes."""
        goal = self.create_goal("apply")
        vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
        network_acl = self.fixtures.enter_context(
            NetworkAclStubber(
                goal.get_service(
                    vpcf.vpc.add_network_acl(name="test-network-acl"), "apply"
                )
            )
        )
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(network_acl.resource)), 0)
class TestNetworkAclDestroy(StubberTestCase):
    """Destroy-goal tests for deleting an EC2 network ACL against the stubbed AWS API."""

    def test_destroy_network_acl(self):
        """An existing ACL is found by the describes and then deleted."""
        goal = self.create_goal("destroy")
        vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
        network_acl = self.fixtures.enter_context(
            NetworkAclStubber(
                goal.get_service(
                    vpcf.vpc.add_network_acl(name="test-network-acl"), "destroy"
                )
            )
        )
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_describe_network_acls_one_response_by_name()
        network_acl.add_delete_network_acl()
        goal.execute()

    def test_destroy_network_acl_idempotent(self):
        """Destroying an ACL that does not exist plans zero changes."""
        goal = self.create_goal("destroy")
        vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
        network_acl = self.fixtures.enter_context(
            NetworkAclStubber(
                goal.get_service(
                    vpcf.vpc.add_network_acl(name="test-network-acl"), "destroy"
                )
            )
        )
        network_acl.add_describe_network_acls_empty_response_by_name()
        network_acl.add_describe_network_acls_empty_response_by_name()
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(network_acl.resource)), 0)
class TestNetworkAclRules(unittest.TestCase):
    """Unit tests for how network ACL rule port specifications are parsed."""

    def setUp(self):
        self.workspace = Workspace()
        self.aws = self.workspace.add_aws(region="eu-west-1")
        self.vpc = self.aws.add_vpc(name="test-vpc")

    # The positive tests below use self.assertEqual rather than bare
    # ``assert`` so the checks survive ``python -O`` and report both the
    # expected and actual values on failure, consistent with the
    # assertRaises-based tests in this class.

    def test_simple_rule_with_all_ports(self):
        """A wildcard '*' port expands to the full 1-65535 range."""
        acl = self.vpc.add_network_acl(
            name="test-acl",
            inbound=[dict(network="10.0.0.0/20", protocol="tcp", port="*")],
        )
        self.assertEqual(acl.inbound[0].port.start, 1)
        self.assertEqual(acl.inbound[0].port.end, 65535)

    def test_simple_rule_with_single_port(self):
        """A single integer port becomes a degenerate start == end range."""
        acl = self.vpc.add_network_acl(
            name="test-acl",
            inbound=[dict(network="10.0.0.0/20", protocol="tcp", port=20)],
        )
        self.assertEqual(acl.inbound[0].port.start, 20)
        self.assertEqual(acl.inbound[0].port.end, 20)

    def test_simple_rule_with_port_range(self):
        """Explicit port__start / port__end values are preserved as given."""
        acl = self.vpc.add_network_acl(
            name="test-acl",
            inbound=[
                dict(
                    network="10.0.0.0/20", protocol="tcp", port__start=20, port__end=40
                )
            ],
        )
        self.assertEqual(acl.inbound[0].port.start, 20)
        self.assertEqual(acl.inbound[0].port.end, 40)

    def test_mixing_port_and_port__start(self):
        """Supplying both ``port`` and ``port__start`` is rejected."""
        self.assertRaises(
            errors.InvalidParameter,
            self.vpc.add_network_acl,
            name="test-acl",
            inbound=[
                dict(network="10.0.0.0/20", protocol="tcp", port=20, port__start=20)
            ],
        )

    def test_mixing_port_and_port__end(self):
        """Supplying both ``port`` and ``port__end`` is rejected."""
        self.assertRaises(
            errors.InvalidParameter,
            self.vpc.add_network_acl,
            name="test-acl",
            inbound=[
                dict(network="10.0.0.0/20", protocol="tcp", port=20, port__end=20)
            ],
        )
from oneflow.compatible.single_client.experimental.load_mnist import load_mnist
from oneflow.compatible.single_client.ops.data_ops import (
BlobConf,
ImageCodec,
ImagePreprocessor,
ImageResizePreprocessor,
NormByChannelPreprocessor,
RawCodec,
decode_ofrecord,
decode_random,
)
from oneflow.compatible.single_client.ops.data_ops import (
image_decoder_random_crop_resize,
)
from oneflow.compatible.single_client.ops.data_ops import (
image_decoder_random_crop_resize as ImageDecoderRandomCropResize,
)
from oneflow.compatible.single_client.ops.data_ops import (
ofrecord_loader,
ofrecord_reader,
onerec_reader,
)
from oneflow.compatible.single_client.ops.user_data_ops import OFRecordBytesDecoder
from oneflow.compatible.single_client.ops.user_data_ops import (
OFRecordBytesDecoder as ofrecord_bytes_decoder,
)
from oneflow.compatible.single_client.ops.user_data_ops import OFRecordImageDecoder
from oneflow.compatible.single_client.ops.user_data_ops import (
OFRecordImageDecoder as ofrecord_image_decoder,
)
from oneflow.compatible.single_client.ops.user_data_ops import OFRecordRawDecoder
from oneflow.compatible.single_client.ops.user_data_ops import (
OFRecordRawDecoder as ofrecord_raw_decoder,
)
from oneflow.compatible.single_client.ops.user_data_ops import OneRecDecoder
from oneflow.compatible.single_client.ops.user_data_ops import (
OneRecDecoder as onerec_decoder,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_coco_reader as coco_reader,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_ofrecord_image_decoder_random_crop as OFRecordImageDecoderRandomCrop,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_ofrecord_image_decoder_random_crop as ofrecord_image_decoder_random_crop,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
gpt_data_loader as MegatronGPTMMapDataLoader,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
gpt_data_loader as megatron_gpt_mmap_data_loader,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
ofrecord_image_classification_reader,
) | python/oneflow/compatible/single_client/data.py | from oneflow.compatible.single_client.experimental.load_mnist import load_mnist
from oneflow.compatible.single_client.ops.data_ops import (
BlobConf,
ImageCodec,
ImagePreprocessor,
ImageResizePreprocessor,
NormByChannelPreprocessor,
RawCodec,
decode_ofrecord,
decode_random,
)
from oneflow.compatible.single_client.ops.data_ops import (
image_decoder_random_crop_resize,
)
from oneflow.compatible.single_client.ops.data_ops import (
image_decoder_random_crop_resize as ImageDecoderRandomCropResize,
)
from oneflow.compatible.single_client.ops.data_ops import (
ofrecord_loader,
ofrecord_reader,
onerec_reader,
)
from oneflow.compatible.single_client.ops.user_data_ops import OFRecordBytesDecoder
from oneflow.compatible.single_client.ops.user_data_ops import (
OFRecordBytesDecoder as ofrecord_bytes_decoder,
)
from oneflow.compatible.single_client.ops.user_data_ops import OFRecordImageDecoder
from oneflow.compatible.single_client.ops.user_data_ops import (
OFRecordImageDecoder as ofrecord_image_decoder,
)
from oneflow.compatible.single_client.ops.user_data_ops import OFRecordRawDecoder
from oneflow.compatible.single_client.ops.user_data_ops import (
OFRecordRawDecoder as ofrecord_raw_decoder,
)
from oneflow.compatible.single_client.ops.user_data_ops import OneRecDecoder
from oneflow.compatible.single_client.ops.user_data_ops import (
OneRecDecoder as onerec_decoder,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_coco_reader as coco_reader,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_ofrecord_image_decoder_random_crop as OFRecordImageDecoderRandomCrop,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_ofrecord_image_decoder_random_crop as ofrecord_image_decoder_random_crop,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
gpt_data_loader as MegatronGPTMMapDataLoader,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
gpt_data_loader as megatron_gpt_mmap_data_loader,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
ofrecord_image_classification_reader,
) | 0.387343 | 0.169028 |
from django.utils import timezone
from rest_framework import serializers
from core.models import User
from cases.models import (
Case,
CaseType,
CaseWorkflow,
)
from cases.models import (
Product,
ExportSource,
Sector,
)
from organisations.models import Organisation
from security.constants import (
SECURITY_GROUP_ORGANISATION_OWNER,
ROLE_APPLICANT,
)
from security.models import OrganisationCaseRole
TEST_EMAIL = "<EMAIL>" # /PS-IGNORE
TEST_PASSWORD = "<PASSWORD>"
class OrganisationSerializer(serializers.ModelSerializer):
    """Minimal read serializer exposing an organisation's id and name."""

    class Meta:
        model = Organisation
        fields = ["id", "name"]
def create_user(email):
    """create a user.

    It creates a test organisation for the user and makes the user an
    organisation owner.

    :param str email: email address for the new test user.
    :return: the created User instance.
    """
    user = User.objects.create_user(
        name="test user",
        email=email,
        # Fix: the source contained the redaction placeholder ``<PASSWORD>``,
        # which is not valid Python; use the module-level test constant.
        password=TEST_PASSWORD,
        groups=[SECURITY_GROUP_ORGANISATION_OWNER],
        country="GB",
        timezone="Europe/London",
        phone="012345678",
        organisation_name="Test Organisation",
        organisation_country="GB",
        companies_house_id="TE5 TS1",
        organisation_address="Test address",
    )
    return user
class UserSerializer(serializers.ModelSerializer):
    """Serializer exposing a user's id, email and owned organisations."""

    organisations = OrganisationSerializer(many=True, read_only=True)

    class Meta:
        model = User
        fields = ["id", "email", "organisations"]

    def create(self, validated_data):
        """Create a test user (and its organisation) from the posted email."""
        return create_user(validated_data.pop("email"))
class CaseSerializer(serializers.ModelSerializer):
    class Meta:
        model = Case
        fields = ["id", "name"]

    def create(self, validated_data):
        """create.

        Creates a test Case for an organisation user.
        Creates the organisation user if it doesn't exist. The user is the applicant.
        Creates all the objects required for a valid case:
            workflow
            product
            export source
        The case is initiated so it will be available in the list of cases in public.

        :param (dict) validated_data: user email (defaults to TEST_EMAIL).
        :return: the created, initiated Case.
        """
        user_email = validated_data.get("email", TEST_EMAIL)
        user_owner = User.objects.filter(email=user_email).first()
        if not user_owner:
            user_owner = create_user(user_email)
        organisation = Organisation.objects.get(
            pk=user_owner.organisation.organisation_id
        )
        case_type = CaseType.objects.get(acronym="AD")
        case = Case.objects.create(name="Test Case", created_by=user_owner, type=case_type)
        CaseWorkflow.objects.snapshot_from_template(case, case.type.workflow)
        organisation.assign_case(case, ROLE_APPLICANT)
        case.assign_organisation_user(user_owner, organisation)
        # get a sector for the product
        sector = Sector.objects.all().first()
        # create a product and an export source; they are linked to the case
        # on creation, so the returned objects themselves are not needed
        # (the unused ``case_id``/``product``/``export_source`` locals of the
        # original have been dropped).
        Product.objects.create(
            case=case,
            name="TP",
            sector=sector,
            description="Test Product",
        )
        ExportSource.objects.create(
            case=case, country="AL", last_modified=timezone.now()
        )
        caserole = OrganisationCaseRole.objects.get(case=case)
        caserole.approved_at = timezone.now()
        caserole.save()
        # set initiated date to make it appear in the list of cases on public
        case.initiated_at = timezone.now()
        case.save()
return case | trade_remedies_api/api_test/serializers.py | from django.utils import timezone
from rest_framework import serializers
from core.models import User
from cases.models import (
Case,
CaseType,
CaseWorkflow,
)
from cases.models import (
Product,
ExportSource,
Sector,
)
from organisations.models import Organisation
from security.constants import (
SECURITY_GROUP_ORGANISATION_OWNER,
ROLE_APPLICANT,
)
from security.models import OrganisationCaseRole
TEST_EMAIL = "<EMAIL>" # /PS-IGNORE
TEST_PASSWORD = "<PASSWORD>"
class OrganisationSerializer(serializers.ModelSerializer):
    """Minimal read serializer exposing an organisation's id and name."""

    class Meta:
        model = Organisation
        fields = ["id", "name"]
def create_user(email):
    """create a user.

    It creates a test organisation for the user and makes the user an
    organisation owner.

    :param str email: email address for the new test user.
    :return: the created User instance.
    """
    user = User.objects.create_user(
        name="test user",
        email=email,
        # Fix: the source contained the redaction placeholder ``<PASSWORD>``,
        # which is not valid Python; use the module-level test constant.
        password=TEST_PASSWORD,
        groups=[SECURITY_GROUP_ORGANISATION_OWNER],
        country="GB",
        timezone="Europe/London",
        phone="012345678",
        organisation_name="Test Organisation",
        organisation_country="GB",
        companies_house_id="TE5 TS1",
        organisation_address="Test address",
    )
    return user
class UserSerializer(serializers.ModelSerializer):
    """Serializer exposing a user's id, email and owned organisations."""

    organisations = OrganisationSerializer(many=True, read_only=True)

    class Meta:
        model = User
        fields = ["id", "email", "organisations"]

    def create(self, validated_data):
        """Create a test user (and its organisation) from the posted email."""
        return create_user(validated_data.pop("email"))
class CaseSerializer(serializers.ModelSerializer):
    class Meta:
        model = Case
        fields = ["id", "name"]

    def create(self, validated_data):
        """create.

        Creates a test Case for an organisation user.
        Creates the organisation user if it doesn't exist. The user is the applicant.
        Creates all the objects required for a valid case:
            workflow
            product
            export source
        The case is initiated so it will be available in the list of cases in public.

        :param (dict) validated_data: user email (defaults to TEST_EMAIL).
        :return: the created, initiated Case.
        """
        user_email = validated_data.get("email", TEST_EMAIL)
        user_owner = User.objects.filter(email=user_email).first()
        if not user_owner:
            user_owner = create_user(user_email)
        organisation = Organisation.objects.get(
            pk=user_owner.organisation.organisation_id
        )
        case_type = CaseType.objects.get(acronym="AD")
        case = Case.objects.create(name="Test Case", created_by=user_owner, type=case_type)
        CaseWorkflow.objects.snapshot_from_template(case, case.type.workflow)
        organisation.assign_case(case, ROLE_APPLICANT)
        case.assign_organisation_user(user_owner, organisation)
        # get a sector for the product
        sector = Sector.objects.all().first()
        # create a product and an export source; they are linked to the case
        # on creation, so the returned objects themselves are not needed
        # (the unused ``case_id``/``product``/``export_source`` locals of the
        # original have been dropped).
        Product.objects.create(
            case=case,
            name="TP",
            sector=sector,
            description="Test Product",
        )
        ExportSource.objects.create(
            case=case, country="AL", last_modified=timezone.now()
        )
        caserole = OrganisationCaseRole.objects.get(case=case)
        caserole.approved_at = timezone.now()
        caserole.save()
        # set initiated date to make it appear in the list of cases on public
        case.initiated_at = timezone.now()
        case.save()
        return case
from lazy_property import LazyProperty
from datetime import datetime
import re as regex
import json
import requests
from urllib.parse import urlparse, parse_qs
from bs4 import BeautifulSoup, Tag
from markdownify import markdownify
class Notice:
    '''
    Notice of KNU CSE official notice post

    example: https://computer.knu.ac.kr/06_sub/02_sub.html?no=3739&bbs_cmd=view
    '''
    post_url = 'https://computer.knu.ac.kr/06_sub/02_sub.html'
    down_url = "https://computer.knu.ac.kr/pack/bbs/down.php"

    def __init__(self, url: str, is_announcement: bool = False):
        """
        :param url: URL of the notice post; must contain a ``no`` query param.
        :param is_announcement: whether the post is a pinned announcement.
        """
        self.__url = url
        self.__is_announcement = is_announcement
        query = parse_qs(urlparse(url).query)
        # Bug fix: parse_qs maps each key to a *list* of values, so take the
        # first element before converting -- int(['3739']) raises TypeError.
        self.id = int(query['no'][0])

    @LazyProperty
    def __soup(self) -> BeautifulSoup:
        # Lazily fetched and cached: the notice page is downloaded at most once.
        response = requests.get(self.url)
        soup = BeautifulSoup(response.text, 'html.parser')
        return soup

    @property
    def url(self) -> str:
        """Canonical view URL reconstructed from the post id."""
        return f'{Notice.post_url}?no={self.id}&bbs_cmd=view'

    @property
    def title(self) -> str:
        title = self.__soup.find('div', attrs={'class': 'kboard-title'})
        return title.text

    @LazyProperty
    def timestamp(self) -> datetime:
        timestamp = (self.__soup
            .find('div', attrs={'class':'detail-attr detail-date'})
            .find('div', attrs={'class':'detail-value'}).text
        )
        return datetime.strptime(timestamp, '%Y-%m-%d %H:%M')

    @property
    def body(self) -> str:
        return self.__soup.find('div', attrs={'class':'content-view'}).text

    @property
    def is_announcement(self) -> bool:
        # Bug fix: returning ``self.is_announcement`` re-entered this property
        # and recursed forever; return the attribute set in __init__ instead.
        return self.__is_announcement

    @property
    def has_mileage(self) -> bool:
        """Whether the notice mentions mileage (마일리지) in its title or body."""
        return '마일리지' in self.title or '마일리지' in self.body

    def to_markdown(self) -> str:
        '''
        convert Notice HTML code into markdown
        '''
        source = self.__soup.find('div', attrs={'id': 'kboard-document'})
        document = str(markdownify(source, heading_style="ATX"))
        # Rewrite site-relative paths into absolute URLs.
        document = (document
            .replace('/_files/userfile/image', 'https://computer.knu.ac.kr/_files/userfile/image')
            .replace('/pack/bbs/down.php', Notice.down_url)
            .replace('?key', Notice.post_url+'?key')
            .replace('?bbs_cmd', Notice.post_url+'?bbs_cmd')
            .replace(']첨부파일', '] \n\n첨부파일')  # line break before the attachments section
        )
        # Remove spaces inside download-link query values so the URLs stay valid.
        for match in regex.finditer(r'o_name=.+?&', document):
            start, end = match.start(), match.end()
            document = document[:start] + document[start:end].replace(' ', '') + document[end:]
        return document

    def to_json(self) -> str:
        """Serialize the notice to a JSON object string."""
        # Bug fix: the original built a *set*, which json.dumps cannot
        # serialize (and neither is a datetime); emit a keyed mapping with an
        # ISO-8601 timestamp instead.
        source = {
            'id': self.id,
            'title': self.title,
            'timestamp': self.timestamp.isoformat(),
            'body': self.body,
        }
        return json.dumps(source, indent=2, sort_keys=True)

    def __str__(self) -> str:
        return '\n'.join([
            f'url: {self.url}',
            f'\ttitle: {self.title[:10]}...',
            f'\tid: {self.id}',
            f'\ttimestamp: {self.timestamp}',
            f'\tbody: {self.body[:10]}...'
        ]).expandtabs(4)

    def __repr__(self) -> str:
        return '\n'.join([
            f'url: {self.url}',
            f'\ttitle: {self.title}...',
            f'\tid: {self.id}',
            f'\ttimestamp: {self.timestamp}',
            f'\tbody: {self.body}...'
        ]).expandtabs(4)
class NoticeFactory:
    '''
    Create Notice of KNU CSE official notice lists \n
    example: https://computer.knu.ac.kr/06_sub/02_sub.html
    '''
    @staticmethod
    def __from_notice_list_each(notice: Tag) -> 'Notice':
        '''
        create Notice from notice list in website \n
        example: https://computer.knu.ac.kr/06_sub/02_sub.html
        '''
        # Remove decorative <span> elements before reading the row's text
        # (a plain loop replaces the original side-effect list comprehension).
        for span in notice.select('span'):
            span.extract()
        url = Notice.post_url + notice.find('a')['href']
        is_announcement = notice.find('th', attrs={'class': 'bbs_num'}).text == '공지'
        return Notice(url, is_announcement)

    @staticmethod
    def from_notice_list() -> list['Notice']:
        '''
        create a list of Notice from the knu cse notice list \n
        example: https://computer.knu.ac.kr/06_sub/02_sub.html
        '''
        # Bug fix: requests.get() was called with no URL, a guaranteed
        # TypeError at runtime; fetch the notice-list page.
        res = requests.get(Notice.post_url)
        soup = BeautifulSoup(res.text, 'html.parser')
        notices = soup.find_all('tr')  # rows of the general notice table
return [NoticeFactory.__from_notice_list_each(notice) for notice in notices] | src/comp_crawling/notice.py | from lazy_property import LazyProperty
from datetime import datetime
import re as regex
import json
import requests
from urllib.parse import urlparse, parse_qs
from bs4 import BeautifulSoup, Tag
from markdownify import markdownify
class Notice:
    '''
    Notice of KNU CSE official notice post

    example: https://computer.knu.ac.kr/06_sub/02_sub.html?no=3739&bbs_cmd=view
    '''
    post_url = 'https://computer.knu.ac.kr/06_sub/02_sub.html'
    down_url = "https://computer.knu.ac.kr/pack/bbs/down.php"

    def __init__(self, url: str, is_announcement: bool = False):
        """
        :param url: URL of the notice post; must contain a ``no`` query param.
        :param is_announcement: whether the post is a pinned announcement.
        """
        self.__url = url
        self.__is_announcement = is_announcement
        query = parse_qs(urlparse(url).query)
        # Bug fix: parse_qs maps each key to a *list* of values, so take the
        # first element before converting -- int(['3739']) raises TypeError.
        self.id = int(query['no'][0])

    @LazyProperty
    def __soup(self) -> BeautifulSoup:
        # Lazily fetched and cached: the notice page is downloaded at most once.
        response = requests.get(self.url)
        soup = BeautifulSoup(response.text, 'html.parser')
        return soup

    @property
    def url(self) -> str:
        """Canonical view URL reconstructed from the post id."""
        return f'{Notice.post_url}?no={self.id}&bbs_cmd=view'

    @property
    def title(self) -> str:
        title = self.__soup.find('div', attrs={'class': 'kboard-title'})
        return title.text

    @LazyProperty
    def timestamp(self) -> datetime:
        timestamp = (self.__soup
            .find('div', attrs={'class':'detail-attr detail-date'})
            .find('div', attrs={'class':'detail-value'}).text
        )
        return datetime.strptime(timestamp, '%Y-%m-%d %H:%M')

    @property
    def body(self) -> str:
        return self.__soup.find('div', attrs={'class':'content-view'}).text

    @property
    def is_announcement(self) -> bool:
        # Bug fix: returning ``self.is_announcement`` re-entered this property
        # and recursed forever; return the attribute set in __init__ instead.
        return self.__is_announcement

    @property
    def has_mileage(self) -> bool:
        """Whether the notice mentions mileage (마일리지) in its title or body."""
        return '마일리지' in self.title or '마일리지' in self.body

    def to_markdown(self) -> str:
        '''
        convert Notice HTML code into markdown
        '''
        source = self.__soup.find('div', attrs={'id': 'kboard-document'})
        document = str(markdownify(source, heading_style="ATX"))
        # Rewrite site-relative paths into absolute URLs.
        document = (document
            .replace('/_files/userfile/image', 'https://computer.knu.ac.kr/_files/userfile/image')
            .replace('/pack/bbs/down.php', Notice.down_url)
            .replace('?key', Notice.post_url+'?key')
            .replace('?bbs_cmd', Notice.post_url+'?bbs_cmd')
            .replace(']첨부파일', '] \n\n첨부파일')  # line break before the attachments section
        )
        # Remove spaces inside download-link query values so the URLs stay valid.
        for match in regex.finditer(r'o_name=.+?&', document):
            start, end = match.start(), match.end()
            document = document[:start] + document[start:end].replace(' ', '') + document[end:]
        return document

    def to_json(self) -> str:
        """Serialize the notice to a JSON object string."""
        # Bug fix: the original built a *set*, which json.dumps cannot
        # serialize (and neither is a datetime); emit a keyed mapping with an
        # ISO-8601 timestamp instead.
        source = {
            'id': self.id,
            'title': self.title,
            'timestamp': self.timestamp.isoformat(),
            'body': self.body,
        }
        return json.dumps(source, indent=2, sort_keys=True)

    def __str__(self) -> str:
        return '\n'.join([
            f'url: {self.url}',
            f'\ttitle: {self.title[:10]}...',
            f'\tid: {self.id}',
            f'\ttimestamp: {self.timestamp}',
            f'\tbody: {self.body[:10]}...'
        ]).expandtabs(4)

    def __repr__(self) -> str:
        return '\n'.join([
            f'url: {self.url}',
            f'\ttitle: {self.title}...',
            f'\tid: {self.id}',
            f'\ttimestamp: {self.timestamp}',
            f'\tbody: {self.body}...'
        ]).expandtabs(4)
class NoticeFactory:
    '''
    Create Notice of KNU CSE official notice lists \n
    example: https://computer.knu.ac.kr/06_sub/02_sub.html
    '''
    @staticmethod
    def __from_notice_list_each(notice: Tag) -> 'Notice':
        '''
        create Notice from notice list in website \n
        example: https://computer.knu.ac.kr/06_sub/02_sub.html
        '''
        # Remove decorative <span> elements before reading the row's text
        # (a plain loop replaces the original side-effect list comprehension).
        for span in notice.select('span'):
            span.extract()
        url = Notice.post_url + notice.find('a')['href']
        is_announcement = notice.find('th', attrs={'class': 'bbs_num'}).text == '공지'
        return Notice(url, is_announcement)

    @staticmethod
    def from_notice_list() -> list['Notice']:
        '''
        create a list of Notice from the knu cse notice list \n
        example: https://computer.knu.ac.kr/06_sub/02_sub.html
        '''
        # Bug fix: requests.get() was called with no URL, a guaranteed
        # TypeError at runtime; fetch the notice-list page.
        res = requests.get(Notice.post_url)
        soup = BeautifulSoup(res.text, 'html.parser')
        notices = soup.find_all('tr')  # rows of the general notice table
return [NoticeFactory.__from_notice_list_each(notice) for notice in notices] | 0.500977 | 0.123709 |
from itertools import product
from typing import List, Optional, Dict, Tuple
from matplotlib.axes import Axes
from mpl_format.axes.axes_formatter import AxesFormatter
from mpl_format.text.text_utils import map_text, wrap_text
from pandas import Series, isnull, DataFrame, pivot_table, notnull
from probability.distributions import BetaBinomialConjugate
from seaborn import heatmap
from survey.constants import CATEGORY_SPLITTER
from survey.mixins.data_mixins import MultiCategoryDataMixin
from survey.mixins.data_types.categorical_mixin import CategoricalMixin
from survey.mixins.named import NamedMixin
from survey.questions._abstract.question import Question
from survey.utils.plots import draw_vertical_dividers, draw_horizontal_dividers
class RankedChoiceQuestion(
    NamedMixin,
    MultiCategoryDataMixin,
    CategoricalMixin,
    Question
):
    """
    A question where Respondents are asked to rank various choices in order of
    preference.

    e.g. for 10 items 1 is the most favourite and 10 is the least favourite.
    """
    def __init__(self, name: str, text: str, categories: List[str],
                 data: Optional[Series] = None):
        """
        Create a new Ranked Choice question.

        :param name: A pythonic name for the question.
        :param text: The text asked in the question.
        :param categories: The list of possible choices.
        :param data: Optional pandas Series of responses.
        """
        self._set_name_and_text(name, text)
        self._set_categories(categories)
        self.data = data

    def _validate_data(self, data: Series):
        """
        Raise a ValueError if any ranked selection is not a known category.
        """
        unique = set([selection for ix, item in data.iteritems()
                      for selection in item.split(CATEGORY_SPLITTER)
                      if notnull(selection)])
        errors = []
        for unique_val in unique:
            if unique_val not in self._categories:
                errors.append(f'"{unique_val}" is not in categories.')
        if errors:
            raise ValueError('\n'.join(errors))

    def _rank_pivot(self, data: Series,
                    choice_col: str, rank_col: str,
                    count_col: str) -> DataFrame:
        """
        Build a choices x ranks table of response counts.

        Shared by plot_distribution and distribution_table, whose original
        implementations duplicated this logic verbatim and only differed in
        the column labels used.

        :param data: Series of splitter-joined ranked selections.
        :param choice_col: Label for the choice axis of the result.
        :param rank_col: Label for the rank axis of the result.
        :param count_col: Label for the intermediate count column.
        """
        order_counts = []
        for _, str_user_order in data.iteritems():
            # skip respondents who were not asked / did not answer
            if isnull(str_user_order):
                continue
            user_order = str_user_order.split(CATEGORY_SPLITTER)
            for i in range(len(user_order)):
                order_counts.append({
                    choice_col: user_order[i],
                    rank_col: i + 1,
                })
        counts = DataFrame(order_counts).groupby([
            choice_col, rank_col
        ]).size().reset_index().rename(columns={0: count_col})
        return pivot_table(
            data=counts, index=choice_col,
            columns=rank_col, values=count_col
        ).reindex(self.categories)

    def make_features(self, answers: Series = None,
                      drop_na: bool = True,
                      naming: str = '{{name}}: {{choice}}',
                      normalize: bool = True) -> DataFrame:
        """
        Create DataFrame of features for use in ML.

        :param answers: Answers to the Question from a Survey. If left as None
                        then use the Question's attached data.
        :param drop_na: Whether to drop null rows (rows where respondent was
                        not asked a question).
        :param naming: Pattern to use to name the columns.
        :param normalize: Option to normalize data with min and max approach.
        """
        if answers is None:
            answers = self._data
        if drop_na:
            # drop respondents that weren't asked the question
            answers = answers.dropna()
        feature_list = []
        if len(answers) > 0:
            # one row per respondent: {choice: rank position (1-based)}
            for _, str_selections in answers.iteritems():
                feature_dict = {}
                selections = str_selections.split(CATEGORY_SPLITTER)
                for i in range(len(selections)):
                    feature_dict.update({selections[i]: i + 1})
                feature_list.append(feature_dict)
            features = DataFrame(data=feature_list, index=answers.index,
                                 columns=self.categories)
        else:
            # create empty dataframe with the right columns
            features = DataFrame(columns=self.categories, index=answers.index)
        # rename columns
        features.columns = [
            naming.replace('{{name}}', self.name)
            .replace('{{choice}}', choice)
            for choice in features.columns
        ]
        # normalize
        if normalize:
            features = (
                (features - features.min()) /
                (features.max() - features.min())
            )
        else:
            # set datatype
            features = features.astype(int)
        return features

    def significance__one_vs_any(self) -> Series:
        """
        Return the probability that one choice is ranked higher than a randomly
        selected other choice.
        """
        data = self.make_features(naming='{{choice}}')
        sums: Series = data.sum()
        n = len(data)
        results = []
        for category in self.categories:
            rest = [c for c in self.categories if c != category]
            m_one = sums[category]
            # compare against the average rank-sum of all other choices
            m_rest = sums[rest].mean()
            results.append({
                'category': category,
                'p': (
                    BetaBinomialConjugate(
                        alpha=1, beta=1, n=n, k=m_one).posterior() >
                    BetaBinomialConjugate(
                        alpha=1, beta=1, n=n, k=m_rest).posterior()
                )
            })
        return DataFrame(results).set_index('category')['p']

    def significance_one_vs_one(self) -> DataFrame:
        """
        Return the probability that each choice is ranked higher than each
        other.
        """
        data = self.make_features(naming='{{choice}}')
        sums: Series = data.sum()
        n = len(data)
        results = []
        for category_1, category_2 in product(self.categories, self.categories):
            m_1 = sums[category_1]
            m_2 = sums[category_2]
            results.append({
                'category_1': category_1,
                'category_2': category_2,
                'p': (
                    BetaBinomialConjugate(
                        alpha=1, beta=1, n=n, k=m_1).posterior() >
                    BetaBinomialConjugate(
                        alpha=1, beta=1, n=n, k=m_2).posterior()
                )
            })
        results_data = DataFrame(results)
        pt = pivot_table(data=results_data,
                         index='category_1', columns='category_2',
                         values='p')
        return pt

    def plot_distribution(self, data: Optional[Series] = None,
                          transpose: bool = False,
                          normalize: bool = False,
                          significance: bool = False,
                          sig_colors: Tuple[str, str] = ('#00ff00', '#ff0000'),
                          sig_values: Tuple[float, float] = (0.945, 0.055),
                          label_mappings: Optional[Dict[str, str]] = None,
                          ax: Optional[Axes] = None) -> Axes:
        """
        Plot the distribution of answers to the Question.

        :param data: The answers given by Respondents to the Question.
        :param transpose: Whether to transpose the labels to the y-axis.
        :param normalize: Whether to normalize number of responses in each
                          position to total number of responses.
        :param significance: Whether to highlight significant choices.
        :param sig_colors: Tuple of (high, low) colors for highlighting
                           significance.
        :param sig_values: Tuple of (high, low) values for assessing
                           significance.
        :param label_mappings: Optional dict of replacements for labels.
        :param ax: Optional matplotlib axes to plot on.
        """
        data = data if data is not None else self._data
        if data is None:
            raise ValueError('No data!')
        pivot = self._rank_pivot(data, 'choice', 'rank', 'count')
        pivot.index = wrap_text(map_text(pivot.index,
                                         mapping=label_mappings or {}))
        if normalize:
            fmt = '.2f'
            pivot = pivot / len(data)
        else:
            fmt = '.0f'
        if transpose:
            pivot = pivot.T
        axf = AxesFormatter(axes=ax)
        ax = axf.axes
        heatmap(data=pivot, annot=True, fmt=fmt, cmap='Blues', ax=ax)
        if significance:
            cat_sigs = self.significance__one_vs_any()
            for category, sig_value in cat_sigs.iteritems():
                if sig_value <= sig_values[1]:
                    color = sig_colors[1]
                elif sig_value >= sig_values[0]:
                    color = sig_colors[0]
                else:
                    # not significant (or NaN) - nothing to highlight. Fixes a
                    # latent bug where a NaN significance could reuse (or hit
                    # an unbound) `color` from a previous iteration.
                    continue
                # draw a rectangle around the significant choice's row/column
                if not transpose:
                    x_min = 0.1
                    x_max = len(self.categories) - 0.1
                    y_min = self.categories.index(category) + 0.1
                    y_max = self.categories.index(category) + 0.9
                else:
                    y_min = 0.1
                    y_max = len(self.categories) - 0.1
                    x_min = self.categories.index(category) + 0.1
                    x_max = self.categories.index(category) + 0.9
                ax.plot(
                    [x_min, x_max, x_max, x_min, x_min],
                    [y_min, y_min, y_max, y_max, y_min],
                    color=color, linewidth=2
                )
        axf.x_axis.tick_labels.set_ha_center()
        axf.y_axis.tick_labels.set_va_center()
        if transpose:
            draw_vertical_dividers(ax)
        else:
            draw_horizontal_dividers(ax)
        axf.set_title_text(self.text)
        return ax

    def distribution_table(self, data: Optional[Series] = None,
                           significance: bool = False) -> DataFrame:
        """
        Create a table of the distribution of responses.

        :param data: The answers given by Respondents to the Question.
        :param significance: Whether to calculate significance for the
                             responses.
        """
        data = data if data is not None else self._data
        if data is None:
            raise ValueError('No data!')
        pivot = self._rank_pivot(data, 'Choice', 'Rank', 'Count')
        if significance:
            pivot['Significance'] = self.significance__one_vs_any()
        return pivot

    def __repr__(self):
        choices = ', '.join(f"'{choice}'" for choice in self._categories)
        return (
            f"RankedChoiceQuestion(\n"
            f"\tname='{self.name}',\n"
            f"\tchoices=[{choices}]\n"
            f")"
        )
from typing import List, Optional, Dict, Tuple
from matplotlib.axes import Axes
from mpl_format.axes.axes_formatter import AxesFormatter
from mpl_format.text.text_utils import map_text, wrap_text
from pandas import Series, isnull, DataFrame, pivot_table, notnull
from probability.distributions import BetaBinomialConjugate
from seaborn import heatmap
from survey.constants import CATEGORY_SPLITTER
from survey.mixins.data_mixins import MultiCategoryDataMixin
from survey.mixins.data_types.categorical_mixin import CategoricalMixin
from survey.mixins.named import NamedMixin
from survey.questions._abstract.question import Question
from survey.utils.plots import draw_vertical_dividers, draw_horizontal_dividers
class RankedChoiceQuestion(
NamedMixin,
MultiCategoryDataMixin,
CategoricalMixin,
Question
):
"""
A question where Respondents are asked to rank various choices in order of
preference.
e.g. for 10 items 1 is the most favourite and 10 is the least favourite.
"""
def __init__(self, name: str, text: str, categories: List[str],
data: Optional[Series] = None):
"""
Create a new Ranked Choice question.
:param name: A pythonic name for the question.
:param text: The text asked in the question.
:param categories: The list of possible choices.
:param data: Optional pandas Series of responses.
"""
self._set_name_and_text(name, text)
self._set_categories(categories)
self.data = data
def _validate_data(self, data: Series):
unique = set([selection for ix, item in data.iteritems()
for selection in item.split(CATEGORY_SPLITTER)
if notnull(selection)])
errors = []
for unique_val in unique:
if unique_val not in self._categories:
errors.append(f'"{unique_val}" is not in categories.')
if errors:
raise ValueError('\n'.join(errors))
def make_features(self, answers: Series = None,
drop_na: bool = True,
naming: str = '{{name}}: {{choice}}',
normalize: bool = True) -> DataFrame:
"""
Create DataFrame of features for use in ML.
:param answers: Answers to the Question from a Survey. If left as None
then use the Question's attached data.
:param drop_na: Whether to drop null rows (rows where respondent was not
asked a question).
:param naming: Pattern to use to name the columns.
:param normalize: Option to normalize data with min and max approach.
"""
if answers is None:
answers = self._data
if drop_na:
# drop respondents that weren't asked the question
answers = answers.dropna()
feature_list = []
if len(answers) > 0:
# create features
for _, str_selections in answers.iteritems():
feature_dict = {}
selections = str_selections.split(CATEGORY_SPLITTER)
for i in range(len(selections)):
feature_dict.update({selections[i]: i + 1})
feature_list.append(feature_dict)
features = DataFrame(data=feature_list, index=answers.index,
columns=self.categories)
else:
# create empty dataframe with the right columns
features = DataFrame(columns=self.categories, index=answers.index)
# rename columns
features.columns = [
naming.replace('{{name}}', self.name)
.replace('{{choice}}', choice)
for choice in features.columns
]
# normalize
if normalize:
features = (
(features - features.min()) /
(features.max() - features.min())
)
else:
# set datatype
features = features.astype(int)
return features
def significance__one_vs_any(self) -> Series:
"""
Return the probability that one choice is ranked higher than a randomly
selected other choice.
"""
data = self.make_features(naming='{{choice}}')
sums: Series = data.sum()
n = len(data)
results = []
for category in self.categories:
rest = [c for c in self.categories if c != category]
m_one = sums[category]
m_rest = sums[rest].mean()
results.append({
'category': category,
'p': (
BetaBinomialConjugate(
alpha=1, beta=1, n=n, k=m_one).posterior() >
BetaBinomialConjugate(
alpha=1, beta=1, n=n, k=m_rest).posterior()
)
})
return DataFrame(results).set_index('category')['p']
def significance_one_vs_one(self) -> DataFrame:
"""
Return the probability that each choice is ranked higher than each
other.
"""
data = self.make_features(naming='{{choice}}')
sums: Series = data.sum()
n = len(data)
results = []
for category_1, category_2 in product(self.categories, self.categories):
m_1 = sums[category_1]
m_2 = sums[category_2]
results.append({
'category_1': category_1,
'category_2': category_2,
'p': (
BetaBinomialConjugate(
alpha=1, beta=1, n=n, k=m_1).posterior() >
BetaBinomialConjugate(
alpha=1, beta=1, n=n, k=m_2).posterior()
)
})
results_data = DataFrame(results)
pt = pivot_table(data=results_data,
index='category_1', columns='category_2',
values='p')
return pt
def plot_distribution(self, data: Optional[Series] = None,
transpose: bool = False,
normalize: bool = False,
significance: bool = False,
sig_colors: Tuple[str, str] = ('#00ff00', '#ff0000'),
sig_values: Tuple[float, float] = (0.945, 0.055),
label_mappings: Optional[Dict[str, str]] = None,
ax: Optional[Axes] = None) -> Axes:
"""
Plot the distribution of answers to the Question.
:param data: The answers given by Respondents to the Question.
:param transpose: Whether to transpose the labels to the y-axis.
:param normalize: Whether to normalize number of responses in each
position to total number of responses.
:param significance: Whether to highlight significant choices.
:param sig_colors: Tuple of (high, low) colors for highlighting
significance.
:param sig_values: Tuple of (high, low) values for assessing
significance.
:param label_mappings: Optional dict of replacements for labels.
:param ax: Optional matplotlib axes to plot on.
"""
data = data if data is not None else self._data
if data is None:
raise ValueError('No data!')
order_counts = []
for index, str_user_order in data.iteritems():
if isnull(str_user_order):
continue
user_order = str_user_order.split(CATEGORY_SPLITTER)
for i in range(len(user_order)):
order_counts.append({
'choice': user_order[i],
'rank': i + 1,
})
counts = DataFrame(order_counts).groupby([
'choice', 'rank'
]).size().reset_index().rename(columns={0: 'count'})
pivot = pivot_table(
data=counts, index='choice',
columns='rank', values='count'
).reindex(self.categories)
pivot.index = wrap_text(map_text(pivot.index,
mapping=label_mappings or {}))
if normalize:
fmt = '.2f'
pivot = pivot / len(data)
else:
fmt = '.0f'
if transpose:
pivot = pivot.T
axf = AxesFormatter(axes=ax)
ax = axf.axes
heatmap(data=pivot, annot=True, fmt=fmt, cmap='Blues', ax=ax)
if significance:
cat_sigs = self.significance__one_vs_any()
for category, sig_value in cat_sigs.iteritems():
if sig_values[1] < sig_value < sig_values[0]:
continue
elif sig_value <= sig_values[1]:
color = sig_colors[1]
elif sig_value >= sig_values[0]:
color = sig_colors[0]
if not transpose:
x_min = 0.1
x_max = len(self.categories) - 0.1
y_min = self.categories.index(category) + 0.1
y_max = self.categories.index(category) + 0.9
else:
y_min = 0.1
y_max = len(self.categories) - 0.1
x_min = self.categories.index(category) + 0.1
x_max = self.categories.index(category) + 0.9
ax.plot(
[x_min, x_max, x_max, x_min, x_min],
[y_min, y_min, y_max, y_max, y_min],
color=color, linewidth=2
)
axf.x_axis.tick_labels.set_ha_center()
axf.y_axis.tick_labels.set_va_center()
if transpose:
draw_vertical_dividers(ax)
else:
draw_horizontal_dividers(ax)
axf.set_title_text(self.text)
return ax
def distribution_table(self, data: Optional[Series] = None,
significance: bool = False) -> DataFrame:
"""
Create a table of the distribution of responses.
:param data: The answers given by Respondents to the Question.
:param significance: Whether to calculate significance for the
responses.
"""
data = data if data is not None else self._data
if data is None:
raise ValueError('No data!')
order_counts = []
for index, str_user_order in data.iteritems():
if isnull(str_user_order):
continue
user_order = str_user_order.split(CATEGORY_SPLITTER)
for i in range(len(user_order)):
order_counts.append({
'Choice': user_order[i],
'Rank': i + 1,
})
counts = DataFrame(order_counts).groupby([
'Choice', 'Rank'
]).size().reset_index().rename(columns={0: 'Count'})
pivot = pivot_table(
data=counts, index='Choice',
columns='Rank', values='Count'
).reindex(self.categories)
if significance:
pivot['Significance'] = self.significance__one_vs_any()
return pivot
def __repr__(self):
choices = ', '.join(f"'{choice}'" for choice in self._categories)
return (
f"RankedChoiceQuestion(\n"
f"\tname='{self.name}',\n"
f"\tchoices=[{choices}]\n"
f")"
) | 0.907929 | 0.429071 |
import numpy as np
from dissimilarity import compute_dissimilarity, dissimilarity
from dipy.tracking.distances import bundles_distances_mam
from sklearn.neighbors import KDTree
from nibabel import trackvis
from dipy.tracking.utils import length
from dipy.viz import fvtk
import os
import vtk.util.colors as colors
try:
from linear_assignment import LinearAssignment
except ImportError:
print("WARNING: Cythonized LAPJV not available. Falling back to Python.")
print("WARNING: See README.txt")
try:
from joblib import Parallel, delayed
joblib_available = True
except ImportError:
joblib_available = False
def show_tract(segmented_tract, color):
    """Render the segmented tract in an interactive fvtk window.

    The streamlines are drawn as semi-transparent lines in the given color;
    the renderer is cleared again after the window is closed.
    """
    renderer = fvtk.ren()
    streamline_actor = fvtk.line(segmented_tract.tolist(),
                                 colors=color,
                                 linewidth=2,
                                 opacity=0.3)
    fvtk.add(renderer, streamline_actor)
    fvtk.show(renderer)
    fvtk.clear(renderer)
def ranking_schema(superset_estimated_target_tract_idx, superset_estimated_target_tract_cost):
    """Rank the pooled streamline ids of the superset.

    Ids selected more often (across examples) come first; ids with equal
    selection counts are ordered by ascending accumulated assignment cost.
    Returns the unique ids in that order.
    """
    idxs = np.unique(superset_estimated_target_tract_idx)
    selection_counts = np.array(
        [(superset_estimated_target_tract_idx == idx).sum() for idx in idxs])
    accumulated_costs = np.array(
        [((superset_estimated_target_tract_idx == idx)
          * superset_estimated_target_tract_cost).sum() for idx in idxs])
    # lexsort: the LAST key is primary, so sort by descending count first,
    # then break ties with ascending total cost (stable on remaining ties).
    order = np.lexsort((accumulated_costs, -selection_counts))
    return idxs[order]
def load(T_filename, threshold_short_streamlines=10.0):
    """Load a tractogram from a TRK file and remove short streamlines with
    length below threshold.

    :param T_filename: Path of the .trk file to read.
    :param threshold_short_streamlines: Minimum streamline length to keep;
        shorter streamlines are discarded as presumed artifacts.
    :return: (T, hdr) — object ndarray of streamlines and the trackvis header.
    """
    print("Loading %s" % T_filename)
    T, hdr = trackvis.read(T_filename, as_generator=False)
    # dtype=object: the np.object alias was removed in NumPy 1.24; the
    # builtin `object` is the portable spelling with identical behavior.
    T = np.array([s[0] for s in T], dtype=object)
    print("%s: %s streamlines" % (T_filename, len(T)))
    # Removing short artifactual streamlines
    print("Removing (presumably artifactual) streamlines shorter than %s" % threshold_short_streamlines)
    T = np.array([s for s in T if length(s) >= threshold_short_streamlines], dtype=object)
    print("%s: %s streamlines" % (T_filename, len(T)))
    return T, hdr
def compute_kdtree_and_dr_tractogram(tractogram, num_prototypes=None):
    """Compute the dissimilarity representation of the target tractogram and
    build the kd-tree over it.

    :param tractogram: Sequence of streamlines of the target tractogram.
    :param num_prototypes: Number of prototype streamlines for the
        dissimilarity projection; defaults to 40.
    :return: (kdt, prototypes) — KDTree built on the dissimilarity matrix,
        and the prototype streamlines themselves.
    """
    # dtype=object: the np.object alias was removed in NumPy 1.24.
    tractogram = np.array(tractogram, dtype=object)
    print("Computing dissimilarity matrices")
    if num_prototypes is None:
        num_prototypes = 40
        print("Using %s prototypes as in Olivetti et al. 2012"
              % num_prototypes)
    print("Using %s prototypes" % num_prototypes)
    dm_tractogram, prototype_idx = compute_dissimilarity(tractogram,
                                                         num_prototypes=num_prototypes,
                                                         distance=bundles_distances_mam,
                                                         prototype_policy='sff',
                                                         n_jobs=-1,
                                                         verbose=False)
    prototypes = tractogram[prototype_idx]
    print("Building the KD-tree of tractogram")
    kdt = KDTree(dm_tractogram)
    return kdt, prototypes
def NN(kdt, dm_E_t, num_NN ):
    """Code for efficient nearest neighbors computation.

    :param kdt: KDTree built on the dissimilarity representation of the
        target tractogram.
    :param dm_E_t: Dissimilarity matrix of the example tract (one row per
        example streamline).
    :param num_NN: Number of neighbours to retrieve per streamline.
    :return: NOTE(review) — the return type depends on num_NN:
        * num_NN == 1: (indices, distances, number of example streamlines),
          consumed directly as an assignment by the NN pipeline;
        * num_NN > 1: only the unique neighbour indices, used to sparsify
          the LAP cost matrix.
        Callers must match the shape they expect.
    """
    D, I = kdt.query(dm_E_t, k=num_NN)
    if num_NN==1:
        return I.squeeze(), D.squeeze(), dm_E_t.shape[0]
    else:
        return np.unique(I.flat)
def bundles_distances_mam_smarter_faster(A, B, n_jobs=-1, chunk_size=100):
    """Parallel version of bundles_distances_mam that also avoids
    computing distances twice.

    :param A: Sequence of streamlines.
    :param B: Sequence of streamlines, or None to compute the symmetric
        distance matrix of A against itself (each pair computed once).
    :param n_jobs: Number of joblib workers (-1 = all cores).
    :param chunk_size: Number of rows of A handled per parallel task.
    :return: Distance matrix, shape (len(A), len(A)) or (len(A), len(B)).
    """
    lenA = len(A)
    chunks = chunker(A, chunk_size)
    if B is None:
        dm = np.empty((lenA, lenA), dtype=np.float32)
        dm[np.diag_indices(lenA)] = 0.0
        # Compute only (roughly) the upper triangle, one chunk of rows at a
        # time. BUGFIX: honour the n_jobs parameter (was hard-coded to -1,
        # silently ignoring the caller's setting).
        results = Parallel(n_jobs=n_jobs)(delayed(bundles_distances_mam)(ss, A[i*chunk_size+1:]) for i, ss in enumerate(chunks))
        # Fill triu
        for i, res in enumerate(results):
            dm[(i*chunk_size):((i+1)*chunk_size), (i*chunk_size+1):] = res
        # Mirror the upper triangle into the lower one for symmetry.
        rows, cols = np.triu_indices(lenA, 1)
        dm[cols, rows] = dm[rows, cols]
    else:
        dm = np.vstack(Parallel(n_jobs=n_jobs)(delayed(bundles_distances_mam)(ss, B) for ss in chunks))
    return dm
def chunker(seq, size):
    """Yield successive `size`-sized slices of `seq` (last may be shorter).

    Uses range() instead of the Python-2-only xrange(), so the helper also
    works under Python 3 (range() is lazy there; on Python 2 the eager list
    is harmless for these sizes).
    """
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))
def tract_segmentation_single_example_lap (kdt_T_A, prototypes_T_A,sid, num_NN,T_A ):
    """ step 1: tract segmentation from a single example using Jonker-Volgenant algorithm (LAPJV)

    Maps every streamline of subject `sid`'s example tract onto a streamline
    of the target tractogram T_A by solving a rectangular linear assignment
    problem over a kNN-sparsified cost matrix.

    NOTE(review): relies on the module-level globals `tract_name` and
    `threshold_short_streamlines` that are set in the __main__ block.
    """
    # Example tract file for this subject and the configured tract name.
    E_t_filename= 'data/example/'+ str(sid) +'_'+str(tract_name)+'.trk'
    print("Loading Example tract: %s" % E_t_filename)
    E_t, hdr= load(E_t_filename, threshold_short_streamlines=threshold_short_streamlines)
    # Project the example tract into the target's dissimilarity space.
    dm_E_t= dissimilarity(E_t, prototypes_T_A,bundles_distances_mam)
    # Candidate target streamlines (num_NN > 1 here, so NN() returns the
    # unique neighbour indices) used to sparsify the cost matrix.
    NN_E_t_NN_Idx= NN (kdt_T_A, dm_E_t,num_NN)
    print("Computing the cost matrix with mam distance (%s x %s) for RLAP " % (len(E_t),
                                                                               len( NN_E_t_NN_Idx)))
    cost_matrix = bundles_distances_mam_smarter_faster(E_t,
                                                       T_A[NN_E_t_NN_Idx])
    print("Computing optimal assignmnet with LAPJV")
    assignment = LinearAssignment(cost_matrix).solution
    # Cost of each chosen (example streamline -> target streamline) pair.
    min_cost_values= cost_matrix[np.arange(len(cost_matrix)), assignment]
    # Target streamline ids, their assignment costs, and the example size.
    return NN_E_t_NN_Idx[assignment], min_cost_values, len(E_t)
def tract_correspondence_multiple_example_lap (kdt_T_A, prototypes_T_A,example_sunject_id_list, num_NN ):
    """ step:2 tracts generated from each example are merged together and then filtered
    in order to obtain the final segmentation of the desired tract

    NOTE(review): reads the module-level globals `T_A`, `tract_name`,
    `test_tractogram` and `hdr` set in the __main__ block. The parameter
    name `example_sunject_id_list` (sic) is kept for interface stability.
    """
    print("Extracting the estimated target tract (superset) using the RLAP")
    n_jobs=-1
    # One LAP segmentation per example subject, run in parallel; each row of
    # result_LAP is (target ids, assignment costs, example tract size).
    result_LAP= np.array(Parallel(n_jobs=n_jobs)(delayed(tract_segmentation_single_example_lap)(kdt_T_A, prototypes_T_A,sid, num_NN,T_A ) for sid in example_sunject_id_list ))
    superset_estimated_correspondence_tract_idx= np.hstack(result_LAP[:,0])
    superset_estimated_correspondence_tract_cost= np.hstack(result_LAP[:,1])
    # The median example-tract size decides how many streamlines to keep.
    example_tract_len_med=np.median(np.hstack(result_LAP[:,2]))
    print("Ranking the estimated target (superset) tract.")
    superset_estimated_correspondence_tract_idx_ranked=ranking_schema(superset_estimated_correspondence_tract_idx,
                                                                      superset_estimated_correspondence_tract_cost)
    print("Extracting the estimated target tract (until the median size (in terms of number of streamlines) of all the tracts from the example).")
    superset_estimated_correspondence_tract_idx_ranked_med=superset_estimated_correspondence_tract_idx_ranked[0:int(example_tract_len_med)]
    segmented_tract_LAP=T_A [ superset_estimated_correspondence_tract_idx_ranked_med]
    print("Saving the estimated target (superset) (.trk)")
    prefix="lap"
    save_trk( tract_name,
              test_tractogram,
              segmented_tract_LAP,
              hdr,
              prefix)
    print("Show the tract")
    color= colors.blue
    show_tract(segmented_tract_LAP,color)
def tract_segmentation_single_example_NN (kdt_T_A, prototypes_T_A,sid, num_NN,T_A ):
    """ step:1 tract segmentation from a single example using the 1-nearest
    neighbour of each example streamline.

    (The previous docstring said "using lapjv"; this variant actually uses
    the kd-tree NN query, not the assignment solver.)

    NOTE(review): relies on the module-level globals `tract_name` and
    `threshold_short_streamlines`; the `T_A` parameter is unused here but
    kept so both single-example variants share one call signature.
    """
    E_t_filename= 'data/example/'+ str(sid) +'_'+str(tract_name)+'.trk'
    print("Loading Example tract: %s" % E_t_filename)
    E_t, hdr= load(E_t_filename, threshold_short_streamlines=threshold_short_streamlines)
    # Project the example tract into the target's dissimilarity space.
    dm_E_t= dissimilarity(E_t, prototypes_T_A,bundles_distances_mam)
    # With num_NN == 1, NN() returns (indices, distances, example size).
    assignmnet, min_cost_value, len_E_T = NN (kdt_T_A, dm_E_t,num_NN)
    return assignmnet, min_cost_value, len_E_T
def tract_correspondence_multiple_example_NN (kdt_T_A, prototypes_T_A,example_subject_id_list,num_NN ):
    """ step:2 tract segmentation using multiple examples (NN variant).

    Merges the per-example 1-NN segmentations, ranks the pooled streamline
    ids, keeps the top ids up to the median example-tract size, then saves
    and displays the result.

    NOTE(review): reads the module-level globals `T_A`, `tract_name`,
    `test_tractogram` and `hdr` set in the __main__ block.
    """
    # Log message fixed: this variant uses the NN query, not the RLAP.
    print("Extracting the estimated target tract (superset) using the NN")
    n_jobs = -1
    result_NN = np.array(Parallel(n_jobs=n_jobs)(delayed(tract_segmentation_single_example_NN)(kdt_T_A, prototypes_T_A, sid, num_NN, T_A) for sid in example_subject_id_list))
    superset_estimated_correspondence_tract_idx = np.hstack(result_NN[:, 0])
    superset_estimated_correspondence_tract_cost = np.hstack(result_NN[:, 1])
    # The median example-tract size decides how many streamlines to keep.
    example_tract_len_med = np.median(np.hstack(result_NN[:, 2]))
    print("Ranking the estimated target (superset) tract.")
    superset_estimated_correspondence_tract_idx_ranked = ranking_schema(
        superset_estimated_correspondence_tract_idx,
        superset_estimated_correspondence_tract_cost)
    print("Extracting the estimated target tract (until the median size (in terms of number of streamlines) of all the tracts from the example).")
    superset_estimated_correspondence_tract_idx_ranked_med = superset_estimated_correspondence_tract_idx_ranked[0:int(example_tract_len_med)]
    segmented_tract_NN = T_A[superset_estimated_correspondence_tract_idx_ranked_med]
    # BUGFIX: was the Python-2-only statement `print len (segmented_tract_NN)`,
    # a SyntaxError under Python 3; the function form works on both.
    print(len(segmented_tract_NN))
    print("Saving the estimated target (superset) (.trk)")
    prefix = "NN"
    save_trk(tract_name,
             test_tractogram,
             segmented_tract_NN,
             hdr,
             prefix)
    print("Show the tract")
    color = colors.green
    show_tract(segmented_tract_NN, color)
def save_trk(tract_name, test_tractogram, segmented_tract_LAP, hdr, prefix):
    """Write the segmented tract to
    data/segmented_tract/<test_tractogram>_<tract_name>_<prefix>.trk,
    creating the output directory on first use.
    """
    out_dir = os.path.dirname('data/segmented_tract/')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_filename = '%s/%s_%s_%s.trk' % (out_dir, test_tractogram, tract_name, prefix)
    # trackvis expects (streamline, scalars, properties) triples.
    streamline_records = ((sl, None, None) for sl in segmented_tract_LAP)
    trackvis.write(out_filename, streamline_records, hdr)
if __name__ == '__main__':
    # Driver: load the test tractogram, build its dissimilarity KD-tree,
    # then segment the configured tract with both the NN and LAP pipelines.
    print(__doc__)
    np.random.seed(0)  # reproducible prototype selection
    # test tractogram
    test_tractogram = "100307"
    T_A_filename = 'data/test_tractogram/tractogram_b1k_1.25mm_csd_wm_mask_eudx1M.trk'
    # Main parameters:
    threshold_short_streamlines = 0.0  # Beware: discarding streamlines affects IDs
    num_NN_lap = 500  # number of nearest neighbours used to sparsify the LAP cost matrix
    num_example = 3  # NOTE(review): unused below; example_subject_id_list drives the loop
    num_prototypes = 40
    num_NN = 1
    tract_name = "uf.left"
    example_subject_id_list = ["100408", "128632", "103414"]
    # 1) load test tractogram, T_A
    T_A, hdr = load(T_A_filename, threshold_short_streamlines=threshold_short_streamlines)
    # 2) Compute the dissimilarity representation of T_A
    print("Computing the dissimilarity representation and KD-tree.")
    kdt_T_A, prototypes_T_A = compute_kdtree_and_dr_tractogram(T_A,
                                                               num_prototypes)
    print("Segmenting tract with NN")
    tract_correspondence_multiple_example_NN(kdt_T_A,
                                             prototypes_T_A,
                                             example_subject_id_list,
                                             num_NN=num_NN)
    print("Segmenting tract with lap")
    tract_correspondence_multiple_example_lap(kdt_T_A,
                                              prototypes_T_A,
                                              example_subject_id_list,
                                              num_NN=num_NN_lap)

# The line below begins the next dataset row's code; the fused row metadata
# ("| segmentation_as_NN_and_lap.py |") was stray residue and has been removed.
import numpy as np
from dissimilarity import compute_dissimilarity, dissimilarity
from dipy.tracking.distances import bundles_distances_mam
from sklearn.neighbors import KDTree
from nibabel import trackvis
from dipy.tracking.utils import length
from dipy.viz import fvtk
import os
import vtk.util.colors as colors
try:
from linear_assignment import LinearAssignment
except ImportError:
print("WARNING: Cythonized LAPJV not available. Falling back to Python.")
print("WARNING: See README.txt")
try:
from joblib import Parallel, delayed
joblib_available = True
except ImportError:
joblib_available = False
def show_tract(segmented_tract, color):
"""Visualization of the segmented tract.
"""
ren = fvtk.ren()
fvtk.add(ren, fvtk.line(segmented_tract.tolist(),
colors=color,
linewidth=2,
opacity=0.3))
fvtk.show(ren)
fvtk.clear(ren)
def ranking_schema(superset_estimated_target_tract_idx,superset_estimated_target_tract_cost):
""" Rank all the extracted streamlines estimated by the LAP with different examples (superset)
accoring to the number of times it selected and the total cost
"""
idxs = np.unique(superset_estimated_target_tract_idx)
how_many_times_selected = np.array([(superset_estimated_target_tract_idx == idx).sum() for idx in idxs])
how_much_cost = np.array([((superset_estimated_target_tract_idx == idx)*superset_estimated_target_tract_cost).sum() for idx in idxs])
ranking = np.argsort(how_many_times_selected)[::-1]
tmp = np.unique(how_many_times_selected)[::-1]
for i in tmp:
tmp1 = (how_many_times_selected == i)
tmp2 = np.where(tmp1)[0]
if tmp2.size > 1:
tmp3 = np.argsort(how_much_cost[tmp2])
ranking[how_many_times_selected[ranking]==i] = tmp2[tmp3]
return idxs[ranking]
def load(T_filename, threshold_short_streamlines=10.0):
"""Load tractogram from TRK file and remove short streamlines with
length below threshold.
"""
print("Loading %s" % T_filename)
T, hdr = trackvis.read(T_filename, as_generator=False)
T = np.array([s[0] for s in T], dtype=np.object)
print("%s: %s streamlines" % (T_filename, len(T)))
# Removing short artifactual streamlines
print("Removing (presumably artifactual) streamlines shorter than %s" % threshold_short_streamlines)
T = np.array([s for s in T if length(s) >= threshold_short_streamlines], dtype=np.object)
print("%s: %s streamlines" % (T_filename, len(T)))
return T, hdr
def compute_kdtree_and_dr_tractogram( tractogram, num_prototypes=None):
"""Compute the dissimilarity representation of the target tractogram and
build the kd-tree.
"""
tractogram = np.array(tractogram, dtype=np.object)
print("Computing dissimilarity matrices")
if num_prototypes is None:
num_prototypes = 40
print("Using %s prototypes as in Olivetti et al. 2012"
% num_prototypes)
print("Using %s prototypes" % num_prototypes)
dm_tractogram, prototype_idx = compute_dissimilarity(tractogram,
num_prototypes=num_prototypes,
distance= bundles_distances_mam,
prototype_policy='sff',
n_jobs=-1,
verbose=False)
prototypes = tractogram[prototype_idx]
print("Building the KD-tree of tractogram")
kdt = KDTree(dm_tractogram)
return kdt, prototypes
def NN(kdt, dm_E_t, num_NN ):
"""Code for efficient nearest neighbors computation.
"""
D, I = kdt.query(dm_E_t, k=num_NN)
if num_NN==1:
return I.squeeze(), D.squeeze(), dm_E_t.shape[0]
else:
return np.unique(I.flat)
def bundles_distances_mam_smarter_faster(A, B, n_jobs=-1, chunk_size=100):
"""Parallel version of bundles_distances_mam that also avoids
computing distances twice.
"""
lenA = len(A)
chunks = chunker(A, chunk_size)
if B is None:
dm = np.empty((lenA, lenA), dtype=np.float32)
dm[np.diag_indices(lenA)] = 0.0
results = Parallel(n_jobs=-1)(delayed(bundles_distances_mam)(ss, A[i*chunk_size+1:]) for i, ss in enumerate(chunks))
# Fill triu
for i, res in enumerate(results):
dm[(i*chunk_size):((i+1)*chunk_size), (i*chunk_size+1):] = res
# Copy triu to trid:
rows, cols = np.triu_indices(lenA, 1)
dm[cols, rows] = dm[rows, cols]
else:
dm = np.vstack(Parallel(n_jobs=n_jobs)(delayed(bundles_distances_mam)(ss, B) for ss in chunks))
return dm
def chunker(seq, size):
return (seq[pos:pos + size] for pos in xrange(0, len(seq), size))
def tract_segmentation_single_example_lap (kdt_T_A, prototypes_T_A,sid, num_NN,T_A ):
""" step 1: tract segmentation from a single example using Jonker-Volgenant algorithm (LAPJV)
"""
E_t_filename= 'data/example/'+ str(sid) +'_'+str(tract_name)+'.trk'
print("Loading Example tract: %s" % E_t_filename)
E_t, hdr= load(E_t_filename, threshold_short_streamlines=threshold_short_streamlines)
dm_E_t= dissimilarity(E_t, prototypes_T_A,bundles_distances_mam)
#compute the NN of the example tract in order to construcse the cost matrix
NN_E_t_NN_Idx= NN (kdt_T_A, dm_E_t,num_NN)
print("Computing the cost matrix with mam distance (%s x %s) for RLAP " % (len(E_t),
len( NN_E_t_NN_Idx)))
cost_matrix = bundles_distances_mam_smarter_faster(E_t,
T_A[NN_E_t_NN_Idx])
print("Computing optimal assignmnet with LAPJV")
assignment = LinearAssignment(cost_matrix).solution
min_cost_values= cost_matrix[np.arange(len(cost_matrix)), assignment]
return NN_E_t_NN_Idx[assignment], min_cost_values, len(E_t)
def tract_correspondence_multiple_example_lap (kdt_T_A, prototypes_T_A,example_sunject_id_list, num_NN ):
""" step:2 tracts generated from each example are merged together and then filtered
in order to obtain the final segmentation of the desired tract
"""
print("Extracting the estimated target tract (superset) using the RLAP")
n_jobs=-1
result_LAP= np.array(Parallel(n_jobs=n_jobs)(delayed(tract_segmentation_single_example_lap)(kdt_T_A, prototypes_T_A,sid, num_NN,T_A ) for sid in example_sunject_id_list ))
superset_estimated_correspondence_tract_idx= np.hstack(result_LAP[:,0])
superset_estimated_correspondence_tract_cost= np.hstack(result_LAP[:,1])
example_tract_len_med=np.median(np.hstack(result_LAP[:,2]))
print("Ranking the estimated target (superset) tract.")
superset_estimated_correspondence_tract_idx_ranked=ranking_schema(superset_estimated_correspondence_tract_idx,
superset_estimated_correspondence_tract_cost)
print("Extracting the estimated target tract (until the median size (in terms of number of streamlines) of all the tracts from the example).")
superset_estimated_correspondence_tract_idx_ranked_med=superset_estimated_correspondence_tract_idx_ranked[0:int(example_tract_len_med)]
segmented_tract_LAP=T_A [ superset_estimated_correspondence_tract_idx_ranked_med]
print("Saving the estimated target (superset) (.trk)")
prefix="lap"
save_trk( tract_name,
test_tractogram,
segmented_tract_LAP,
hdr,
prefix)
print("Show the tract")
color= colors.blue
show_tract(segmented_tract_LAP,color)
def tract_segmentation_single_example_NN (kdt_T_A, prototypes_T_A,sid, num_NN,T_A ):
""" step:1 tract segmentation from single example using lapjv
"""
E_t_filename= 'data/example/'+ str(sid) +'_'+str(tract_name)+'.trk'
print("Loading Example tract: %s" % E_t_filename)
E_t, hdr= load(E_t_filename, threshold_short_streamlines=threshold_short_streamlines)
dm_E_t= dissimilarity(E_t, prototypes_T_A,bundles_distances_mam)
#compute the NN of the example tract in order to construcse the cost matrix
assignmnet, min_cost_value, len_E_T = NN (kdt_T_A, dm_E_t,num_NN)
return assignmnet, min_cost_value, len_E_T
def tract_correspondence_multiple_example_NN (kdt_T_A, prototypes_T_A,example_subject_id_list,num_NN ):
""" step:2 tract segmentation using multiple example
"""
print("Extracting the estimated target tract (superset) using the RLAP")
n_jobs=-1
result_NN= np.array(Parallel(n_jobs=n_jobs)(delayed(tract_segmentation_single_example_NN)(kdt_T_A, prototypes_T_A,sid, num_NN,T_A ) for sid in example_subject_id_list ))#euclidean
superset_estimated_correspondence_tract_idx= np.hstack(result_NN[:,0])
superset_estimated_correspondence_tract_cost= np.hstack(result_NN[:,1])
example_tract_len_med=np.median(np.hstack(result_NN[:,2]))
print("Ranking the estimated target (superset) tract.")
superset_estimated_correspondence_tract_idx_ranked=ranking_schema(superset_estimated_correspondence_tract_idx,
superset_estimated_correspondence_tract_cost)
print("Extracting the estimated target tract (until the median size (in terms of number of streamlines) of all the tracts from the example).")
superset_estimated_correspondence_tract_idx_ranked_med=superset_estimated_correspondence_tract_idx_ranked[0:int(example_tract_len_med)]
segmented_tract_NN=T_A [ superset_estimated_correspondence_tract_idx_ranked_med]
print len (segmented_tract_NN)
print("Saving the estimated target (superset) (.trk)")
prefix="NN"
save_trk(tract_name,
test_tractogram,
segmented_tract_NN,
hdr,
prefix)
print("Show the tract")
color= colors.green
show_tract(segmented_tract_NN,
color)
def save_trk(tract_name, test_tractogram, segmented_tract_LAP, hdr, prefix):
"""Save the segmented tract estimated from the LAP
"""
filedir = os.path.dirname('data/segmented_tract/')
if not os.path.exists(filedir):
os.makedirs(filedir)
save_segmented_tract_LAP_filename = '%s/%s_%s_%s.trk'%\
(filedir, test_tractogram, tract_name, prefix)
strmR_A = ((sl, None, None) for sl in segmented_tract_LAP )
trackvis.write( save_segmented_tract_LAP_filename ,strmR_A , hdr)
if __name__ == '__main__':
print(__doc__)
np.random.seed(0)
# test tractogram
test_tractogram = "100307"
T_A_filename = 'data/test_tractogram/tractogram_b1k_1.25mm_csd_wm_mask_eudx1M.trk'
# Main parameters:
threshold_short_streamlines = 0.0 # Beware: discarding streamlines affects IDs
num_NN_lap = 500 # number of nesrest neighbour in order to sparsify the cost matrix.
num_example= 3
num_prototypes=40
num_NN=1
tract_name= "uf.left"
example_subject_id_list= ["100408", "128632", "103414"]
# 1) load test tractogram, T_A
T_A, hdr = load(T_A_filename, threshold_short_streamlines=threshold_short_streamlines)
# 2) Compute the dissimilarity representation of T_A
print("Computing the dissimilarity representation and KD-tree.")
kdt_T_A, prototypes_T_A = compute_kdtree_and_dr_tractogram( T_A,
num_prototypes)
print("Segmenting tract with NN")
tract_correspondence_multiple_example_NN (kdt_T_A,
prototypes_T_A,
example_subject_id_list,
num_NN=num_NN )
print("Segmenting tract with lap")
tract_correspondence_multiple_example_lap (kdt_T_A,
prototypes_T_A,
example_subject_id_list,
                                               num_NN=num_NN_lap )
import unittest
import jax.numpy as np
from jax import random
from jax.experimental import stax
import flows
def is_bijective(
    test, init_fun, inputs=random.uniform(random.PRNGKey(0), (20, 4), minval=-10.0, maxval=10.0), tol=1e-3
):
    """Assert that applying the flow forward and then inverse recovers the
    inputs (within `tol`) for the layer produced by `init_fun`."""
    params, direct_fun, inverse_fun = init_fun(random.PRNGKey(0), inputs.shape[1])
    mapped = direct_fun(params, inputs)[0]
    round_trip = inverse_fun(params, mapped)[0]
    test.assertTrue(np.allclose(inputs, round_trip, atol=tol))
def returns_correct_shape(
    test, init_fun, inputs=random.uniform(random.PRNGKey(0), (20, 4), minval=-10.0, maxval=10.0)
):
    """Assert that both directions of the flow preserve the input shape and
    return one log-det-Jacobian value per example."""
    n_examples, input_dim = inputs.shape
    params, direct_fun, inverse_fun = init_fun(random.PRNGKey(0), input_dim)
    # Forward first, then inverse — same order and checks as before.
    for transform in (direct_fun, inverse_fun):
        outputs, log_det_jacobian = transform(params, inputs)
        test.assertTrue(outputs.shape == inputs.shape)
        test.assertTrue(log_det_jacobian.shape == (n_examples,))
class Tests(unittest.TestCase):
    """Shape and round-trip (bijectivity) tests for each flow layer in
    the `flows` package, via the is_bijective / returns_correct_shape
    helpers."""

    def test_shuffle(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.Shuffle())

    def test_reverse(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.Reverse())

    def test_affine_coupling(self):
        def transform(rng, input_dim, output_dim, hidden_dim=64, act=stax.Relu):
            # Small MLP used as the coupling network.
            init_fun, apply_fun = stax.serial(
                stax.Dense(hidden_dim), act, stax.Dense(hidden_dim), act, stax.Dense(output_dim),
            )
            _, params = init_fun(rng, (input_dim,))
            return params, apply_fun

        inputs = random.uniform(random.PRNGKey(0), (20, 5), minval=-10.0, maxval=10.0)

        init_fun = flows.AffineCoupling(transform)
        for test in (returns_correct_shape, is_bijective):
            test(self, init_fun, inputs)

        init_fun = flows.AffineCouplingSplit(transform, transform)
        for test in (returns_correct_shape, is_bijective):
            test(self, init_fun, inputs)

    def test_made(self):
        def get_masks(input_dim, hidden_dim=64, num_hidden=1):
            # Degree-based autoregressive mask construction (MADE).
            masks = []
            input_degrees = np.arange(input_dim)
            degrees = [input_degrees]
            for n_h in range(num_hidden + 1):
                degrees += [np.arange(hidden_dim) % (input_dim - 1)]
            degrees += [input_degrees % input_dim - 1]
            for (d0, d1) in zip(degrees[:-1], degrees[1:]):
                masks += [np.transpose(np.expand_dims(d1, -1) >= np.expand_dims(d0, 0)).astype(np.float32)]
            return masks

        def masked_transform(rng, input_dim):
            masks = get_masks(input_dim, hidden_dim=64, num_hidden=1)
            act = stax.Relu
            init_fun, apply_fun = stax.serial(
                flows.MaskedDense(masks[0]),
                act,
                flows.MaskedDense(masks[1]),
                act,
                flows.MaskedDense(masks[2].tile(2)),
            )
            _, params = init_fun(rng, (input_dim,))
            return params, apply_fun

        for test in (returns_correct_shape, is_bijective):
            test(self, flows.MADE(masked_transform))

    def test_actnorm(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.ActNorm())
        # Test data-dependent initialization
        inputs = random.uniform(random.PRNGKey(0), (20, 3), minval=-10.0, maxval=10.0)
        input_dim = inputs.shape[1]
        init_fun = flows.Serial(flows.ActNorm())
        params, direct_fun, inverse_fun = init_fun(random.PRNGKey(0), inputs.shape[1:], init_inputs=inputs)
        mapped_inputs, _ = direct_fun(params, inputs)
        # NOTE(review): 1e6 looks like a typo for 1e-6 — as written this
        # assertion is nearly vacuous (it only fails for enormous means).
        # Confirm the intended tolerance before tightening; kept as-is.
        self.assertFalse((np.abs(mapped_inputs.mean(0)) > 1e6).any())
        self.assertTrue(np.allclose(np.ones(input_dim), mapped_inputs.std(0)))

    def test_invertible_linear(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.InvertibleLinear())

    def test_fixed_invertible_linear(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.FixedInvertibleLinear())

    def test_sigmoid(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.Sigmoid())

    def test_logit(self):
        # Logit is only defined on (0, 1), so restrict the inputs.
        inputs = random.uniform(random.PRNGKey(0), (20, 3))
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.Logit(), inputs)

    def test_serial(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.Serial(flows.Shuffle(), flows.Shuffle()))

    def test_batchnorm(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.BatchNorm())

    def test_neural_spline(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.NeuralSplineCoupling())

# The line below begins the next dataset row's code; the fused row metadata
# ("| tests/test_bijections.py |") was stray residue and has been removed.
import unittest
import jax.numpy as np
from jax import random
from jax.experimental import stax
import flows
def is_bijective(test, init_fun, inputs=None, tol=1e-3):
    """Assert that a flow's inverse transform undoes its forward transform.

    :param test: a ``unittest.TestCase`` (only ``assertTrue`` is used).
    :param init_fun: flow constructor; called as ``init_fun(rng, input_dim)``
        and expected to return ``(params, direct_fun, inverse_fun)``.
    :param inputs: batch of test points, shape ``(batch, dim)``; defaults to
        a fixed uniform sample in ``[-10, 10)`` of shape ``(20, 4)``.
    :param tol: absolute tolerance for the round-trip comparison.
    """
    if inputs is None:
        # Fixed: the default used to be evaluated in the signature, running
        # jax's RNG at import time; computing it lazily keeps module import
        # cheap while producing the exact same default batch.
        inputs = random.uniform(random.PRNGKey(0), (20, 4), minval=-10.0, maxval=10.0)
    input_dim = inputs.shape[1]
    params, direct_fun, inverse_fun = init_fun(random.PRNGKey(0), input_dim)
    mapped_inputs = direct_fun(params, inputs)[0]
    reconstructed_inputs = inverse_fun(params, mapped_inputs)[0]
    test.assertTrue(np.allclose(inputs, reconstructed_inputs, atol=tol))
def returns_correct_shape(test, init_fun, inputs=None):
    """Assert both directions of a flow preserve the batch shape.

    Checks that ``direct_fun`` and ``inverse_fun`` each return a mapped
    batch with the same shape as ``inputs`` plus a per-example
    log-det-Jacobian of shape ``(batch,)``.

    :param test: a ``unittest.TestCase`` (only ``assertTrue`` is used).
    :param init_fun: flow constructor; called as ``init_fun(rng, input_dim)``
        and expected to return ``(params, direct_fun, inverse_fun)``.
    :param inputs: batch of test points, shape ``(batch, dim)``; defaults to
        a fixed uniform sample in ``[-10, 10)`` of shape ``(20, 4)``.
    """
    if inputs is None:
        # Fixed: the default used to be evaluated in the signature, running
        # jax's RNG at import time; computing it lazily keeps module import
        # cheap while producing the exact same default batch.
        inputs = random.uniform(random.PRNGKey(0), (20, 4), minval=-10.0, maxval=10.0)
    input_dim = inputs.shape[1]
    params, direct_fun, inverse_fun = init_fun(random.PRNGKey(0), input_dim)
    # The same shape contract holds in both directions; note the original
    # deliberately feeds the raw inputs to inverse_fun as well.
    for transform in (direct_fun, inverse_fun):
        mapped_inputs, log_det_jacobian = transform(params, inputs)
        test.assertTrue(inputs.shape == mapped_inputs.shape)
        test.assertTrue((inputs.shape[0],) == log_det_jacobian.shape)
class Tests(unittest.TestCase):
    """Unit tests for the bijections exposed by the ``flows`` package.

    Each test drives the shared ``returns_correct_shape`` and
    ``is_bijective`` checks against one flow constructor; a few tests add
    flow-specific setup (coupling networks, MADE masks, data-dependent
    initialization).
    """

    def test_shuffle(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.Shuffle())

    def test_reverse(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.Reverse())

    def test_affine_coupling(self):
        def transform(rng, input_dim, output_dim, hidden_dim=64, act=stax.Relu):
            # Small MLP used as the coupling network.
            init_fun, apply_fun = stax.serial(
                stax.Dense(hidden_dim), act, stax.Dense(hidden_dim), act, stax.Dense(output_dim),
            )
            _, params = init_fun(rng, (input_dim,))
            return params, apply_fun

        # Odd input dimension (5) exercises the uneven split in the coupling.
        inputs = random.uniform(random.PRNGKey(0), (20, 5), minval=-10.0, maxval=10.0)
        init_fun = flows.AffineCoupling(transform)
        for test in (returns_correct_shape, is_bijective):
            test(self, init_fun, inputs)
        init_fun = flows.AffineCouplingSplit(transform, transform)
        for test in (returns_correct_shape, is_bijective):
            test(self, init_fun, inputs)

    def test_made(self):
        def get_masks(input_dim, hidden_dim=64, num_hidden=1):
            # Autoregressive masks in the style of MADE (Germain et al., 2015).
            masks = []
            input_degrees = np.arange(input_dim)
            degrees = [input_degrees]
            for n_h in range(num_hidden + 1):
                degrees += [np.arange(hidden_dim) % (input_dim - 1)]
            degrees += [input_degrees % input_dim - 1]
            for (d0, d1) in zip(degrees[:-1], degrees[1:]):
                masks += [np.transpose(np.expand_dims(d1, -1) >= np.expand_dims(d0, 0)).astype(np.float32)]
            return masks

        def masked_transform(rng, input_dim):
            masks = get_masks(input_dim, hidden_dim=64, num_hidden=1)
            act = stax.Relu
            init_fun, apply_fun = stax.serial(
                flows.MaskedDense(masks[0]),
                act,
                flows.MaskedDense(masks[1]),
                act,
                # Final layer emits two outputs per dimension, hence the tiling.
                flows.MaskedDense(masks[2].tile(2)),
            )
            _, params = init_fun(rng, (input_dim,))
            return params, apply_fun

        for test in (returns_correct_shape, is_bijective):
            test(self, flows.MADE(masked_transform))

    def test_actnorm(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.ActNorm())
        # Test data-dependent initialization: after initializing on `inputs`,
        # the mapped batch should have (approximately) zero mean and unit std.
        inputs = random.uniform(random.PRNGKey(0), (20, 3), minval=-10.0, maxval=10.0)
        input_dim = inputs.shape[1]
        init_fun = flows.Serial(flows.ActNorm())
        params, direct_fun, inverse_fun = init_fun(random.PRNGKey(0), inputs.shape[1:], init_inputs=inputs)
        mapped_inputs, _ = direct_fun(params, inputs)
        # Fixed: the original threshold was 1e6, which made this assertion
        # vacuous; data-dependent init should centre the batch near zero
        # (1e-4 leaves headroom for float32 accumulation error).
        self.assertFalse((np.abs(mapped_inputs.mean(0)) > 1e-4).any())
        self.assertTrue(np.allclose(np.ones(input_dim), mapped_inputs.std(0)))

    def test_invertible_linear(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.InvertibleLinear())

    def test_fixed_invertible_linear(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.FixedInvertibleLinear())

    def test_sigmoid(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.Sigmoid())

    def test_logit(self):
        # Logit is only defined on (0, 1), so restrict the test inputs.
        inputs = random.uniform(random.PRNGKey(0), (20, 3))
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.Logit(), inputs)

    def test_serial(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.Serial(flows.Shuffle(), flows.Shuffle()))

    def test_batchnorm(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.BatchNorm())

    def test_neural_spline(self):
        for test in (returns_correct_shape, is_bijective):
            test(self, flows.NeuralSplineCoupling())
import requests
import json
import os
import time
import sys
from util import *
def get_accesstoken(tenantid, client_id, client_secret):
    """Fetch an Azure AD OAuth2 access token (client-credentials flow).

    Returns the bearer token string for the ARM resource; logs a message
    and exits the process with status 127 on any non-200 response.
    """
    token_url = "https://login.microsoftonline.com/{}/oauth2/token".format(tenantid)
    form_data = {
        "client_id": client_id,
        "client_secret": client_secret,
        "resource": "https://management.azure.com/",
        "grant_type": "client_credentials",
    }
    response = requests.post(url=token_url, data=form_data)
    body = response.json()
    if response.status_code != 200:
        print_log("No Access token")
        sys.exit(127)
    return body["access_token"]
def enable_backup_policy(subscription_id, resource_group, vaultname, policyname,
                         fabric_name, containerName, protectedItemName, tenantid,
                         vm_id, policy_id, client_id, client_secret):
    """Attach an existing backup policy to a VM's protected item.

    Issues the (asynchronous) protected-item PUT; when Azure accepts it,
    polls the operation once via the ``Location`` header. Exits with 127
    only when the initial PUT is rejected.
    """
    item_url = (
        "https://management.azure.com/Subscriptions/{}/resourceGroups/{}/"
        "providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/"
        "protectionContainers/{}/protectedItems/{}"
        "?api-version=2016-12-01"
    ).format(subscription_id, resource_group, vaultname, fabric_name,
             containerName, protectedItemName)
    body = {
        "properties": {
            "protectedItemType": "Microsoft.Compute/virtualMachines",
            "sourceResourceId": vm_id,
            "policyId": policy_id,
        }
    }
    token = get_accesstoken(tenantid, client_id, client_secret)
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + token}
    put_resp = requests.put(url=item_url, data=json.dumps(body), headers=headers)
    print_log(put_resp.status_code)
    if put_resp.status_code in (200, 202):
        # Async operation accepted; poll it once at the URL Azure returned.
        poll_resp = requests.get(url=put_resp.headers['Location'], headers=headers)
        print_log(poll_resp.status_code)
        if poll_resp.status_code in (200, 202):
            print_log("Enabling backupolicy for VM Success")
    else:
        print_log("Enabling Backup policy API failed")
        sys.exit(127)
def list_unprotectvm(subscription_id, resource_group, vaultname,
                     policyname, fabric_name, tenantid,
                     vmname, policy_id, client_id, client_secret):
    """Find the named VM among the vault's protectable (unprotected) items.

    On a name match, records the Azure container/protected-item names in
    the process environment, prints a result payload, and enables the
    backup policy on the VM. Exits with 127 when the VM is not found or
    the region has no unprotected VMs at all.
    """
    access_token = get_accesstoken(tenantid, client_id, client_secret)
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
    # Only IaaS-VM items; VMs that are already protected do not appear here.
    endpoint = "https://management.azure.com/Subscriptions/{}/" \
               "resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/" \
               "{}/backupProtectableItems?api-version=2016-12-01&$filter=backupManagementType " \
               "eq 'AzureIaasVM'".format(subscription_id, resource_group, vaultname)
    res = requests.get(url=endpoint, headers=headers)
    vm_list = res.json()
    vm_exists = 0
    if vm_list['value']:
        for i in vm_list['value']:
            if i['properties']['friendlyName'] == vmname:
                vm_exists = 1
                # Azure's naming convention for IaaS VM protection:
                # "iaasvmcontainer;<name>" / "vm;<name>".
                containerName = "iaasvmcontainer;" + i['name']
                protectedItemName = "vm;" + i['name']
                vm_id = i['properties']["virtualMachineId"]
                print_log(containerName)
                print_log(protectedItemName)
                print_log(vm_id)
                # Stash for the teardown path: deletebackup() reads these
                # names back from the environment.
                os.environ["containerName"] = containerName
                os.environ["protectedItemName"] = protectedItemName
                app_tier_name = os.environ['cliqrAppTierName']
                json_result = {
                    "hostName": app_tier_name,
                    "ipAddress": "",
                    "environment": {
                        "containerName": containerName,
                        "protectedItemName": protectedItemName
                    }
                }
                print_result(json.dumps(json_result))
                enable_backup_policy(subscription_id, resource_group, vaultname, policyname
                                     , "Azure", containerName, protectedItemName, tenantid,
                                     vm_id, policy_id, client_id, client_secret)
        if(vm_exists == 0):
            print_log("The VM may not be exist or in some other region")
            sys.exit(127)
    else:
        print_log("No unprotected vms for this region")
        sys.exit(127)
def refresh_vm(subscription_id, resource_group, vaultname,
               policyname, tenantid, fabric_name, vmname, policy_id, client_id, client_secret):
    """Refresh the fabric's protection containers, then look up the VM.

    The refresh makes recently created VMs discoverable as protectable
    items; on acceptance (202) the async operation is polled once and
    control passes to ``list_unprotectvm``. Exits with 127 otherwise.
    """
    refresh_url = (
        "https://management.azure.com/Subscriptions/{}/resourceGroups/"
        "{}/providers/Microsoft.RecoveryServices/vaults/{}"
        "/backupFabrics/{}/"
        "refreshContainers?api-version=2016-12-01"
    ).format(subscription_id, resource_group, vaultname, fabric_name)
    token = get_accesstoken(tenantid, client_id, client_secret)
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + token}
    refresh_resp = requests.post(url=refresh_url, headers=headers)
    if refresh_resp.status_code != 202:
        print_log("Error in refresh vm api")
        sys.exit(127)
    # Poll the async operation once via the Location header; the poll's
    # result itself is not inspected.
    requests.get(url=refresh_resp.headers['Location'], headers=headers)
    print_log("Refresh VM API success")
    list_unprotectvm(subscription_id, resource_group, vaultname,
                     policyname, fabric_name, tenantid, vmname, policy_id, client_id, client_secret)
def create_backup_policy(subscription_id,
                         resource_group, vaultname,
                         policyname, tenantid, vmname, client_id, client_secret):
    """Look up the named IaaS-VM backup policy and continue the workflow.

    Lists the vault's AzureIaasVM backup policies; for a policy whose name
    matches ``policyname``, hands its id to ``refresh_vm`` (which carries
    on with VM discovery and protection). Logs once if no policy matched.
    """
    backuppolicy_exists = "https://management.azure.com/Subscriptions/{}/resourceGroups/{}/" \
                          "providers/Microsoft.RecoveryServices/vaults/{}/" \
                          "backupPolicies?api-version=2017-07-01&" \
                          "$filter=backupManagementType eq 'AzureIaasVM'".format(subscription_id,
                                                                                 resource_group, vaultname)
    access_token = get_accesstoken(tenantid, client_id, client_secret)
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
    response = requests.get(url=backuppolicy_exists, headers=headers)
    listpolicy = response.json()
    policy_exists = False
    for i in listpolicy['value']:
        # Fixed: the original had two byte-identical branches (one gated on
        # `policyname == "DefaultPolicy"`, one not) doing exactly the same
        # work; a single name comparison is equivalent.
        if i["name"] == policyname:
            policy_exists = True
            refresh_vm(subscription_id, resource_group, vaultname, policyname, tenantid,
                       "Azure", vmname, i["id"], client_id, client_secret)
    # Fixed: "policy not exists" used to be logged once per NON-matching
    # policy inside the loop; report the overall result once instead.
    if not policy_exists:
        print_log("policy not exists")
def create_recovery_vault(subscription_id, resource_group, vaultname, tenantid,
                          location, policyname, vmname, client_id, client_secret):
    """Ensure the Recovery Services vault exists, then drive policy setup.

    If a vault with ``vaultname`` already exists in the resource group,
    proceeds straight to ``create_backup_policy``; otherwise creates the
    vault first (Standard SKU, in ``location``) and then proceeds. Exits
    with 127 when vault creation fails.
    """
    recovery_exists = "https://management.azure.com/subscriptions/{}/resourceGroups/{}/" \
                      "providers/Microsoft.RecoveryServices/vaults?" \
                      "api-version=2016-06-01".format(subscription_id, resource_group)
    access_token = get_accesstoken(tenantid, client_id, client_secret)
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
    response = requests.get(url=recovery_exists, headers=headers)
    listvault = response.json()
    vault_exists = False
    for i in listvault['value']:
        if i["name"] == vaultname:
            vault_exists = True
            print_log("Vault exists")
            create_backup_policy(subscription_id, resource_group, vaultname,
                                 policyname, tenantid, vmname, client_id, client_secret)
    if not vault_exists:
        endpoint = "https://management.azure.com/subscriptions/{}/resourceGroups/{}/" \
                   "providers/Microsoft.RecoveryServices/vaults/{}?" \
                   "api-version=2016-06-01".format(subscription_id, resource_group, vaultname)
        # Fixed: the vault location was hard-coded to "West US", silently
        # ignoring the `location` argument every caller supplies.
        payload = {"properties": {}, "sku": {"name": "Standard"}, "location": location}
        access_token = get_accesstoken(tenantid, client_id, client_secret)
        headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
        response = requests.put(url=endpoint, data=json.dumps(payload), headers=headers)
        if response.status_code == 200 or response.status_code == 201:
            print_log("Vault created")
            create_backup_policy(subscription_id, resource_group, vaultname,
                                 policyname, tenantid, vmname, client_id, client_secret)
        else:
            print_log("Vault not created")
            sys.exit(127)
def deletebackup(subscription_id, resource_group, vaultname, tenantid,
                 location, policyname, vmname, client_id, client_secret):
    """Delete the VM's backup item, then the Recovery Services vault.

    Reads the container/protected-item names that list_unprotectvm stored
    in the process environment; if both are present, deletes the protected
    item, waits for the async delete, and then deletes the vault itself.
    """
    containerName = os.environ.get("containerName", None)
    protectedItemName = os.environ.get("protectedItemName", None)
    print_log(containerName)
    print_log(protectedItemName)
    # Fixed idiom: compare against None with `is not None` (was `!= None`).
    if containerName is not None and protectedItemName is not None:
        deletebcdata = "https://management.azure.com/subscriptions/{}/resourceGroups/{}/" \
                       "providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/" \
                       "Azure/protectionContainers/{}/protectedItems/" \
                       "{}?api-version=2017-07-01".format(subscription_id, resource_group,
                                                          vaultname, containerName, protectedItemName)
        access_token = get_accesstoken(tenantid, client_id, client_secret)
        headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
        response = requests.delete(url=deletebcdata, headers=headers)
        print_log(response.status_code)
        # Crude wait for the async item delete to finish before removing the
        # vault: a vault that still holds protected items cannot be deleted.
        time.sleep(200)
        print_log("Backup items deleted")
        # The token may have aged during the sleep; fetch a fresh one.
        # NOTE(review): vault deletion is assumed to run only when backup
        # items were found and removed — confirm against original layout.
        access_token = get_accesstoken(tenantid, client_id, client_secret)
        headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
        delrecovervault = "https://management.azure.com/subscriptions/{}/resourceGroups/" \
                          "{}/providers/Microsoft.RecoveryServices/vaults/" \
                          "{}?api-version=2016-06-01".format(subscription_id, resource_group, vaultname)
        response = requests.delete(url=delrecovervault, headers=headers)
        if response.status_code == 200:
            print_log("Recovery Vault Deleted")
print_log("Recovery Vault Deleted") | Content/Backup/Azure Backup Service/WorkloadManager/src/azurebackupservice/azure_backup.py | import requests
import json
import os
import time
import sys
from util import *
def get_accesstoken(tenantid, client_id, client_secret):
    """Obtain an Azure AD OAuth2 access token (client-credentials flow).

    Returns the bearer token string for the ARM resource; logs a message
    and exits the process with status 127 on any non-200 response.
    """
    endpoint = "https://login.microsoftonline.com/{}/oauth2/token".format(tenantid)
    resource = "https://management.azure.com/"
    grant_type = "client_credentials"
    payload = {"client_id": client_id, "client_secret": client_secret,
               "resource": resource, "grant_type": grant_type}
    response = requests.post(url=endpoint, data=payload)
    json_resp = response.json()
    if response.status_code == 200:
        return json_resp["access_token"]
    else:
        print_log("No Access token")
        sys.exit(127)
def enable_backup_policy(subscription_id, resource_group, vaultname, policyname,
                         fabric_name, containerName, protectedItemName, tenantid,
                         vm_id, policy_id, client_id, client_secret):
    """Attach an existing backup policy to a VM's protected item.

    Issues the (asynchronous) protected-item PUT; when Azure accepts it,
    polls the operation once via the ``Location`` header. Exits with 127
    only when the initial PUT is rejected.
    """
    endpoint = "https://management.azure.com/Subscriptions/{}/resourceGroups/{}/" \
               "providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/" \
               "protectionContainers/{}/protectedItems/{}" \
               "?api-version=2016-12-01".format(subscription_id, resource_group,
                                               vaultname, fabric_name, containerName, protectedItemName)
    payload = {"properties": {"protectedItemType": "Microsoft.Compute/virtualMachines",
                              "sourceResourceId": vm_id,
                              "policyId": policy_id
                              }
               }
    access_token = get_accesstoken(tenantid, client_id, client_secret)
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
    response = requests.put(url=endpoint, data=json.dumps(payload), headers=headers)
    print_log(response.status_code)
    if response.status_code == 200 or response.status_code == 202:
        # Async operation accepted; poll it once at the URL Azure returned.
        enable_endpoint = response.headers['Location']
        enable_api = requests.get(url=enable_endpoint, headers=headers)
        print_log(enable_api.status_code)
        if enable_api.status_code == 200 or enable_api.status_code == 202:
            print_log("Enabling backupolicy for VM Success")
        # NOTE(review): a failed poll falls through silently — no log, no exit.
    else:
        print_log("Enabling Backup policy API failed")
        sys.exit(127)
def list_unprotectvm(subscription_id, resource_group, vaultname,
                     policyname, fabric_name, tenantid,
                     vmname, policy_id, client_id, client_secret):
    """Find the named VM among the vault's protectable (unprotected) items.

    On a name match, records the Azure container/protected-item names in
    the process environment, prints a result payload, and enables the
    backup policy on the VM. Exits with 127 when the VM is not found or
    the region has no unprotected VMs at all.
    """
    access_token = get_accesstoken(tenantid, client_id, client_secret)
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
    # Only IaaS-VM items; VMs that are already protected do not appear here.
    endpoint = "https://management.azure.com/Subscriptions/{}/" \
               "resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/" \
               "{}/backupProtectableItems?api-version=2016-12-01&$filter=backupManagementType " \
               "eq 'AzureIaasVM'".format(subscription_id, resource_group, vaultname)
    res = requests.get(url=endpoint, headers=headers)
    vm_list = res.json()
    vm_exists = 0
    if vm_list['value']:
        for i in vm_list['value']:
            if i['properties']['friendlyName'] == vmname:
                vm_exists = 1
                # Azure's naming convention for IaaS VM protection:
                # "iaasvmcontainer;<name>" / "vm;<name>".
                containerName = "iaasvmcontainer;" + i['name']
                protectedItemName = "vm;" + i['name']
                vm_id = i['properties']["virtualMachineId"]
                print_log(containerName)
                print_log(protectedItemName)
                print_log(vm_id)
                # Stash for the teardown path: deletebackup() reads these
                # names back from the environment.
                os.environ["containerName"] = containerName
                os.environ["protectedItemName"] = protectedItemName
                app_tier_name = os.environ['cliqrAppTierName']
                json_result = {
                    "hostName": app_tier_name,
                    "ipAddress": "",
                    "environment": {
                        "containerName": containerName,
                        "protectedItemName": protectedItemName
                    }
                }
                print_result(json.dumps(json_result))
                enable_backup_policy(subscription_id, resource_group, vaultname, policyname
                                     , "Azure", containerName, protectedItemName, tenantid,
                                     vm_id, policy_id, client_id, client_secret)
        if(vm_exists == 0):
            print_log("The VM may not be exist or in some other region")
            sys.exit(127)
    else:
        print_log("No unprotected vms for this region")
        sys.exit(127)
def refresh_vm(subscription_id, resource_group, vaultname,
               policyname, tenantid, fabric_name, vmname, policy_id, client_id, client_secret):
    """Refresh the fabric's protection containers, then look up the VM.

    The refresh makes recently created VMs discoverable as protectable
    items; on acceptance (202) the async operation is polled once and
    control passes to ``list_unprotectvm``. Exits with 127 otherwise.
    """
    endpoint = "https://management.azure.com/Subscriptions/{}/resourceGroups/" \
               "{}/providers/Microsoft.RecoveryServices/vaults/{}" \
               "/backupFabrics/{}/" \
               "refreshContainers?api-version=2016-12-01".format(subscription_id, resource_group, vaultname, fabric_name)
    access_token = get_accesstoken(tenantid, client_id, client_secret)
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
    res = requests.post(url=endpoint, headers=headers)
    if res.status_code == 202:
        refresh_endpoint = res.headers['Location']
        # Poll once; the poll's result itself is not inspected.
        refreshvm = requests.get(url=refresh_endpoint, headers=headers)
        print_log("Refresh VM API success")
        list_unprotectvm(subscription_id, resource_group, vaultname,
                         policyname, fabric_name, tenantid, vmname, policy_id, client_id, client_secret)
    else:
        print_log("Error in refresh vm api")
        sys.exit(127)
def create_backup_policy(subscription_id,
                         resource_group, vaultname,
                         policyname, tenantid, vmname, client_id, client_secret):
    """Look up the named IaaS-VM backup policy and continue the workflow.

    For each vault policy whose name matches ``policyname``, passes its id
    to ``refresh_vm`` (which carries on with VM discovery and protection).
    """
    backuppolicy_exists = "https://management.azure.com/Subscriptions/{}/resourceGroups/{}/" \
                          "providers/Microsoft.RecoveryServices/vaults/{}/" \
                          "backupPolicies?api-version=2017-07-01&" \
                          "$filter=backupManagementType eq 'AzureIaasVM'".format(subscription_id,
                                                                                 resource_group, vaultname)
    access_token = get_accesstoken(tenantid, client_id, client_secret)
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
    response = requests.get(url=backuppolicy_exists, headers=headers)
    listpolicy = response.json()
    policy_exists = "false"
    for i in listpolicy['value']:
        # NOTE(review): the two branches below are byte-identical — the
        # "DefaultPolicy" special case performs exactly the same work and
        # could be collapsed into the plain name comparison.
        if i["name"] == policyname and policyname == "DefaultPolicy":
            policy_exists = "true"
            policy_id = i["id"]
            refresh_vm(subscription_id, resource_group, vaultname, policyname, tenantid,
                       "Azure", vmname, policy_id, client_id, client_secret)
        elif i["name"] == policyname:
            policy_exists = "true"
            policy_id = i["id"]
            refresh_vm(subscription_id, resource_group, vaultname, policyname, tenantid,
                       "Azure", vmname, policy_id, client_id, client_secret)
        else:
            # NOTE(review): this logs once per NON-matching policy; a single
            # post-loop check on policy_exists was probably intended.
            print_log("policy not exists")
def create_recovery_vault(subscription_id, resource_group, vaultname, tenantid,
                          location, policyname, vmname, client_id, client_secret):
    """Ensure the Recovery Services vault exists, then drive policy setup.

    If a vault with ``vaultname`` already exists in the resource group,
    proceeds straight to ``create_backup_policy``; otherwise creates the
    vault first (Standard SKU) and then proceeds. Exits with 127 when
    vault creation fails.
    """
    recovery_exists = "https://management.azure.com/subscriptions/{}/resourceGroups/{}/" \
                      "providers/Microsoft.RecoveryServices/vaults?" \
                      "api-version=2016-06-01".format(subscription_id, resource_group)
    access_token = get_accesstoken(tenantid, client_id, client_secret)
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
    response = requests.get(url=recovery_exists, headers=headers)
    listvault = response.json()
    vault_exists = "false"
    for i in listvault['value']:
        if i["name"] == vaultname:
            vault_exists = "true"
            print_log("Vault exists")
            create_backup_policy(subscription_id, resource_group, vaultname,
                                 policyname, tenantid, vmname, client_id, client_secret)
    if vault_exists == "false":
        endpoint = "https://management.azure.com/subscriptions/{}/resourceGroups/{}/" \
                   "providers/Microsoft.RecoveryServices/vaults/{}?" \
                   "api-version=2016-06-01".format(subscription_id, resource_group, vaultname)
        # NOTE(review): the vault location is hard-coded to "West US" — the
        # `location` parameter is accepted but never used here.
        payload = {"properties": {}, "sku": {"name": "Standard"}, "location": "West US"}
        access_token = get_accesstoken(tenantid, client_id, client_secret)
        headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
        response = requests.put(url=endpoint, data=json.dumps(payload), headers=headers)
        if response.status_code == 200 or response.status_code == 201:
            print_log("Vault created")
            create_backup_policy(subscription_id, resource_group, vaultname,
                                 policyname, tenantid, vmname, client_id, client_secret)
        else:
            print_log("Vault not created")
            sys.exit(127)
def deletebackup(subscription_id, resource_group, vaultname, tenantid,
                 location, policyname, vmname, client_id, client_secret):
    """Delete the VM's backup item, then the Recovery Services vault.

    Reads the container/protected-item names that list_unprotectvm stored
    in the process environment; if both are present, deletes the protected
    item, waits for the async delete, and then deletes the vault itself.
    """
    containerName = os.environ.get("containerName", None)
    protectedItemName = os.environ.get("protectedItemName", None)
    print_log(containerName)
    print_log(protectedItemName)
    if containerName != None and protectedItemName != None:
        deletebcdata = "https://management.azure.com/subscriptions/{}/resourceGroups/{}/" \
                       "providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/" \
                       "Azure/protectionContainers/{}/protectedItems/" \
                       "{}?api-version=2017-07-01".format(subscription_id, resource_group,
                                                          vaultname, containerName, protectedItemName)
        access_token = get_accesstoken(tenantid, client_id, client_secret)
        headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
        response = requests.delete(url=deletebcdata, headers=headers)
        print_log(response.status_code)
        # Crude wait for the async item delete to finish before removing the
        # vault: a vault that still holds protected items cannot be deleted.
        time.sleep(200)
        print_log("Backup items deleted")
        # The token may have aged during the sleep; fetch a fresh one.
        # NOTE(review): vault deletion is assumed to run only when backup
        # items were found and removed — confirm against original layout.
        access_token = get_accesstoken(tenantid, client_id, client_secret)
        headers = {"Content-Type": "application/json", "Authorization": "Bearer " + access_token}
        delrecovervault = "https://management.azure.com/subscriptions/{}/resourceGroups/" \
                          "{}/providers/Microsoft.RecoveryServices/vaults/" \
                          "{}?api-version=2016-06-01".format(subscription_id, resource_group, vaultname)
        response = requests.delete(url=delrecovervault, headers=headers)
        if response.status_code == 200:
            print_log("Recovery Vault Deleted")
from __future__ import division, absolute_import, print_function
import json
import os.path
import unittest
from test._common import RSRC
from beetsplug.acousticbrainz import AcousticPlugin, ABSCHEME
class MapDataToSchemeTest(unittest.TestCase):
    """Tests for ``AcousticPlugin._map_data_to_scheme``.

    The method yields ``(attribute, value)`` pairs from an AcousticBrainz
    response dict according to a mapping scheme.
    """

    def test_basic(self):
        # Flat data/scheme: each key maps straight to its attribute name.
        ab = AcousticPlugin()
        data = {'key 1': 'value 1', 'key 2': 'value 2'}
        scheme = {'key 1': 'attribute 1', 'key 2': 'attribute 2'}
        mapping = set(ab._map_data_to_scheme(data, scheme))
        self.assertEqual(mapping, {('attribute 1', 'value 1'),
                                   ('attribute 2', 'value 2')})

    def test_recurse(self):
        # Nested dicts in the scheme are descended into recursively.
        ab = AcousticPlugin()
        data = {
            'key': 'value',
            'group': {
                'subkey': 'subvalue',
                'subgroup': {
                    'subsubkey': 'subsubvalue'
                }
            }
        }
        scheme = {
            'key': 'attribute 1',
            'group': {
                'subkey': 'attribute 2',
                'subgroup': {
                    'subsubkey': 'attribute 3'
                }
            }
        }
        mapping = set(ab._map_data_to_scheme(data, scheme))
        self.assertEqual(mapping, {('attribute 1', 'value'),
                                   ('attribute 2', 'subvalue'),
                                   ('attribute 3', 'subsubvalue')})

    def test_composite(self):
        # A (name, index) tuple in the scheme composes several source keys
        # into one space-joined attribute value.
        ab = AcousticPlugin()
        data = {'key 1': 'part 1', 'key 2': 'part 2'}
        scheme = {'key 1': ('attribute', 0), 'key 2': ('attribute', 1)}
        mapping = set(ab._map_data_to_scheme(data, scheme))
        self.assertEqual(mapping, {('attribute', 'part 1 part 2')})

    def test_realistic(self):
        # Full ABSCHEME applied to a captured AcousticBrainz API response
        # stored in the test resources directory.
        ab = AcousticPlugin()
        data_path = os.path.join(RSRC, b'acousticbrainz/data.json')
        with open(data_path) as res:
            data = json.load(res)
        mapping = set(ab._map_data_to_scheme(data, ABSCHEME))
        expected = {
            ('chords_key', 'A'),
            ('average_loudness', 0.815025985241),
            ('mood_acoustic', 0.415711194277),
            ('chords_changes_rate', 0.0445116683841),
            ('tonal', 0.874250173569),
            ('mood_sad', 0.299694597721),
            ('bpm', 162.532119751),
            ('gender', 'female'),
            ('initial_key', 'A minor'),
            ('chords_number_rate', 0.00194468453992),
            ('mood_relaxed', 0.123632438481),
            ('chords_scale', 'minor'),
            ('voice_instrumental', 'instrumental'),
            ('key_strength', 0.636936545372),
            ('genre_rosamerica', 'roc'),
            ('mood_party', 0.234383180737),
            ('mood_aggressive', 0.0779221653938),
            ('danceable', 0.143928021193),
            ('rhythm', 'VienneseWaltz'),
            ('mood_electronic', 0.339881360531),
            ('mood_happy', 0.0894767045975),
            ('moods_mirex', "Cluster3"),
            ('timbre', "bright")
        }
        self.assertEqual(mapping, expected)
def suite():
    """Build this module's test suite (entry point for beets' test runner)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
import json
import os.path
import unittest
from test._common import RSRC
from beetsplug.acousticbrainz import AcousticPlugin, ABSCHEME
class MapDataToSchemeTest(unittest.TestCase):
    """Tests for ``AcousticPlugin._map_data_to_scheme``.

    The method yields ``(attribute, value)`` pairs from an AcousticBrainz
    response dict according to a mapping scheme.
    """

    def test_basic(self):
        # Flat data/scheme: each key maps straight to its attribute name.
        ab = AcousticPlugin()
        data = {'key 1': 'value 1', 'key 2': 'value 2'}
        scheme = {'key 1': 'attribute 1', 'key 2': 'attribute 2'}
        mapping = set(ab._map_data_to_scheme(data, scheme))
        self.assertEqual(mapping, {('attribute 1', 'value 1'),
                                   ('attribute 2', 'value 2')})

    def test_recurse(self):
        # Nested dicts in the scheme are descended into recursively.
        ab = AcousticPlugin()
        data = {
            'key': 'value',
            'group': {
                'subkey': 'subvalue',
                'subgroup': {
                    'subsubkey': 'subsubvalue'
                }
            }
        }
        scheme = {
            'key': 'attribute 1',
            'group': {
                'subkey': 'attribute 2',
                'subgroup': {
                    'subsubkey': 'attribute 3'
                }
            }
        }
        mapping = set(ab._map_data_to_scheme(data, scheme))
        self.assertEqual(mapping, {('attribute 1', 'value'),
                                   ('attribute 2', 'subvalue'),
                                   ('attribute 3', 'subsubvalue')})

    def test_composite(self):
        # A (name, index) tuple in the scheme composes several source keys
        # into one space-joined attribute value.
        ab = AcousticPlugin()
        data = {'key 1': 'part 1', 'key 2': 'part 2'}
        scheme = {'key 1': ('attribute', 0), 'key 2': ('attribute', 1)}
        mapping = set(ab._map_data_to_scheme(data, scheme))
        self.assertEqual(mapping, {('attribute', 'part 1 part 2')})

    def test_realistic(self):
        # Full ABSCHEME applied to a captured AcousticBrainz API response
        # stored in the test resources directory.
        ab = AcousticPlugin()
        data_path = os.path.join(RSRC, b'acousticbrainz/data.json')
        with open(data_path) as res:
            data = json.load(res)
        mapping = set(ab._map_data_to_scheme(data, ABSCHEME))
        expected = {
            ('chords_key', 'A'),
            ('average_loudness', 0.815025985241),
            ('mood_acoustic', 0.415711194277),
            ('chords_changes_rate', 0.0445116683841),
            ('tonal', 0.874250173569),
            ('mood_sad', 0.299694597721),
            ('bpm', 162.532119751),
            ('gender', 'female'),
            ('initial_key', 'A minor'),
            ('chords_number_rate', 0.00194468453992),
            ('mood_relaxed', 0.123632438481),
            ('chords_scale', 'minor'),
            ('voice_instrumental', 'instrumental'),
            ('key_strength', 0.636936545372),
            ('genre_rosamerica', 'roc'),
            ('mood_party', 0.234383180737),
            ('mood_aggressive', 0.0779221653938),
            ('danceable', 0.143928021193),
            ('rhythm', 'VienneseWaltz'),
            ('mood_electronic', 0.339881360531),
            ('mood_happy', 0.0894767045975),
            ('moods_mirex', "Cluster3"),
            ('timbre', "bright")
        }
        self.assertEqual(mapping, expected)
def suite():
    # Aggregate this module's tests; beets' test runner calls this entry point.
    return unittest.TestLoader().loadTestsFromName(__name__)


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
import requests
import datetime
from requests import ConnectionError
from ..exceptions import (
APIError,
InvalidResponse,
)
from ..utils import check_status_code
from ..compat import json
# monkeypatching requests
# https://github.com/kennethreitz/requests/issues/1595
# Make requests serialise/deserialise with the same json implementation the
# package selected in ..compat (see linked issue for why this is needed).
requests.models.json = json
class BaseEndpoint(object):
    """Shared plumbing for Betfair JSON-RPC endpoints: builds requests,
    posts them, times them, and converts responses into resources."""

    # Per-request timeouts in seconds, passed to requests as (connect, read).
    connect_timeout = 3.05
    read_timeout = 16
    # Exception class raised by _error_handler; subclasses may override.
    _error = APIError

    def __init__(self, parent):
        """
        :param parent: API client.
        """
        self.client = parent

    def request(self, method, params, session):
        """Send one JSON-RPC request and return the decoded response.

        :param str method: Betfair api-ng method to be used.
        :param dict params: Params to be used in request
        :param Session session: Requests session to be used, reduces latency.
        :returns: tuple of (response dict, elapsed seconds).
        :raises APIError: on any transport failure.
        :raises InvalidResponse: when the body is not valid JSON.
        """
        session = session or self.client.session
        request = self.create_req(method, params)
        date_time_sent = datetime.datetime.utcnow()
        try:
            response = session.post(
                self.url,
                data=request,
                headers=self.client.request_headers,
                timeout=(self.connect_timeout, self.read_timeout)
            )
        except ConnectionError:
            raise APIError(None, method, params, 'ConnectionError')
        except Exception as e:
            # Wrap any other transport failure so callers only need to
            # handle APIError.
            raise APIError(None, method, params, e)
        elapsed_time = (datetime.datetime.utcnow()-date_time_sent).total_seconds()
        check_status_code(response)
        try:
            response_data = response.json()
        except ValueError:
            raise InvalidResponse(response.text)
        # NOTE(review): a bound method is always truthy, so this guard only
        # matters if a subclass explicitly sets _error_handler to None.
        if self._error_handler:
            self._error_handler(response_data, method, params)
        return response_data, elapsed_time

    @staticmethod
    def create_req(method, params):
        """Build the JSON-RPC 2.0 request payload.

        :param method: Betfair api-ng method to be used.
        :param params: Params to be used in request.
        :return: Json payload.
        """
        return json.dumps(
            {
                'jsonrpc': '2.0',
                'method': method,
                'params': params,
                'id': 1
            }
        )

    def _error_handler(self, response, method=None, params=None):
        """Raise ``self._error`` when the response carries an error payload.

        :param response: Json response.
        :param params: Params to be used in request.
        :param method: Betfair api-ng method to be used.
        :return: None if no error or _error raised.
        """
        # NOTE(review): truthiness check — a present-but-falsy 'result'
        # (e.g. [] or 0) falls through to the error branch.
        if response.get('result'):
            return
        elif response.get('error'):
            raise self._error(response, method, params)

    def process_response(self, response_json, resource, elapsed_time, lightweight):
        """Convert a raw response into resource objects (or pass it through).

        :param dict/list response_json: Response in dict format
        :param BaseResource resource: Resource data structure
        :param float elapsed_time: Elapsed time of request
        :param bool lightweight: If True will return dict not a resource (22x faster)
        """
        if isinstance(response_json, list):
            result = response_json
        else:
            result = response_json.get('result', response_json)
        # Per-call lightweight flag wins; otherwise fall back to the
        # client-wide setting unless the call explicitly passed False.
        if lightweight:
            return result
        elif self.client.lightweight and lightweight is not False:
            return result
        elif isinstance(result, list):
            try:
                return [resource(elapsed_time=elapsed_time, **x) for x in result]
            except TypeError:
                raise InvalidResponse(response=result)
        else:
            try:
                return resource(elapsed_time=elapsed_time, **result)
            except TypeError:
                raise InvalidResponse(response=result)

    @property
    def url(self):
        # Betting endpoint by default; subclasses override for other services.
        return '%s%s' % (self.client.api_uri, 'betting/json-rpc/v1')
import datetime
from requests import ConnectionError
from ..exceptions import (
APIError,
InvalidResponse,
)
from ..utils import check_status_code
from ..compat import json
# monkeypatching requests
# https://github.com/kennethreitz/requests/issues/1595
requests.models.json = json
class BaseEndpoint(object):
    """Base class for Betfair JSON-RPC endpoints."""

    # Seconds allowed to establish the connection / read the response.
    connect_timeout = 3.05
    read_timeout = 16
    # Exception type raised on transport failures; subclasses may override.
    _error = APIError

    def __init__(self, parent):
        """
        :param parent: API client.
        """
        self.client = parent

    def request(self, method, params, session):
        """Post a JSON-RPC request and return (response_data, elapsed_time).

        :param str method: Betfair api-ng method to be used.
        :param dict params: Params to be used in request
        :param Session session: Requests session to be used, reduces latency.
        """
        http_session = session or self.client.session
        payload = self.create_req(method, params)
        started = datetime.datetime.utcnow()
        try:
            response = http_session.post(
                self.url,
                data=payload,
                headers=self.client.request_headers,
                timeout=(self.connect_timeout, self.read_timeout),
            )
        except ConnectionError:
            raise APIError(None, method, params, 'ConnectionError')
        except Exception as e:
            raise APIError(None, method, params, e)
        elapsed_time = (datetime.datetime.utcnow() - started).total_seconds()
        check_status_code(response)
        try:
            response_data = response.json()
        except ValueError:
            raise InvalidResponse(response.text)
        # NOTE: a bound method is always truthy; the guard only matters for
        # subclasses that set _error_handler = None.
        if self._error_handler:
            self._error_handler(response_data, method, params)
        return response_data, elapsed_time

    @staticmethod
    def create_req(method, params):
        """Build the JSON-RPC 2.0 payload.

        :param method: Betfair api-ng method to be used.
        :param params: Params to be used in request.
        :return: Json payload.
        """
        body = {
            'jsonrpc': '2.0',
            'method': method,
            'params': params,
            'id': 1,
        }
        return json.dumps(body)

    def _error_handler(self, response, method=None, params=None):
        """Raise self._error when the response carries an error.

        :param response: Json response.
        :param params: Params to be used in request.
        :param method: Betfair api-ng method to be used.
        :return: None if no error or _error raised.
        """
        if response.get('result'):
            return
        if response.get('error'):
            raise self._error(response, method, params)

    def process_response(self, response_json, resource, elapsed_time, lightweight):
        """Convert a raw response into resource objects (or pass it through).

        :param dict/list response_json: Response in dict format
        :param BaseResource resource: Resource data structure
        :param float elapsed_time: Elapsed time of request
        :param bool lightweight: If True will return dict not a resource (22x faster)
        """
        result = response_json if isinstance(response_json, list) \
            else response_json.get('result', response_json)
        if lightweight or (self.client.lightweight and lightweight is not False):
            return result
        try:
            if isinstance(result, list):
                return [resource(elapsed_time=elapsed_time, **item) for item in result]
            return resource(elapsed_time=elapsed_time, **result)
        except TypeError:
            raise InvalidResponse(response=result)

    @property
    def url(self):
        # Betting JSON-RPC endpoint relative to the client's base URI.
        return '%s%s' % (self.client.api_uri, 'betting/json-rpc/v1')
import argparse
import datetime
import json
import os
import platform
import subprocess
import sys
import types
import semver
def log(message, command=False):
    """Log *message* to stderr, prefixed "$" for commands and "#" otherwise."""
    prefix = "$" if command else "#"
    print(f"{prefix} {message}", file=sys.stderr)

def run_command(description, args, capture_output=True, shell=True):
    """Log and run a command, optionally capturing its stdout.

    :param description: Human-readable description logged first (skipped if falsy).
    :param args: Command to run -- a string, or a list of argv tokens.
    :param capture_output: When True, capture and return rstripped stdout;
        otherwise inherit our stdout and return None.
    :param shell: Passed through to subprocess.run.
    :return: The command's stdout (rstripped) when capture_output, else None.
    :raises subprocess.CalledProcessError: If the command exits non-zero.
    """
    if description:
        log(description)
    # BUG FIX: original used args.join(" "), which raises AttributeError for
    # list arguments -- str.join is a method of the separator, not the list.
    printed_args = " ".join(args) if isinstance(args, list) else args
    log(printed_args, command=True)
    stdout = subprocess.PIPE if capture_output else None
    completed_process = subprocess.run(args, stdout=stdout, shell=shell, check=True, encoding="utf-8")
    return completed_process.stdout.rstrip() if capture_output else None
def clone_repo(url, dir):
run_command(f"Cloning repo {url} -> {dir}", f"git clone {url} {dir}")
os.chdir(dir)
def fetch_repo(dir):
os.chdir(dir)
run_command(f"Fetching repo", f"git fetch")
def get_base_version(base_version):
base_version = base_version or run_command("Getting base version", f"git show origin/stable:./VERSION")
base_version = semver.VersionInfo.parse(base_version)
return base_version
def get_target_version(base_version, release_type):
target_version = base_version.next_version(release_type)
return target_version
def resolve_target_version(target_version, prerelease_number):
    """Bump the prerelease component *prerelease_number* times (no-op when falsy)."""
    remaining = prerelease_number or 0
    while remaining > 0:
        target_version = target_version.bump_prerelease()
        remaining -= 1
    return target_version
def get_cached_version():
    """Return the version stored in the VERSION file as a semver.VersionInfo."""
    # Assumes CWD is the repo root -- set earlier by clone_repo/fetch_repo/read_conf.
    with open("VERSION") as f:
        cached_version = f.read().strip()
    cached_version = semver.VersionInfo.parse(cached_version)
    return cached_version
def get_branch(version):
    """Return the release branch name ("release/<major>.<minor>.x") for *version*."""
    return f"release/{version.major}.{version.minor}.x"
def get_tag(version):
    """Return the git tag name ("v" prefix) for *version*."""
    tag_name = f"v{version}"
    return tag_name
def write_conf(type, base_version, target_version, branch, tag, ref):
# To keep links in sync, we need a consistent date. If it takes a long time to get through the process,
# there's a chance that the date the release finally gets published will be later than the date we cache here,
# but it's not a huge deal.
date = datetime.date.today().isoformat()
conf = dict(type=type, base_version=str(base_version), target_version=str(target_version), branch=branch, tag=tag, ref=ref, date=date)
log(f"conf = {conf}")
with open(".release_conf.json", "w") as f:
json.dump(conf, f)
return conf
def read_conf(args):
os.chdir(args.repo_dir)
with open(".release_conf.json", "r") as f:
conf = types.SimpleNamespace(**json.load(f))
conf.base_version = semver.VersionInfo.parse(conf.base_version)
conf.target_version = semver.VersionInfo.parse(conf.target_version)
return conf
def create_branch(type, branch, ref, force=False):
create_arg = "-C" if force else "-c"
run_command(f"Creating {type} branch {branch} -> {ref}", f"git switch {create_arg} {branch} {ref}")
def switch_branch(type, branch):
run_command(f"Switching to {type} branch {branch}", f"git switch {branch}")
def commit(what, files, message=None):
message = message or f"Updated {what}."
run_command(f"Committing {what}", f"git add {' '.join(files)} && git commit -m '{message}'")
def push(what, force=False):
force_arg = " --force-with-lease" if force else ""
run_command(f"Pushing {what}", f"git push{force_arg} origin HEAD")
def init(args):
log(f"*** INITIALIZING RELEASE ***")
if args.clone:
clone_repo(args.repo_url, args.repo_dir)
else:
fetch_repo(args.repo_dir)
base_version = get_base_version(args.base_version)
target_version = get_target_version(base_version, args.type)
branch = get_branch(target_version)
tag = get_tag(target_version)
args.ref = args.ref or "main"
write_conf(args.type, base_version, target_version, branch, tag, args.ref)
if args.type == "patch":
switch_branch("release", branch)
else:
create_branch("release", branch, args.ref, force=args.force)
def cherry(args):
log(f"*** CHERRY PICKING COMMITS ***")
conf = read_conf(args)
if args.commit:
run_command("Cherry picking commits", f"git cherry-pick -x {' '.join(args.commit)}")
def changelog(args):
log(f"*** UPDATING CHANGELOG ***")
conf = read_conf(args)
# There are two possibilities for changelog items:
# * All unreleased items are going in this release (more common, when the release contains the current main branch).
# In this case, we expect a single heading "Unreleased", containing all unreleased items, which will be promoted
# to a versioned section; for future items, a new "Unreleased" section will be generated.
# * A subset of unreleased items are going in this release (less common, when the release omits some changes).
# In this case, we expect a heading "Unreleased (<VERSION>)", containing the subset of unreleased items going in
# this release, which will be promoted to a versioned section, and another "Unreleased" section containing the
# remaining unreleased items, which will be left as is (for future items).
full_heading_regex = "^## \\[Unreleased\\](.*)$"
escaped_target_version = str(conf.target_version).replace(".", "\\.")
subset_heading_regex = f"^## \\[Unreleased ({escaped_target_version})\\](.*)$"
subset = run_command("Looking for subset heading", f"grep -q '{subset_heading_regex}' CHANGELOG.md && echo True || echo False") == "True"
heading_regex = subset_heading_regex if subset else full_heading_regex
base_tag = get_tag(conf.base_version)
diff_url = f"https://github.com/opendp/opendp/compare/{base_tag}...{conf.tag}"
replacement = f"## [{conf.target_version}] - {conf.date}\\n[{conf.target_version}]: {diff_url}"
# If this isn't a subset, prepend a new unreleased section.
if not subset:
replacement = "## [Unreleased](https://github.com/opendp/opendp/compare/stable...HEAD)\\n\\n\\n" + replacement
substitution_arg = f"-e 's|{heading_regex}|{replacement}|'"
inplace_arg = "-i ''" if platform.system() == "Darwin" else "-i"
run_command("Updating CHANGELOG", f"sed {inplace_arg} {substitution_arg} CHANGELOG.md")
commit("CHANGELOG", ["CHANGELOG.md"], f"RELEASE_TOOL: Updated CHANGELOG.md for {conf.target_version}.")
def version(args):
log(f"*** UPDATING VERSION ***")
conf = read_conf(args)
cached_version = get_cached_version()
# Resolve the target version with the prerelease number.
resolved_target_version = resolve_target_version(conf.target_version, args.prerelease_number)
log(f"Updating version -> {resolved_target_version}")
versioned_files = [
"setup.cfg",
]
log(f"Updating versioned files")
inplace_arg = "-i ''" if platform.system() == "Darwin" else "-i"
run_command(None, f"sed {inplace_arg} 's/^version = {cached_version}$/version = {resolved_target_version}/' setup.cfg")
commit("versioned files", versioned_files, f"RELEASE_TOOL: Set version to {resolved_target_version}.")
def python_version(version):
    """Map a semver version to Python's preferred string form.

    Python doesn't like versions of the form "X.Y.Z-rc.N" (even though they're
    correct), and collapses them to "X.Y.ZrcN", but semver can't handle those,
    so we map to strings.
    """
    if not version.prerelease:
        return str(version)
    # prerelease looks like "rc.N"; keep only the numeric part.
    rc_number = version.prerelease.split('.')[1]
    return f"{version.major}.{version.minor}.{version.patch}rc{rc_number}"
def sanity(venv, version, published=False):
version = python_version(version)
run_command("Creating venv", f"rm -rf {venv} && python -m venv {venv}")
package = f"opendp=={version}" if published else f"wheelhouse/opendp-{version}-py3-none-any.whl"
run_command(f"Installing opendp-pytorch {version}", f"source {venv}/bin/activate && pip install {package}")
def preflight(args):
log(f"*** RUNNING PREFLIGHT TEST ***")
conf = read_conf(args)
# We may be doing a prerelease, so use the version that was cached in the VERSION file.
cached_version = get_cached_version()
run_command(f"Building locally", "python tools/build_tool.py all")
sanity(args.venv, cached_version, published=False)
def create(args):
log(f"*** CREATING RELEASE ***")
conf = read_conf(args)
push("release", args.force)
# We may be doing a prerelease, so use the version that was cached in the VERSION file.
cached_version = get_cached_version()
resolved_tag = get_tag(cached_version)
# Just in case, clear out any existing tag, so a new one will be created by GitHub.
run_command("Clearing tag", f"git push origin :refs/tags/{resolved_tag}")
title = f"OpenDP {cached_version}"
notes = f"[Changelog](https://github.com/opendp/opendp/blob/main/CHANGELOG.md#{conf.target_version}---{conf.date})"
prerelease_arg = " -p" if cached_version.prerelease else ""
draft_arg = " -d" if args.draft else ""
run_command("Creating GitHub Release", f"gh release create {resolved_tag} --target {conf.branch} -t '{title}' -n '{notes}'{prerelease_arg}{draft_arg}")
def watch(args):
log(f"*** WATCHING RELEASE ***")
conf = read_conf(args)
# Assumes most recent workflow is ours!
line = run_command("Listing workflows", f"gh run list -w Release | head -n 1")
descriptor = line.split("\t")
if len(descriptor) != 9:
raise Exception("Couldn't parse workflow descriptor", line)
id = descriptor[6]
run_command(f"Watching workflow {line.strip()}", f"gh run watch {id} --exit-status", capture_output=False)
def postflight(args):
log(f"*** RUNNING TEST ***")
conf = read_conf(args)
# We may be doing a prerelease, so use the version that was cached in the VERSION file.
cached_version = get_cached_version()
sanity(args.venv, cached_version, published=True)
def reconcile(args):
log(f"*** RECONCILING ***")
conf = read_conf(args)
reconciliation_branch = f"{conf.target_version}-reconciliation"
reconciled_files = ["CHANGELOG.md"]
create_branch("reconciliation", reconciliation_branch, "main", args.force)
run_command("Copying reconciled files from release branch", f"git restore -s {conf.branch} -- {' '.join(reconciled_files)}")
commit("reconciled files", reconciled_files, f"RELEASE_TOOL: Reconciled files from {conf.target_version}.")
push("reconciled files", args.force)
draft_arg = " -d" if args.draft else ""
run_command("Creating reconciliation PR", f"gh pr create -B main -f{draft_arg}")
def meta(args):
    """Drive a full release end to end by replaying the individual sub-commands.

    Runs init (plus cherry-picks for a patch release), then the release body
    twice: once as a release candidate ("version -p 1") and once as the final
    version, each pass building, creating, watching and testing the release.
    """
    init_args = [f"init -t {args.command}"]
    cherry_args = [f"cherry {' '.join(args.commit)}"] if args.command == "patch" else []
    body_args = [
        "changelog",
        "version -p 1",
        "preflight",
        "create",
        "watch",
        "postflight",
        "version",
        "preflight",
        "create",
        "watch",
        "postflight",
    ]
    # NOTE(review): both branches are identical, so "reconcile" is never run
    # automatically -- presumably the non-patch branch should be ["reconcile"];
    # confirm intent before changing.
    reconcile_args = [] if args.command == "patch" else []
    meta_args = init_args + cherry_args + body_args + reconcile_args
    # Distinct loop variable: the original rebound *args*, shadowing the
    # argparse namespace for the rest of the function.
    for sub_command in meta_args:
        _main(f"meta {sub_command}".split())
def _main(argv):
parser = argparse.ArgumentParser(description="OpenDP release tool")
parser.add_argument("-u", "--repo-url", default="<EMAIL>:opendp/opendp.git", help="Remote repo URL")
parser.add_argument("-d", "--repo-dir", default="/tmp/opendp-release", help="Local repo directory")
subparsers = parser.add_subparsers(dest="COMMAND", help="Command to run")
subparsers.required = True
subparser = subparsers.add_parser("init", help="Initialize the release process")
subparser.set_defaults(func=init)
subparser.add_argument("-c", "--clone", dest="clone", action="store_true", default=True)
subparser.add_argument("-nc", "--no-clone", dest="clone", action="store_false")
subparser.add_argument("-b", "--base-version")
subparser.add_argument("-t", "--type", choices=["major", "minor", "patch"], required=True)
subparser.add_argument("-r", "--ref")
subparser.add_argument("-f", "--force", dest="force", action="store_true", default=False)
subparser.add_argument("-nf", "--no-force", dest="force", action="store_false")
subparser = subparsers.add_parser("cherry", help="Cherry pick commits")
subparser.set_defaults(func=cherry)
subparser.add_argument("commit", nargs="+")
subparser = subparsers.add_parser("changelog", help="Update CHANGELOG file")
subparser.set_defaults(func=changelog)
subparser = subparsers.add_parser("version", help="Update versioned files")
subparser.set_defaults(func=version)
subparser.add_argument("-p", "--prerelease-number", type=int)
subparser = subparsers.add_parser("preflight", help="Run preflight test")
subparser.set_defaults(func=preflight)
subparser.add_argument("-e", "--venv", default="preflight-venv", help="Virtual environment directory")
subparser = subparsers.add_parser("create", help="Create a release")
subparser.set_defaults(func=create)
subparser.add_argument("-f", "--force", dest="force", action="store_true", default=False)
subparser.add_argument("-nf", "--no-force", dest="force", action="store_false")
subparser.add_argument("-n", "--draft", dest="draft", action="store_true", default=False)
subparser.add_argument("-nn", "--no-draft", dest="draft", action="store_false")
subparser = subparsers.add_parser("watch", help="Watch release progress")
subparser.set_defaults(func=watch)
subparser = subparsers.add_parser("postflight", help="Run postflight test")
subparser.set_defaults(func=postflight)
subparser.add_argument("-e", "--venv", default="postflight-venv", help="Virtual environment directory")
subparser = subparsers.add_parser("reconcile", help="Reconcile after the final release")
subparser.set_defaults(func=reconcile)
subparser.add_argument("-f", "--force", dest="force", action="store_true", default=False)
subparser.add_argument("-nf", "--no-force", dest="force", action="store_false")
subparser.add_argument("-n", "--draft", dest="draft", action="store_true", default=False)
subparser.add_argument("-nn", "--no-draft", dest="draft", action="store_false")
subparser = subparsers.add_parser("major", help="Execute a typical major release")
subparser.set_defaults(func=meta, command="major")
subparser = subparsers.add_parser("minor", help="Execute a typical minor release")
subparser.set_defaults(func=meta, command="minor")
subparser = subparsers.add_parser("patch", help="Execute a typical patch release")
subparser.set_defaults(func=meta, command="patch")
subparser.add_argument("commit", nargs="+")
args = parser.parse_args(argv[1:])
args.func(args)
def main():
_main(sys.argv)
if __name__ == "__main__":
main() | tools/release_tool.py | import argparse
import datetime
import json
import os
import platform
import subprocess
import sys
import types
import semver
def log(message, command=False):
    """Log *message* to stderr, prefixed "$" for commands and "#" otherwise."""
    prefix = "$" if command else "#"
    print(f"{prefix} {message}", file=sys.stderr)

def run_command(description, args, capture_output=True, shell=True):
    """Log and run a command, optionally capturing its stdout.

    :param description: Human-readable description logged first (skipped if falsy).
    :param args: Command to run -- a string, or a list of argv tokens.
    :param capture_output: When True, capture and return rstripped stdout;
        otherwise inherit our stdout and return None.
    :param shell: Passed through to subprocess.run.
    :return: The command's stdout (rstripped) when capture_output, else None.
    :raises subprocess.CalledProcessError: If the command exits non-zero.
    """
    if description:
        log(description)
    # BUG FIX: original used args.join(" "), which raises AttributeError for
    # list arguments -- str.join is a method of the separator, not the list.
    printed_args = " ".join(args) if isinstance(args, list) else args
    log(printed_args, command=True)
    stdout = subprocess.PIPE if capture_output else None
    completed_process = subprocess.run(args, stdout=stdout, shell=shell, check=True, encoding="utf-8")
    return completed_process.stdout.rstrip() if capture_output else None
def clone_repo(url, dir):
run_command(f"Cloning repo {url} -> {dir}", f"git clone {url} {dir}")
os.chdir(dir)
def fetch_repo(dir):
os.chdir(dir)
run_command(f"Fetching repo", f"git fetch")
def get_base_version(base_version):
base_version = base_version or run_command("Getting base version", f"git show origin/stable:./VERSION")
base_version = semver.VersionInfo.parse(base_version)
return base_version
def get_target_version(base_version, release_type):
target_version = base_version.next_version(release_type)
return target_version
def resolve_target_version(target_version, prerelease_number):
for _ in range(prerelease_number or 0):
target_version = target_version.bump_prerelease()
return target_version
def get_cached_version():
with open("VERSION") as f:
cached_version = f.read().strip()
cached_version = semver.VersionInfo.parse(cached_version)
return cached_version
def get_branch(version):
branch = f"release/{version.major}.{version.minor}.x"
return branch
def get_tag(version):
tag = f"v{version}"
return tag
def write_conf(type, base_version, target_version, branch, tag, ref):
# To keep links in sync, we need a consistent date. If it takes a long time to get through the process,
# there's a chance that the date the release finally gets published will be later than the date we cache here,
# but it's not a huge deal.
date = datetime.date.today().isoformat()
conf = dict(type=type, base_version=str(base_version), target_version=str(target_version), branch=branch, tag=tag, ref=ref, date=date)
log(f"conf = {conf}")
with open(".release_conf.json", "w") as f:
json.dump(conf, f)
return conf
def read_conf(args):
os.chdir(args.repo_dir)
with open(".release_conf.json", "r") as f:
conf = types.SimpleNamespace(**json.load(f))
conf.base_version = semver.VersionInfo.parse(conf.base_version)
conf.target_version = semver.VersionInfo.parse(conf.target_version)
return conf
def create_branch(type, branch, ref, force=False):
create_arg = "-C" if force else "-c"
run_command(f"Creating {type} branch {branch} -> {ref}", f"git switch {create_arg} {branch} {ref}")
def switch_branch(type, branch):
run_command(f"Switching to {type} branch {branch}", f"git switch {branch}")
def commit(what, files, message=None):
message = message or f"Updated {what}."
run_command(f"Committing {what}", f"git add {' '.join(files)} && git commit -m '{message}'")
def push(what, force=False):
force_arg = " --force-with-lease" if force else ""
run_command(f"Pushing {what}", f"git push{force_arg} origin HEAD")
def init(args):
log(f"*** INITIALIZING RELEASE ***")
if args.clone:
clone_repo(args.repo_url, args.repo_dir)
else:
fetch_repo(args.repo_dir)
base_version = get_base_version(args.base_version)
target_version = get_target_version(base_version, args.type)
branch = get_branch(target_version)
tag = get_tag(target_version)
args.ref = args.ref or "main"
write_conf(args.type, base_version, target_version, branch, tag, args.ref)
if args.type == "patch":
switch_branch("release", branch)
else:
create_branch("release", branch, args.ref, force=args.force)
def cherry(args):
log(f"*** CHERRY PICKING COMMITS ***")
conf = read_conf(args)
if args.commit:
run_command("Cherry picking commits", f"git cherry-pick -x {' '.join(args.commit)}")
def changelog(args):
log(f"*** UPDATING CHANGELOG ***")
conf = read_conf(args)
# There are two possibilities for changelog items:
# * All unreleased items are going in this release (more common, when the release contains the current main branch).
# In this case, we expect a single heading "Unreleased", containing all unreleased items, which will be promoted
# to a versioned section; for future items, a new "Unreleased" section will be generated.
# * A subset of unreleased items are going in this release (less common, when the release omits some changes).
# In this case, we expect a heading "Unreleased (<VERSION>)", containing the subset of unreleased items going in
# this release, which will be promoted to a versioned section, and another "Unreleased" section containing the
# remaining unreleased items, which will be left as is (for future items).
full_heading_regex = "^## \\[Unreleased\\](.*)$"
escaped_target_version = str(conf.target_version).replace(".", "\\.")
subset_heading_regex = f"^## \\[Unreleased ({escaped_target_version})\\](.*)$"
subset = run_command("Looking for subset heading", f"grep -q '{subset_heading_regex}' CHANGELOG.md && echo True || echo False") == "True"
heading_regex = subset_heading_regex if subset else full_heading_regex
base_tag = get_tag(conf.base_version)
diff_url = f"https://github.com/opendp/opendp/compare/{base_tag}...{conf.tag}"
replacement = f"## [{conf.target_version}] - {conf.date}\\n[{conf.target_version}]: {diff_url}"
# If this isn't a subset, prepend a new unreleased section.
if not subset:
replacement = "## [Unreleased](https://github.com/opendp/opendp/compare/stable...HEAD)\\n\\n\\n" + replacement
substitution_arg = f"-e 's|{heading_regex}|{replacement}|'"
inplace_arg = "-i ''" if platform.system() == "Darwin" else "-i"
run_command("Updating CHANGELOG", f"sed {inplace_arg} {substitution_arg} CHANGELOG.md")
commit("CHANGELOG", ["CHANGELOG.md"], f"RELEASE_TOOL: Updated CHANGELOG.md for {conf.target_version}.")
def version(args):
log(f"*** UPDATING VERSION ***")
conf = read_conf(args)
cached_version = get_cached_version()
# Resolve the target version with the prerelease number.
resolved_target_version = resolve_target_version(conf.target_version, args.prerelease_number)
log(f"Updating version -> {resolved_target_version}")
versioned_files = [
"setup.cfg",
]
log(f"Updating versioned files")
inplace_arg = "-i ''" if platform.system() == "Darwin" else "-i"
run_command(None, f"sed {inplace_arg} 's/^version = {cached_version}$/version = {resolved_target_version}/' setup.cfg")
commit("versioned files", versioned_files, f"RELEASE_TOOL: Set version to {resolved_target_version}.")
def python_version(version):
# Python doesn't like versions of the form "X.Y.Z-rc.N" (even though they're correct), and collapses them
# to "X.Y.ZrcN", but semver can't handle those, so we map to strings.
if version.prerelease:
version = f"{version.major}.{version.minor}.{version.patch}rc{version.prerelease.split('.')[1]}"
else:
version = str(version)
return version
def sanity(venv, version, published=False):
version = python_version(version)
run_command("Creating venv", f"rm -rf {venv} && python -m venv {venv}")
package = f"opendp=={version}" if published else f"wheelhouse/opendp-{version}-py3-none-any.whl"
run_command(f"Installing opendp-pytorch {version}", f"source {venv}/bin/activate && pip install {package}")
def preflight(args):
log(f"*** RUNNING PREFLIGHT TEST ***")
conf = read_conf(args)
# We may be doing a prerelease, so use the version that was cached in the VERSION file.
cached_version = get_cached_version()
run_command(f"Building locally", "python tools/build_tool.py all")
sanity(args.venv, cached_version, published=False)
def create(args):
log(f"*** CREATING RELEASE ***")
conf = read_conf(args)
push("release", args.force)
# We may be doing a prerelease, so use the version that was cached in the VERSION file.
cached_version = get_cached_version()
resolved_tag = get_tag(cached_version)
# Just in case, clear out any existing tag, so a new one will be created by GitHub.
run_command("Clearing tag", f"git push origin :refs/tags/{resolved_tag}")
title = f"OpenDP {cached_version}"
notes = f"[Changelog](https://github.com/opendp/opendp/blob/main/CHANGELOG.md#{conf.target_version}---{conf.date})"
prerelease_arg = " -p" if cached_version.prerelease else ""
draft_arg = " -d" if args.draft else ""
run_command("Creating GitHub Release", f"gh release create {resolved_tag} --target {conf.branch} -t '{title}' -n '{notes}'{prerelease_arg}{draft_arg}")
def watch(args):
log(f"*** WATCHING RELEASE ***")
conf = read_conf(args)
# Assumes most recent workflow is ours!
line = run_command("Listing workflows", f"gh run list -w Release | head -n 1")
descriptor = line.split("\t")
if len(descriptor) != 9:
raise Exception("Couldn't parse workflow descriptor", line)
id = descriptor[6]
run_command(f"Watching workflow {line.strip()}", f"gh run watch {id} --exit-status", capture_output=False)
def postflight(args):
log(f"*** RUNNING TEST ***")
conf = read_conf(args)
# We may be doing a prerelease, so use the version that was cached in the VERSION file.
cached_version = get_cached_version()
sanity(args.venv, cached_version, published=True)
def reconcile(args):
log(f"*** RECONCILING ***")
conf = read_conf(args)
reconciliation_branch = f"{conf.target_version}-reconciliation"
reconciled_files = ["CHANGELOG.md"]
create_branch("reconciliation", reconciliation_branch, "main", args.force)
run_command("Copying reconciled files from release branch", f"git restore -s {conf.branch} -- {' '.join(reconciled_files)}")
commit("reconciled files", reconciled_files, f"RELEASE_TOOL: Reconciled files from {conf.target_version}.")
push("reconciled files", args.force)
draft_arg = " -d" if args.draft else ""
run_command("Creating reconciliation PR", f"gh pr create -B main -f{draft_arg}")
def meta(args):
init_args = [f"init -t {args.command}"]
cherry_args = [f"cherry {' '.join(args.commit)}"] if args.command == "patch" else []
body_args = [
"changelog",
"version -p 1",
"preflight",
"create",
"watch",
"postflight",
"version",
"preflight",
"create",
"watch",
"postflight",
]
reconcile_args = [] if args.command == "patch" else []
meta_args = init_args + cherry_args + body_args + reconcile_args
for args in meta_args:
_main(f"meta {args}".split())
def _main(argv):
parser = argparse.ArgumentParser(description="OpenDP release tool")
parser.add_argument("-u", "--repo-url", default="<EMAIL>:opendp/opendp.git", help="Remote repo URL")
parser.add_argument("-d", "--repo-dir", default="/tmp/opendp-release", help="Local repo directory")
subparsers = parser.add_subparsers(dest="COMMAND", help="Command to run")
subparsers.required = True
subparser = subparsers.add_parser("init", help="Initialize the release process")
subparser.set_defaults(func=init)
subparser.add_argument("-c", "--clone", dest="clone", action="store_true", default=True)
subparser.add_argument("-nc", "--no-clone", dest="clone", action="store_false")
subparser.add_argument("-b", "--base-version")
subparser.add_argument("-t", "--type", choices=["major", "minor", "patch"], required=True)
subparser.add_argument("-r", "--ref")
subparser.add_argument("-f", "--force", dest="force", action="store_true", default=False)
subparser.add_argument("-nf", "--no-force", dest="force", action="store_false")
subparser = subparsers.add_parser("cherry", help="Cherry pick commits")
subparser.set_defaults(func=cherry)
subparser.add_argument("commit", nargs="+")
subparser = subparsers.add_parser("changelog", help="Update CHANGELOG file")
subparser.set_defaults(func=changelog)
subparser = subparsers.add_parser("version", help="Update versioned files")
subparser.set_defaults(func=version)
subparser.add_argument("-p", "--prerelease-number", type=int)
subparser = subparsers.add_parser("preflight", help="Run preflight test")
subparser.set_defaults(func=preflight)
subparser.add_argument("-e", "--venv", default="preflight-venv", help="Virtual environment directory")
subparser = subparsers.add_parser("create", help="Create a release")
subparser.set_defaults(func=create)
subparser.add_argument("-f", "--force", dest="force", action="store_true", default=False)
subparser.add_argument("-nf", "--no-force", dest="force", action="store_false")
subparser.add_argument("-n", "--draft", dest="draft", action="store_true", default=False)
subparser.add_argument("-nn", "--no-draft", dest="draft", action="store_false")
subparser = subparsers.add_parser("watch", help="Watch release progress")
subparser.set_defaults(func=watch)
subparser = subparsers.add_parser("postflight", help="Run postflight test")
subparser.set_defaults(func=postflight)
subparser.add_argument("-e", "--venv", default="postflight-venv", help="Virtual environment directory")
subparser = subparsers.add_parser("reconcile", help="Reconcile after the final release")
subparser.set_defaults(func=reconcile)
subparser.add_argument("-f", "--force", dest="force", action="store_true", default=False)
subparser.add_argument("-nf", "--no-force", dest="force", action="store_false")
subparser.add_argument("-n", "--draft", dest="draft", action="store_true", default=False)
subparser.add_argument("-nn", "--no-draft", dest="draft", action="store_false")
subparser = subparsers.add_parser("major", help="Execute a typical major release")
subparser.set_defaults(func=meta, command="major")
subparser = subparsers.add_parser("minor", help="Execute a typical minor release")
subparser.set_defaults(func=meta, command="minor")
subparser = subparsers.add_parser("patch", help="Execute a typical patch release")
subparser.set_defaults(func=meta, command="patch")
subparser.add_argument("commit", nargs="+")
args = parser.parse_args(argv[1:])
args.func(args)
def main():
_main(sys.argv)
if __name__ == "__main__":
main() | 0.352202 | 0.144783 |
def romanToDecimal(roman_number):
    """Convert a Roman numeral string to its decimal value.

    Applies the subtractive rule: a symbol strictly smaller than its successor
    is subtracted (IV -> 4), otherwise added.
    """
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    result = 0
    for index, symbol in enumerate(roman_number):
        if index + 1 == len(roman_number) or values[symbol] >= values[roman_number[index + 1]]:
            result += values[symbol]
        else:
            result -= values[symbol]
    return result

def sortRoman(names):
    """Sort "Name Numeral" strings by name, then by numeral value.

    :param names: list of strings like "Louis XIV" (name, space, Roman numeral).
    :return: the sorted list (also printed, preserving the original's output).

    BUG FIX: the original rebuilt its temp list on every iteration of the
    sublist-ordering loop, so only the LAST name group ever reached the
    output; it also returned print(...)'s None. A single sort on the
    (name, numeral-value) key pair handles all groups, and the sorted list
    is returned for callers that want it.
    """
    parsed = [(name.split(' ')[0], name.split(' ')[1]) for name in names]
    parsed.sort(key=lambda pair: (pair[0], romanToDecimal(pair[1])))
    ordered = [f"{who} {numeral}" for who, numeral in parsed]
    print(ordered)
    return ordered
def main():
    """Run sortRoman on the two demo name lists."""
    # Names were anonymised to '<NAME>' placeholders in this copy of the file.
    first_batch = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
    second_batch = ['<NAME>', '<NAME>']
    for batch in (first_batch, second_batch):
        sortRoman(batch)
if __name__ == '__main__':
main() | ancestral_names/sortRoman.py | def romanToDecimal(roman_number):
roman_list = {'I':1, 'V':5, 'X':10, 'L':50, 'C':100, 'D':500, 'M':1000}
result = 0
for index,current_number in enumerate(roman_number):
if (index+1) == len(roman_number) or roman_list[current_number] >= roman_list[roman_number[index+1]]:
result+=roman_list[current_number]
else:
result-=roman_list[current_number]
return result
'''
arabes=['I','II','III','IV','V','VI','VII','VIII','IX','X','XI','XII','XIII','XIV','XV','XVI','XVII','XVIII','XIX','XX','XXI','XXII','XXIII','XXIV','XXV','XXVI','XXVII','XXVIII','XXIX','XXX','XXXI','XXXII','XXXIII','XXXIV','XXXV','XXXVI','XXXVII','XXXVIII','XXXIX','XL','XLI','XLII','XLIII','XLIV','XLV','XLVI','XLVII','XLVIII','XLIX','L']
for i in arabes:
roman_to_decimal(i)
'''
def sortRoman(names):
    """Sort 'Name RomanNumeral' strings by name, then by numeral value.

    Bug fix: the original implementation rebuilt its per-group buffer
    inside the sub-list sorting loop, so only the LAST group of equal
    names survived into the output, and it returned ``print(...)``
    (i.e. None).  This version orders the whole input by
    (name, decimal value of the numeral), prints the result as before,
    and returns the sorted list.

    Args:
        names: list of strings of the form 'Name RomanNumeral'.

    Returns:
        list: the input strings ordered by (name, numeral value).
    """
    def sort_key(entry):
        # First token is the name, second the Roman numeral.
        parts = entry.split(' ')
        return (parts[0], romanToDecimal(parts[1]))

    ordered = sorted(names, key=sort_key)
    print(ordered)  # preserve the original's console output
    return ordered
def main():
    """Run sortRoman on the two demo name lists."""
    # Names were anonymised to '<NAME>' placeholders in this copy of the file.
    first_batch = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
    second_batch = ['<NAME>', '<NAME>']
    for batch in (first_batch, second_batch):
        sortRoman(batch)
if __name__ == '__main__':
main() | 0.138899 | 0.213213 |
__author__ = 'rayleigh'
from wiking.models import Article, ArticleVersion
import os
class ArticleService:
    """Stateless helper operations for wiki Articles and their versions."""

    ROOT_PATH = '/wiki'
    # Error codes returned by parse_slug.
    PATH_NOT_FIND = 1
    NO_ERRORS = 0

    def __init__(self):
        pass

    @staticmethod
    def get_absolute_url(article, articles=None):
        """Return the full wiki URL of `article`.

        `articles` is the ordered list of ancestor articles whose slugs
        prefix the path (may be None or empty).
        """
        return os.path.join(
            ArticleService.get_parent_path(article, articles), article.slug)

    @staticmethod
    def get_parent_path(article, articles=None):
        """Return the URL path containing `article` (without its own slug)."""
        path = ArticleService.ROOT_PATH
        if article.project is not None:
            # Project wikis live under /wiki/project/<project_id>.
            path = os.path.join(path, 'project', str(article.project.id))
        # Avoid shadowing the `article` parameter with the loop variable.
        for ancestor in (articles or []):
            path = os.path.join(path, ancestor.slug)
        return path

    @staticmethod
    def jump_to_revision(article, revision_id, user):
        """Restore `article` to an older revision by creating a new head version.

        Returns True on success, False when the revision does not belong
        to the article.
        """
        try:
            revision = ArticleVersion.objects.get(pk=revision_id, article=article)
        except ArticleVersion.DoesNotExist:
            return False
        data = {
            'title': revision.title,
            'content': revision.content,
            # User-visible restore comment (kept verbatim).
            'comment': 'Востановлено из версии ' + str(revision.version),
            'article': article,
        }
        new_revision = ArticleService.create_version(data, user, article.head.version)
        article.head = new_revision
        article.save()
        return True

    @staticmethod
    def create_article(data, user):
        """Create an Article plus its first version and return it."""
        article = Article()
        article.parent = data['parent']
        article.slug = data['slug']
        article.owner = user
        article.project = data['project']
        if data['parent']:
            article.level = data['parent'].level + 1
        article.save()
        # The head pointer can only be set once the article has a pk.
        data['article'] = article
        article.head = ArticleService.create_version(data, user)
        article.save()
        return article

    @staticmethod
    def update_article(article, data, user):
        """Append a new version to `article` and move its head pointer."""
        data['article'] = article
        article.head = ArticleService.create_version(data, user, article.head.version)
        article.save()
        return article

    @staticmethod
    def articles(*args, **kwargs):
        """Return a queryset of Articles matching the given filter arguments.

        Note: QuerySet.filter never raises DoesNotExist, so the previous
        try/except branch was dead code; an unmatched filter is simply empty.
        """
        return Article.objects.filter(*args, **kwargs)

    @staticmethod
    def create_version(data, user, last_version=0):
        """Persist and return a new ArticleVersion numbered last_version + 1."""
        version = ArticleVersion()
        version.title = data['title']
        version.content = data['content']
        version.comment = data['comment']
        version.article = data['article']
        version.author = user
        version.version = last_version + 1
        version.save()
        return version

    @staticmethod
    def can_write(article, user):
        """Staff, the owner, or project members may edit non-deleted articles."""
        if user.is_staff:
            return True
        if article.deleted:
            return False
        if article.owner.id == user.id:
            return True
        return user.get_profile().hasRole(article.project)

    @staticmethod
    def can_create(user, project=None):
        """Staff may create anywhere; others only inside a project they belong to."""
        if user.is_staff:
            return True
        if project is None:
            return False
        return user.get_profile().hasRole(project)

    @staticmethod
    def can_read(article, user):
        """Staff see everything; others see non-deleted public or own-project articles."""
        if user.is_staff:
            return True
        if article.deleted:
            return False
        if article.project is None:
            return True
        return user.get_profile().hasRole(article.project)

    @staticmethod
    def get_breadcrumbs(articles):
        """Build breadcrumb dicts {'path', 'article'} from a root-first chain."""
        output = []
        remaining = list(articles)
        while remaining:
            article = remaining.pop()
            # The remaining (shallower) articles form this article's prefix.
            path = ArticleService.get_absolute_url(article, remaining)
            output.append({'path': path, 'article': article})
        output.reverse()
        return output

    @staticmethod
    def get_article(parent, slug, project=None):
        """Return the matching non-deleted Article, or None."""
        try:
            return Article.objects.get(slug=slug, parent=parent,
                                       deleted=False, project=project)
        except Article.DoesNotExist:
            return None

    @staticmethod
    def get_form_data(article):
        """Return the initial form data dict for editing `article`."""
        return {
            'slug': article.slug,
            'comment': '',
            'content': article.get_content(),
            'title': article.get_title(),
        }

    @staticmethod
    def get_revisions(article, page=1, limit=10):
        """Return one page of an article's versions, newest first.

        Pages are 1-based.  Bug fix: the old default page=0 produced a
        negative slice start and an end of 0, so the default call always
        returned an empty page.
        """
        start = (page - 1) * limit
        end = page * limit
        # QuerySet.filter never raises DoesNotExist; slicing an empty
        # queryset is already safe.
        return ArticleVersion.objects.filter(article=article).order_by('-version')[start:end]

    @staticmethod
    def get_create_path(article_slug, project=None):
        """Return the URL of the 'new article' page, carrying the requested slug."""
        path = ArticleService.ROOT_PATH
        if project is not None:
            path = os.path.join(path, 'project', str(project.id))
        return os.path.join(path, 'new') + '?slug=' + article_slug

    @staticmethod
    def parse_slug(raw_slug, project=None):
        """Split '/a/b/c' into (parent article, leaf slug, ancestors, error).

        `error` is PATH_NOT_FIND when some ancestor slug has no matching
        article in the given project.
        """
        slugs = raw_slug.strip('/').split('/')
        slug = slugs.pop()
        parent = None
        articles = []
        error = ArticleService.NO_ERRORS
        if len(slugs) > 0:
            parent_level = len(slugs) - 1
            parent_slug = slugs[-1]
            articles = Article.objects.filter(slug__in=slugs, project=project).order_by('level')
            if len(slugs) != len(articles):
                error = ArticleService.PATH_NOT_FIND
            # The parent is the ancestor with the expected slug at the
            # expected depth.
            for article in articles:
                if article.slug == parent_slug and parent_level == article.level:
                    parent = article
        return parent, slug, articles, error
from wiking.models import Article, ArticleVersion
import os
class ArticleService:
ROOT_PATH = '/wiki'
PATH_NOT_FIND = 1
NO_ERRORS = 0
def __init__(self):
pass
@staticmethod
def get_absolute_url(article, articles=[]):
return os.path.join(ArticleService.get_parent_path(article, articles), article.slug)
@staticmethod
def get_parent_path(article, articles=[]):
path = ArticleService.ROOT_PATH
if article.project is not None:
path = os.path.join(path, 'project')
path = os.path.join(path, str(article.project.id))
for article in articles:
path = os.path.join(path, article.slug)
return path
@staticmethod
def jump_to_revision(article, revision_id, user):
try:
revision = ArticleVersion.objects.get(pk=revision_id, article=article)
except ArticleVersion.DoesNotExist:
return False
data = dict()
data['title'] = revision.title
data['content'] = revision.content
data['comment'] = 'Востановлено из версии ' + str(revision.version)
data['article'] = article
new_revision = ArticleService.create_version(data, user, article.head.version)
article.head = new_revision
article.save()
return True
@staticmethod
def create_article(data, user):
article = Article()
article.parent = data['parent']
article.slug = data['slug']
article.owner = user
article.project = data['project']
if data['parent']:
article.level = data['parent'].level + 1
article.save()
#head pointer
data['article'] = article
version = ArticleService.create_version(data, user)
article.head = version
article.save()
return article
@staticmethod
def update_article(article, data, user):
data['article'] = article
new_version = ArticleService.create_version(data, user, article.head.version)
article.head = new_version
article.save()
return article
@staticmethod
def articles(*args, **kwargs):
try:
articles = Article.objects.filter(*args, **kwargs)
return articles
except Article.DoesNotExist:
return []
@staticmethod
def create_version(data, user, last_version=0):
version = ArticleVersion()
version.title = data['title']
version.content = data['content']
version.comment = data['comment']
version.article = data['article']
version.author = user
version.version = last_version + 1
version.save()
return version
@staticmethod
def can_write(article, user):
if user.is_staff:
return True
if article.deleted:
return False
if article.owner.id == user.id:
return True
return user.get_profile().hasRole(article.project)
@staticmethod
def can_create(user, project=None):
if user.is_staff:
return True
if project is None:
return False
return user.get_profile().hasRole(project)
@staticmethod
def can_read(article, user):
if user.is_staff:
return True
if article.deleted:
return False
if article.project is None:
return True
return user.get_profile().hasRole(article.project)
@staticmethod
def get_breadcrumbs(articles):
output = list()
articles = list(articles)
while len(articles) > 0:
article = articles.pop()
path = ArticleService.get_absolute_url(article, articles)
output.append({'path': path, 'article': article})
output.reverse()
return output
@staticmethod
def get_article(parent, slug, project=None):
try:
article = Article.objects.get(slug=slug, parent=parent, deleted=False, project=project)
return article
except Article.DoesNotExist:
return None
@staticmethod
def get_form_data(article):
data = dict()
data['slug'] = article.slug
data['comment'] = ''
data['content'] = article.get_content()
data['title'] = article.get_title()
return data
@staticmethod
def get_revisions(article, page=0, limit=10):
start = (page-1) * limit
limit *= page
try:
revisions = ArticleVersion.objects.filter(article=article).order_by('-version')[start:limit]
return revisions
except ArticleVersion.DoesNotExist:
return []
@staticmethod
def get_create_path(article_slug, project=None):
path = ArticleService.ROOT_PATH
if project is not None:
path = os.path.join(path, 'project')
path = os.path.join(path, str(project.id))
return os.path.join(path, 'new') + '?slug=' + article_slug
@staticmethod
def parse_slug(raw_slug, project=None):
slugs = raw_slug.strip('/').split('/')
slug = slugs.pop()
parent = None
articles = []
error = ArticleService.NO_ERRORS
if len(slugs) > 0:
parent_level = len(slugs) - 1
parent_slug = slugs[len(slugs) - 1]
articles = Article.objects.filter(slug__in=slugs, project=project).order_by('level')
if len(slugs) != len(articles):
error = ArticleService.PATH_NOT_FIND
for article in articles:
if article.slug == parent_slug and parent_level == article.level:
parent = article
return parent, slug, articles, error | 0.389314 | 0.101634 |
import copy
from typing import Dict, List
from polyaxon import types
from polyaxon.exceptions import PolyaxonfileError, PolyaxonSchemaError
from polyaxon.polyaxonfile.specs import BaseSpecification, kinds
from polyaxon.polyaxonfile.specs.libs.parser import Parser
from polyaxon.polyflow import (
ParamSpec,
V1CompiledOperation,
V1Dag,
V1Hook,
V1Init,
V1Param,
)
class CompiledOperationSpecification(BaseSpecification):
    """The polyaxonfile specification for compiled operation."""

    _SPEC_KIND = kinds.COMPILED_OPERATION

    CONFIG = V1CompiledOperation

    @staticmethod
    def dict_to_param_spec(contexts: Dict = None, is_context: bool = False):
        """Convert a plain {name: value} mapping into {name: ParamSpec}.

        Each value is wrapped as a literal V1Param typed as ANY;
        `is_context` marks the resulting specs as context-provided.
        """
        contexts = contexts or {}
        return {
            k: ParamSpec(
                name=k,
                param=V1Param(value=v),
                type=types.ANY,
                is_flag=False,
                is_list=None,
                is_context=is_context,
                arg_format=None,
            )
            for k, v in contexts.items()
        }

    @classmethod
    def calculate_context_spec(
        cls,
        config: V1CompiledOperation,
        contexts: Dict = None,
        should_be_resolved: bool = False,
    ) -> Dict[str, ParamSpec]:
        """Validate the operation's params and merge in context params.

        When `should_be_resolved` is True every param must already be a
        literal (no refs), otherwise PolyaxonfileError is raised.  Context
        entries override params with the same name in the returned dict.
        """
        param_spec = config.validate_params(
            is_template=False,
            check_runs=True,
            parse_values=True,
            parse_joins=not should_be_resolved,
        )
        if should_be_resolved:
            for p_spec in param_spec:
                if not p_spec.param.is_literal:
                    raise PolyaxonfileError(
                        "calculate_context_spec received a non-resolved "
                        "ref param `{}` with value `{}`".format(
                            p_spec.name, p_spec.param.to_dict()
                        )
                    )
        param_spec = {param.name: param for param in param_spec}
        param_spec.update(cls.dict_to_param_spec(contexts=contexts, is_context=True))
        return param_spec

    @classmethod
    def _apply_operation_contexts(
        cls,
        config: V1CompiledOperation,
        param_spec: Dict[str, ParamSpec] = None,
        contexts: Dict = None,
    ) -> V1CompiledOperation:
        """Parse the whole operation against the param spec and re-read it."""
        if not param_spec:
            param_spec = cls.calculate_context_spec(config=config, contexts=contexts)
        parsed_data = Parser.parse_operation(config, param_spec or {})
        return cls.CONFIG.read(parsed_data)

    @staticmethod
    def _apply_dag_context(config: V1CompiledOperation) -> V1CompiledOperation:
        """Process and validate the DAG run in place, then return the config."""
        dag_run = config.run # type: V1Dag
        dag_run.process_dag()
        dag_run.validate_dag()
        dag_run.process_components(config.inputs)
        return config

    @classmethod
    def apply_operation_contexts(
        cls,
        config: V1CompiledOperation,
        param_spec: Dict[str, ParamSpec] = None,
        contexts: Dict = None,
    ) -> V1CompiledOperation:
        """Dispatch: DAG runs get DAG processing, others get param parsing."""
        if config.is_dag_run:
            return cls._apply_dag_context(config)
        else:
            return cls._apply_operation_contexts(
                config=config, param_spec=param_spec, contexts=contexts
            )

    @classmethod
    def _apply_connections_params(
        cls,
        connections: List[str],
        init: List[V1Init],
        artifact_store: str = None,
        param_spec: Dict[str, ParamSpec] = None,
    ):
        """Resolve params in connections and init entries.

        Artifacts-only init entries fall back to `artifact_store` as their
        connection.  Returns (resolved init list, resolved connections).
        """
        if connections:
            connections = Parser.parse_section(
                connections, param_spec=param_spec, parse_params=True
            )
        _init = []
        if init:
            for i in init:
                if i.artifacts and not i.connection:
                    i.connection = artifact_store
                resolved_i = V1Init.from_dict(
                    Parser.parse_section(
                        i.to_dict(), param_spec=param_spec, parse_params=True
                    )
                )
                _init.append(resolved_i)
        # Prepend any param that has to_init after validation
        init_params = [v.to_init() for v in param_spec.values() if v.validate_to_init()]
        init_params = [v for v in init_params if v]
        _init = init_params + _init
        return _init, connections

    @classmethod
    def _apply_distributed_run_connections_params(
        cls,
        config: V1CompiledOperation,
        artifact_store: str = None,
        param_spec: Dict[str, ParamSpec] = None,
    ):
        """Resolve connections/init params on every replica of a distributed run."""
        def _resolve_replica(replica):
            # Missing replicas (e.g. no evaluator) pass through as None.
            if not replica:
                return
            init, connections = cls._apply_connections_params(
                init=replica.init,
                connections=replica.connections,
                artifact_store=artifact_store,
                param_spec=param_spec,
            )
            replica.init = init
            replica.connections = connections
            return replica

        if config.is_mpi_job_run:
            config.run.launcher = _resolve_replica(config.run.launcher)
            config.run.worker = _resolve_replica(config.run.worker)
        if config.is_tf_job_run:
            config.run.chief = _resolve_replica(config.run.chief)
            config.run.worker = _resolve_replica(config.run.worker)
            config.run.ps = _resolve_replica(config.run.ps)
            config.run.evaluator = _resolve_replica(config.run.evaluator)
        if config.is_pytorch_job_run:
            config.run.master = _resolve_replica(config.run.master)
            config.run.worker = _resolve_replica(config.run.worker)
        return config

    @classmethod
    def apply_run_connections_params(
        cls,
        config: V1CompiledOperation,
        artifact_store: str = None,
        contexts: Dict = None,
        param_spec: Dict[str, ParamSpec] = None,
    ) -> V1CompiledOperation:
        """Resolve connections/init params for job, service, or distributed runs."""
        if not param_spec:
            param_spec = cls.calculate_context_spec(config=config, contexts=contexts)
        if config.is_job_run or config.is_service_run:
            init, connections = cls._apply_connections_params(
                init=config.run.init,
                connections=config.run.connections,
                artifact_store=artifact_store,
                param_spec=param_spec,
            )
            config.run.init = init
            config.run.connections = connections
            return config
        if config.is_distributed_run:
            return cls._apply_distributed_run_connections_params(
                config=config,
                artifact_store=artifact_store,
                param_spec=param_spec,
            )
        return config

    @classmethod
    def apply_params(
        cls,
        config: V1CompiledOperation,
        params: Dict = None,
        context: Dict = None,
    ) -> V1CompiledOperation:
        """Apply user params (mutates config) and return it."""
        config.apply_params(params, context)
        return config

    @classmethod
    def apply_section_contexts(
        cls,
        config: V1CompiledOperation,
        section,
        contexts: Dict = None,
        param_spec: Dict[str, ParamSpec] = None,
    ):
        """Parse a single section of the operation against the param spec."""
        if not param_spec:
            param_spec = cls.calculate_context_spec(config=config, contexts=contexts)

        return Parser.parse_section(section, param_spec)

    @classmethod
    def _apply_runtime_contexts(
        cls,
        config: V1CompiledOperation,
        contexts: Dict = None,
        param_spec: Dict[str, ParamSpec] = None,
    ) -> V1CompiledOperation:
        """Parse the runtime of a single (non-distributed) run; params must be literal."""
        if not param_spec:
            param_spec = cls.calculate_context_spec(
                config=config, contexts=contexts, should_be_resolved=True
            )
        parsed_data = Parser.parse_runtime(config.to_dict(), param_spec)
        return cls.CONFIG.read(parsed_data)

    @classmethod
    def _apply_distributed_runtime_contexts(
        cls,
        config: V1CompiledOperation,
        contexts: Dict = None,
        param_spec: Dict[str, ParamSpec] = None,
    ) -> V1CompiledOperation:
        """Parse the runtime of a distributed run with per-replica contexts.

        `contexts` maps replica name -> context dict; each replica gets the
        shared resolved params plus its own context entries.
        """
        if not param_spec:
            # Calculate the param_space once with empty contexts
            replica_param_spec = cls.calculate_context_spec(
                config=config, contexts=None, should_be_resolved=True
            )
            param_spec = {}
            for k in contexts:
                param_spec[k] = copy.copy(replica_param_spec)
                param_spec[k].update(
                    cls.dict_to_param_spec(contexts=contexts[k], is_context=True)
                )
        parsed_data = Parser.parse_distributed_runtime(config.to_dict(), param_spec)
        return cls.CONFIG.read(parsed_data)

    @classmethod
    def apply_runtime_contexts(
        cls,
        config: V1CompiledOperation,
        contexts: Dict = None,
        param_spec: Dict[str, ParamSpec] = None,
    ) -> V1CompiledOperation:
        """Dispatch runtime parsing; not allowed for pipeline (DAG/matrix) specs."""
        if config.has_pipeline:
            raise PolyaxonSchemaError(
                "This method is not allowed on this specification."
            )
        if config.is_distributed_run:
            return cls._apply_distributed_runtime_contexts(
                config=config,
                contexts=contexts,
                param_spec=param_spec,
            )
        else:
            return cls._apply_runtime_contexts(
                config=config,
                contexts=contexts,
                param_spec=param_spec,
            )

    @classmethod
    def apply_hooks_contexts(
        cls,
        config: V1CompiledOperation,
        contexts: Dict = None,
        param_spec: Dict[str, ParamSpec] = None,
    ) -> List[V1Hook]:
        """Parse the operation's hooks against resolved params and return them."""
        if not param_spec:
            param_spec = cls.calculate_context_spec(
                config=config, contexts=contexts, should_be_resolved=True
            )
        hooks = Parser.parse_hooks(config, param_spec)
        return [V1Hook.read(hook) for hook in hooks]
from typing import Dict, List
from polyaxon import types
from polyaxon.exceptions import PolyaxonfileError, PolyaxonSchemaError
from polyaxon.polyaxonfile.specs import BaseSpecification, kinds
from polyaxon.polyaxonfile.specs.libs.parser import Parser
from polyaxon.polyflow import (
ParamSpec,
V1CompiledOperation,
V1Dag,
V1Hook,
V1Init,
V1Param,
)
class CompiledOperationSpecification(BaseSpecification):
"""The polyaxonfile specification for compiled operation."""
_SPEC_KIND = kinds.COMPILED_OPERATION
CONFIG = V1CompiledOperation
@staticmethod
def dict_to_param_spec(contexts: Dict = None, is_context: bool = False):
contexts = contexts or {}
return {
k: ParamSpec(
name=k,
param=V1Param(value=v),
type=types.ANY,
is_flag=False,
is_list=None,
is_context=is_context,
arg_format=None,
)
for k, v in contexts.items()
}
@classmethod
def calculate_context_spec(
cls,
config: V1CompiledOperation,
contexts: Dict = None,
should_be_resolved: bool = False,
) -> Dict[str, ParamSpec]:
param_spec = config.validate_params(
is_template=False,
check_runs=True,
parse_values=True,
parse_joins=not should_be_resolved,
)
if should_be_resolved:
for p_spec in param_spec:
if not p_spec.param.is_literal:
raise PolyaxonfileError(
"calculate_context_spec received a non-resolved "
"ref param `{}` with value `{}`".format(
p_spec.name, p_spec.param.to_dict()
)
)
param_spec = {param.name: param for param in param_spec}
param_spec.update(cls.dict_to_param_spec(contexts=contexts, is_context=True))
return param_spec
@classmethod
def _apply_operation_contexts(
cls,
config: V1CompiledOperation,
param_spec: Dict[str, ParamSpec] = None,
contexts: Dict = None,
) -> V1CompiledOperation:
if not param_spec:
param_spec = cls.calculate_context_spec(config=config, contexts=contexts)
parsed_data = Parser.parse_operation(config, param_spec or {})
return cls.CONFIG.read(parsed_data)
@staticmethod
def _apply_dag_context(config: V1CompiledOperation) -> V1CompiledOperation:
dag_run = config.run # type: V1Dag
dag_run.process_dag()
dag_run.validate_dag()
dag_run.process_components(config.inputs)
return config
@classmethod
def apply_operation_contexts(
cls,
config: V1CompiledOperation,
param_spec: Dict[str, ParamSpec] = None,
contexts: Dict = None,
) -> V1CompiledOperation:
if config.is_dag_run:
return cls._apply_dag_context(config)
else:
return cls._apply_operation_contexts(
config=config, param_spec=param_spec, contexts=contexts
)
@classmethod
def _apply_connections_params(
cls,
connections: List[str],
init: List[V1Init],
artifact_store: str = None,
param_spec: Dict[str, ParamSpec] = None,
):
if connections:
connections = Parser.parse_section(
connections, param_spec=param_spec, parse_params=True
)
_init = []
if init:
for i in init:
if i.artifacts and not i.connection:
i.connection = artifact_store
resolved_i = V1Init.from_dict(
Parser.parse_section(
i.to_dict(), param_spec=param_spec, parse_params=True
)
)
_init.append(resolved_i)
# Prepend any param that has to_init after validation
init_params = [v.to_init() for v in param_spec.values() if v.validate_to_init()]
init_params = [v for v in init_params if v]
_init = init_params + _init
return _init, connections
@classmethod
def _apply_distributed_run_connections_params(
cls,
config: V1CompiledOperation,
artifact_store: str = None,
param_spec: Dict[str, ParamSpec] = None,
):
def _resolve_replica(replica):
if not replica:
return
init, connections = cls._apply_connections_params(
init=replica.init,
connections=replica.connections,
artifact_store=artifact_store,
param_spec=param_spec,
)
replica.init = init
replica.connections = connections
return replica
if config.is_mpi_job_run:
config.run.launcher = _resolve_replica(config.run.launcher)
config.run.worker = _resolve_replica(config.run.worker)
if config.is_tf_job_run:
config.run.chief = _resolve_replica(config.run.chief)
config.run.worker = _resolve_replica(config.run.worker)
config.run.ps = _resolve_replica(config.run.ps)
config.run.evaluator = _resolve_replica(config.run.evaluator)
if config.is_pytorch_job_run:
config.run.master = _resolve_replica(config.run.master)
config.run.worker = _resolve_replica(config.run.worker)
return config
@classmethod
def apply_run_connections_params(
cls,
config: V1CompiledOperation,
artifact_store: str = None,
contexts: Dict = None,
param_spec: Dict[str, ParamSpec] = None,
) -> V1CompiledOperation:
if not param_spec:
param_spec = cls.calculate_context_spec(config=config, contexts=contexts)
if config.is_job_run or config.is_service_run:
init, connections = cls._apply_connections_params(
init=config.run.init,
connections=config.run.connections,
artifact_store=artifact_store,
param_spec=param_spec,
)
config.run.init = init
config.run.connections = connections
return config
if config.is_distributed_run:
return cls._apply_distributed_run_connections_params(
config=config,
artifact_store=artifact_store,
param_spec=param_spec,
)
return config
@classmethod
def apply_params(
cls,
config: V1CompiledOperation,
params: Dict = None,
context: Dict = None,
) -> V1CompiledOperation:
config.apply_params(params, context)
return config
@classmethod
def apply_section_contexts(
cls,
config: V1CompiledOperation,
section,
contexts: Dict = None,
param_spec: Dict[str, ParamSpec] = None,
):
if not param_spec:
param_spec = cls.calculate_context_spec(config=config, contexts=contexts)
return Parser.parse_section(section, param_spec)
@classmethod
def _apply_runtime_contexts(
cls,
config: V1CompiledOperation,
contexts: Dict = None,
param_spec: Dict[str, ParamSpec] = None,
) -> V1CompiledOperation:
if not param_spec:
param_spec = cls.calculate_context_spec(
config=config, contexts=contexts, should_be_resolved=True
)
parsed_data = Parser.parse_runtime(config.to_dict(), param_spec)
return cls.CONFIG.read(parsed_data)
@classmethod
def _apply_distributed_runtime_contexts(
cls,
config: V1CompiledOperation,
contexts: Dict = None,
param_spec: Dict[str, ParamSpec] = None,
) -> V1CompiledOperation:
if not param_spec:
# Calculate the param_space once with empty contexts
replica_param_spec = cls.calculate_context_spec(
config=config, contexts=None, should_be_resolved=True
)
param_spec = {}
for k in contexts:
param_spec[k] = copy.copy(replica_param_spec)
param_spec[k].update(
cls.dict_to_param_spec(contexts=contexts[k], is_context=True)
)
parsed_data = Parser.parse_distributed_runtime(config.to_dict(), param_spec)
return cls.CONFIG.read(parsed_data)
@classmethod
def apply_runtime_contexts(
cls,
config: V1CompiledOperation,
contexts: Dict = None,
param_spec: Dict[str, ParamSpec] = None,
) -> V1CompiledOperation:
if config.has_pipeline:
raise PolyaxonSchemaError(
"This method is not allowed on this specification."
)
if config.is_distributed_run:
return cls._apply_distributed_runtime_contexts(
config=config,
contexts=contexts,
param_spec=param_spec,
)
else:
return cls._apply_runtime_contexts(
config=config,
contexts=contexts,
param_spec=param_spec,
)
@classmethod
def apply_hooks_contexts(
cls,
config: V1CompiledOperation,
contexts: Dict = None,
param_spec: Dict[str, ParamSpec] = None,
) -> List[V1Hook]:
if not param_spec:
param_spec = cls.calculate_context_spec(
config=config, contexts=contexts, should_be_resolved=True
)
hooks = Parser.parse_hooks(config, param_spec)
return [V1Hook.read(hook) for hook in hooks] | 0.78436 | 0.20199 |
import math
import numpy
import statsmodels.api as sm
lowess= sm.nonparametric.lowess
import esutil
from galpy.util import bovy_coords, bovy_plot
from scipy.interpolate import interp1d,UnivariateSpline
import apogee.tools.read as apread
import isodist
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
import apogee.tools.read as apread
from apogee.select import apogeeSelect
from astropy.io import fits
from astropy.table import Table, join
# Solar galactocentric radius and height above the Galactic plane.
_R0= 8. # kpc
_Z0= 0.025 # kpc
# Column tags for metallicity and the averaged alpha-element abundance.
_FEHTAG= 'FE_H'
_AFETAG= 'AVG_ALPHAFE'
# Plot label for the averaged alpha-element abundance.
_AFELABEL= r'$[\left([\mathrm{O+Mg+Si+S+Ca}]/5\right)/\mathrm{Fe}]$'
# Relative paths to the catalogue directory and the pickled APOGEE
# selection function.
catpath = '../catalogues/'
selectFile= '../savs/selfunc-nospdata.sav'
# Load the pre-computed selection function when available; `apo` is used
# below (get_rgbsample) to restrict the sample to surveyed fields.
# NOTE(review): if the file is absent, `apo` is undefined and
# get_rgbsample will raise NameError -- confirm this is intentional.
if os.path.exists(selectFile):
    with open(selectFile,'rb') as savefile:
        apo= pickle.load(savefile)
def get_rgbsample(loggcut = [1.8, 3.0],
                  teffcut = [0, 10000],
                  add_ages = False,
                  agetype='Martig',
                  apply_corrections=False,
                  distance_correction=False,
                  verbose = False):
    """
    Get a clean sample of dr12 APOGEE data with Michael Haydens distances
    ---
    INPUT:
       loggcut - [min, max] log(g) range to keep (selects giants)
       teffcut - [min, max] effective-temperature range to keep
       add_ages - if True, join an age catalogue onto the sample
       agetype - which age catalogue to join: 'Martig' or 'Cannon'
       apply_corrections - if True, apply the lowess-based Martig age correction
       distance_correction - if True, inflate distances by 5 per cent
       verbose - if True, print sample sizes after each step
    OUTPUT:
       Clean rgb sample with added distances
    HISTORY:
       Started - Mackereth 02/06/16
    """
    # NOTE(review): the list defaults (loggcut, teffcut) are mutable but
    # never mutated in this function, so they are safe as written.
    #get the allStar catalogue using apogee python (exlude all bad flags etc)
    allStar = apread.allStar(rmcommissioning=True,
                             exclude_star_bad=True,
                             exclude_star_warn=True,
                             main=True,
                             ak=True,
                             adddist=False)
    #cut to a 'sensible' logg range (giants which are not too high on the RGB)
    allStar = allStar[(allStar['LOGG'] > loggcut[0])&(allStar['LOGG'] < loggcut[1])&
                      (allStar['TEFF'] > teffcut[0])&(allStar['TEFF'] < teffcut[1])]
    if verbose == True:
        print str(len(allStar))+' Stars before Distance catalogue join (after Log(g) cut)'
    #load the distance VAC
    dists = fits.open(catpath+'DR12_DIST_R-GC.fits')[1].data
    #convert to astropy Table
    allStar_tab = Table(data=allStar)
    dists_tab = Table(data=dists)
    #join table
    tab = join(allStar_tab, dists_tab, keys='APOGEE_ID', uniq_col_name='{col_name}{table_name}', table_names=['','2'])
    data = tab.as_array()
    # Add empty columns for absolute magnitudes, distances, Galactocentric
    # coordinates and the averaged alpha abundance; filled in below.
    data= esutil.numpy_util.add_fields(data,[('M_J', float),
                                             ('M_H', float),
                                             ('M_K', float),
                                             ('MH50_DIST', float),
                                             ('MH50_GALR', float),
                                             ('MH50_GALZ', float),
                                             ('MH50_GALPHI', float),
                                             ('AVG_ALPHAFE', float)])
    # Distance in kpc from the 50th-percentile distance modulus.
    data['MH50_DIST'] = (10**((data['HAYDEN_DISTMOD_50']+5)/5))/1e3
    if distance_correction == True:
        data['MH50_DIST'] *= 1.05
    # Convert (l, b, d) to Galactocentric cylindrical coordinates.
    XYZ= bovy_coords.lbd_to_XYZ(data['GLON'],
                                data['GLAT'],
                                data['MH50_DIST'],
                                degree=True)
    RphiZ= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],
                                        XYZ[:,1],
                                        XYZ[:,2],
                                        Xsun=8.,Zsun=0.025)
    data['MH50_GALR']= RphiZ[:,0]
    data['MH50_GALPHI']= RphiZ[:,1]
    data['MH50_GALZ']= RphiZ[:,2]
    # Absolute magnitudes from dereddened apparent magnitudes.
    data['M_J'] = data['J0']-data['HAYDEN_DISTMOD_50']
    data['M_H'] = data['H0']-data['HAYDEN_DISTMOD_50']
    data['M_K'] = data['K0']-data['HAYDEN_DISTMOD_50']
    data['AVG_ALPHAFE'] = avg_alphafe_dr12(data)
    # Global metallicity offset applied to the whole sample.
    data[_FEHTAG] += -0.1
    #remove locations not in the apogee selection function (FIND OUT WHATS UP HERE)
    data = data[np.in1d(data['LOCATION_ID'], apo.list_fields())]
    # Remove locations outside of the Pan-STARRS dust map
    # In the Southern hemisphere
    data= data[data['LOCATION_ID'] != 4266] #240,-18
    data= data[data['LOCATION_ID'] != 4331] #5.5,-14.2
    data= data[data['LOCATION_ID'] != 4381] #5.2,-12.2
    data= data[data['LOCATION_ID'] != 4332] #1,-4
    data= data[data['LOCATION_ID'] != 4329] #0,-5
    data= data[data['LOCATION_ID'] != 4351] #0,-2
    data= data[data['LOCATION_ID'] != 4353] #358,0
    data= data[data['LOCATION_ID'] != 4385] #358.6,1.4
    # Close to the ecliptic pole where there's no data (is it the ecliptic pole?
    data= data[data['LOCATION_ID'] != 4528] #120,30
    data= data[data['LOCATION_ID'] != 4217] #123,22.4
    #remove any non-finite magnitudes
    data = data[np.isfinite(data['M_H'])]
    if verbose == True:
        print str(len(data))+' Stars with distance measures (and in good fields...)'
    if add_ages == True:
        if agetype == 'Martig':
            ages = fits.open(catpath+'DR12_martigages_vizier.fits')[1].data
            idtag = '2MASS_ID'
        if agetype == 'Cannon':
            ages = fits.open(catpath+'RGB_Cannon_Ages.fits')[1].data
            ages = esutil.numpy_util.add_fields(ages,[('Age', float)])
            ages['Age'] = np.exp(ages['ln_age'])
            idtag = 'ID'
        # Join the ages onto the sample by APOGEE ID.
        ages_tab = Table(data=ages)
        ages_tab.rename_column(idtag, 'APOGEE_ID')
        tab = join( ages_tab,data, keys='APOGEE_ID', uniq_col_name='{col_name}{table_name}', table_names=['','2'])
        allStar_full = tab.as_array()
        data = allStar_full
        if verbose == True:
            print str(len(data))+' Stars with ages'
    if apply_corrections == True:
        # Correct the ages using a lowess fit of Martig's output vs input ages.
        # NOTE(review): martig1 is an HDUList here; the column accesses below
        # (martig1['Age_out']) look like they need `[1].data` as in the
        # commented-out genfromtxt line -- confirm before enabling this path.
        #martig1 = np.genfromtxt(catpath+'martig2016_table1.txt', dtype=None, names=True, skip_header=2)
        martig1 = fits.open(catpath+'martig_table1.fits')
        fit = lowess(np.log10(martig1['Age_out']),np.log10(martig1['Age_in']))
        xs = np.linspace(-0.3,1.2,100)
        xsinterpolate = interp1d(xs,xs)
        fys = fit[:,0]-xsinterpolate(fit[:,1])
        interp = UnivariateSpline(fit[:,1], fys)
        corr_age = np.log10(data['Age'])+(interp(np.log10(data['Age'])))
        corr_age = 10**corr_age
        data['Age'] = corr_age
    return data
def avg_alphafe_dr12(data):
    """Return the mean [alpha/Fe] from O, S, Si, Ca and Mg abundances.

    Elements flagged as missing (-9999.0) are excluded per star via zero
    weights.  The element mean (relative to H) minus [Fe/H] gives
    [alpha/Fe]; the constant -0.05 offset matches the original calibration.
    """
    n = len(data)
    weighted_sum = np.zeros(n)
    n_good = np.zeros(n)
    # Accumulate in the same element order as the original implementation
    # so the floating-point result is bit-identical.
    for tag in ('O_H', 'S_H', 'SI_H', 'CA_H', 'MG_H'):
        weight = np.ones(n)
        weight[data[tag] == -9999.0] = 0.
        weighted_sum = weighted_sum + weight * data[tag]
        n_good = n_good + weight
    return weighted_sum / n_good - data['FE_H'] - 0.05
# Define the low-alpha, low-iron sample
def _lowlow_lowfeh(afe):
# The low metallicity edge
return -0.6
def _lowlow_highfeh(afe):
# The high metallicity edge
return -0.25
def _lowlow_lowafe(feh):
# The low alpha edge (-0.15,-0.075) to (-0.5,0)
return (0--0.075)/(-0.5--0.15)*(feh+0.1--0.15)-0.075
def _lowlow_highafe(feh):
# The high alpha edge (-0.15,0.075) to (-0.5,0.15)
return (0.15-0.075)/(-0.5--0.15)*(feh+0.1--0.15)+0.075
def get_lowlowsample():
    """
    NAME:
       get_lowlowsample
    PURPOSE:
       get the RGB sample at low alpha, low iron
    INPUT:
       None so far
    OUTPUT:
       sample (structured array, subset of get_rgbsample())
    HISTORY:
       2015-03-18 - Started - Bovy (IAS)
       2016-07-02 - modification - Mackereth (LJMU)
    """
    # Get the full sample first
    data= get_rgbsample()
    # The box edges are constant in [a/Fe], so evaluate them at a dummy value
    lowfeh= _lowlow_lowfeh(0.)
    highfeh= _lowlow_highfeh(0.)
    # Element-wise & (rather than *) for combining boolean numpy masks:
    # inside the metallicity range and between the two alpha edges
    indx= (data[_FEHTAG] > lowfeh) & (data[_FEHTAG] <= highfeh)\
        & (data[_AFETAG] > _lowlow_lowafe(data[_FEHTAG]))\
        & (data[_AFETAG] <= _lowlow_highafe(data[_FEHTAG]))
    return data[indx]
# Define the high-alpha sample
def _highalpha_lowfeh(afe):
# The low metallicity edge
return -0.8
def _highalpha_highfeh(afe):
# The high metallicity edge
return -0.2
def _highalpha_lowafe(feh):
# The low alpha edge (-0.125,0.115) to (-0.6,0.215)
return (0.2-0.1)/(-0.6--0.125)*(feh+0.1--0.125)+0.115
def _highalpha_highafe(feh):
# The high alpha edge (-0.125,0.19) to (-0.6,0.29)
return (0.275-0.175)/(-0.6--0.125)*(feh+0.1--0.125)+0.19
def get_highalphasample():
    """
    NAME:
       get_highalphasample
    PURPOSE:
       get the RC sample at high alpha
    INPUT:
       None so far
    OUTPUT:
       sample (structured array)
    HISTORY:
       2015-03-24 - Started - Bovy (IAS)
    """
    # Get the full sample first
    # NOTE(review): get_rcsample is not defined in this module as far as is
    # visible here (the sibling getters call get_rgbsample) -- confirm it is
    # provided elsewhere or whether this should also be get_rgbsample
    data= get_rcsample()
    # The box edges are constant in [a/Fe], so evaluate them at a dummy value
    lowfeh= _highalpha_lowfeh(0.)
    highfeh= _highalpha_highfeh(0.)
    # Element-wise & (rather than *) for combining boolean numpy masks
    indx= (data[_FEHTAG] > lowfeh) & (data[_FEHTAG] <= highfeh)\
        & (data[_AFETAG] > _highalpha_lowafe(data[_FEHTAG]))\
        & (data[_AFETAG] <= _highalpha_highafe(data[_FEHTAG]))
    return data[indx]
# Define the solar sample
def _solar_lowfeh(afe):
# The low metallicity edge
return -0.2
def _solar_highfeh(afe):
# The high metallicity edge
return 0.
def _solar_lowafe(feh):
# The low alpha edge (0.1,-0.075) to (-0.1,-0.075)
return -0.075
def _solar_highafe(feh):
# The high alpha edge (-0.15,0.1) to (0.1,0.05)
return (0.1-0.05)/(-0.15-0.1)*(feh+0.1-0.1)+0.05
def get_solarsample():
    """
    NAME:
       get_solarsample
    PURPOSE:
       get the RC sample at solar abundances
    INPUT:
       None so far
    OUTPUT:
       sample (structured array, subset of get_rgbsample())
    HISTORY:
       2015-03-18 - Started - Bovy (IAS)
       2016-07-02 - modification - Mackereth (LJMU)
    """
    # Get the full sample first
    data= get_rgbsample()
    # The box edges are constant in [a/Fe], so evaluate them at a dummy value
    lowfeh= _solar_lowfeh(0.)
    highfeh= _solar_highfeh(0.)
    # Element-wise & (rather than *) for combining boolean numpy masks
    indx= (data[_FEHTAG] > lowfeh) & (data[_FEHTAG] <= highfeh)\
        & (data[_AFETAG] > _solar_lowafe(data[_FEHTAG]))\
        & (data[_AFETAG] <= _solar_highafe(data[_FEHTAG]))
    return data[indx]
# Define the high metallicity sample
def _highfeh_lowfeh(afe):
# The low metallicity edge
return 0.05
def _highfeh_highfeh(afe):
# The high metallicity edge
return 0.3
def _highfeh_lowafe(feh):
# The low alpha edge (0.1,-0.075) to (-0.1,-0.075)
return -0.075
def _highfeh_highafe(feh):
# The high alpha edge (-0.15,0.1) to (0.1,0.05)
return 0.05
def get_highfehsample():
    """
    NAME:
       get_highfehsample
    PURPOSE:
       get the RC sample at high [Fe/H]
    INPUT:
       None so far
    OUTPUT:
       sample (structured array, subset of get_rgbsample())
    HISTORY:
       2015-03-18 - Started - Bovy (IAS)
       2016-07-02 - modification - Mackereth (LJMU)
    """
    # Get the full sample first
    data= get_rgbsample()
    # The box edges are constant in [a/Fe], so evaluate them at a dummy value
    lowfeh= _highfeh_lowfeh(0.)
    highfeh= _highfeh_highfeh(0.)
    # Element-wise & (rather than *) for combining boolean numpy masks
    indx= (data[_FEHTAG] > lowfeh) & (data[_FEHTAG] <= highfeh)\
        & (data[_AFETAG] > _highfeh_lowafe(data[_FEHTAG]))\
        & (data[_AFETAG] <= _highfeh_highafe(data[_FEHTAG]))
    return data[indx]
def alphaedge(fehs):
    """[a/Fe] dividing line between the alpha sequences: linear below
    [Fe/H]=0, constant 0.03 above.  Entries matching neither comparison
    (e.g. NaN) are left at zero."""
    neg = fehs < 0
    nonneg = fehs >= 0
    edge = np.zeros(len(fehs))
    edge[neg] = (0.12 / -0.6) * fehs[neg] + 0.03
    edge[nonneg] = 0.03
    return edge
def highalphaedge(fehs):
    """Lower boundary of the 'clean' high-alpha sequence: linear below
    [Fe/H]=0, constant 0.04 above."""
    neg = fehs < 0
    nonneg = fehs >= 0
    edge = np.zeros(len(fehs))
    edge[neg] = (-0.13 / 0.6) * fehs[neg] + 0.04
    edge[nonneg] = 0.04
    return edge
def lowalphaedge(fehs):
    """Upper boundary of the 'clean' low-alpha sequence: linear below
    [Fe/H]=0, constant 0.01 above."""
    neg = fehs < 0
    nonneg = fehs >= 0
    edge = np.zeros(len(fehs))
    edge[neg] = (-0.10 / 0.6) * fehs[neg] + 0.01
    edge[nonneg] = 0.01
    return edge
def get_fehage(agebin = [0.,1.], fehbin = [0.,0.2], afebin = 'low', dr=None, agetype='Martig', apply_corrections=False, distance_correction=False):
    """Return the RGB sample (with ages) cut to an age bin, an [Fe/H] bin and
    an [a/Fe] selection.

    INPUT:
       agebin - [min, max) age range in Gyr
       fehbin - [min, max) [Fe/H] range
       afebin - 'low'/'high' (split at alphaedge), 'lowclean'/'highclean'
                (split at the low/high clean edges), 'lownew'/'highnew'
                (alphaedge +/- 0.025 buffer) or None for no [a/Fe] cut
       dr - unused, kept for interface compatibility
       agetype, apply_corrections, distance_correction - passed through to
                get_rgbsample
    OUTPUT:
       structured array of the selected stars
    RAISES:
       ValueError for an unrecognized afebin string (previously this fell
       through every branch and crashed with a NameError on indx)
    """
    data = get_rgbsample(add_ages=True, agetype=agetype,
                         apply_corrections=apply_corrections,
                         distance_correction=distance_correction)
    # Age and metallicity cuts are common to every afebin mode
    base = (data['Age'] >= agebin[0]) & (data['Age'] < agebin[1]) \
        & (data[_FEHTAG] >= fehbin[0]) & (data[_FEHTAG] < fehbin[1])
    afe = data[_AFETAG]
    feh = data[_FEHTAG]
    if afebin == 'low':
        indx = base & (afe < alphaedge(feh))
    elif afebin == 'high':
        indx = base & (afe >= alphaedge(feh))
    elif afebin == 'highclean':
        indx = base & (afe >= highalphaedge(feh))
    elif afebin == 'lowclean':
        indx = base & (afe <= lowalphaedge(feh))
    elif afebin == 'lownew':
        indx = base & (afe <= alphaedge(feh)-0.025)
    elif afebin == 'highnew':
        indx = base & (afe >= alphaedge(feh)+0.025)
    elif afebin is None:
        indx = base
    else:
        raise ValueError("unknown afebin: %r" % (afebin,))
    return data[indx]
def highalphalocus():
    """Lowess-smoothed locus of the high-alpha sequence in ([a/Fe],[Fe/H]).

    Selects stars above a piecewise-linear [a/Fe] boundary (plus everything
    at [Fe/H] >= 0.125) and smooths [a/Fe] against [Fe/H].
    """
    data= get_rgbsample()
    # Use & / | for boolean masks instead of arithmetic * / + (same result
    # for bool arrays, but explicit and safe)
    indx= ((data[_AFETAG] > (0.2-0.1)/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)
           & (data[_FEHTAG] < -0.225)) \
        | ((data[_AFETAG] > 0.05/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)
           & (data[_FEHTAG] >= -0.225) & (data[_FEHTAG] < 0.125)) \
        | (data[_FEHTAG] >= 0.125)
    return lowess(data[_AFETAG][indx],data[_FEHTAG][indx],frac=0.6)
def lowalphalocus():
    """Lowess-smoothed locus of the low-alpha sequence in ([a/Fe],[Fe/H]).

    Builds the high-alpha mask and smooths the complementary stars.
    """
    data= get_rgbsample()
    # Use & / | for boolean masks instead of arithmetic * / +
    indx= ((data[_AFETAG] > (0.2-0.1)/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)
           & (data[_FEHTAG] < -0.025)) \
        | ((data[_AFETAG] > 0.05/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)
           & (data[_FEHTAG] >= -0.225) & (data[_FEHTAG] < 0.125))
    # ~indx replaces the old 'True-indx' idiom: subtracting a boolean array
    # raises a TypeError in modern numpy
    return lowess(data[_AFETAG][~indx],data[_FEHTAG][~indx],frac=0.6)
class MAPs:
"""Class that pixelizes the data sample in [Fe/H] and [a/Fe]"""
def __init__(self,data=None,dfeh=0.1,dafe=0.05,fehmin=-0.75,fehmax=0.35,
afemin=-0.075,afemax=0.275):
"""
NAME:
__init__
PURPOSE:
initialize the MAPs
INPUT:
data= (None) the data sample; if None, whole stat. RC sample
dfeh, dafe= pixel size
fehmin, fehmax, afemin, afemax= minimum and maximum FeH and AFe
OUTPUT:
object with pixelized data
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
if data is None: data= get_rcsample()
self.data= data
self.dx= dfeh
self.dy= dafe
self.xmin= fehmin
self.xmax= fehmax
self.ymin= afemin
self.ymax= afemax
# edges in X and Y
self.xedges= numpy.arange(self.xmin,self.xmax+0.01,self.dx)
self.yedges= numpy.arange(self.ymin,self.ymax+0.01,self.dy)
# X and Y
self.x= data[_FEHTAG]
self.y= data[_AFETAG]
return None
def __call__(self,*args,**kwargs):
"""
NAME:
__call__
PURPOSE:
return the part of the sample in a (feh,afe) pixel
INPUT:
[Fe/H]
[a/Fe]
OUTPUT:
returns data recarray in the bin that feh and afe are in
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
#Find bin
xbin= int(math.floor((args[0]-self.xmin)/self.dx))
ybin= int(math.floor((args[1]-self.ymin)/self.dy))
#Return data
return self.data[(self.x > self.xedges[xbin])\
*(self.x <= self.xedges[xbin+1])\
*(self.y > self.yedges[ybin])\
*(self.y <= self.yedges[ybin+1])]
def map(self):
"""
NAME:
map
PURPOSE:
yield a map
INPUT:
(none)
OUTPUT:
iterates over the MAPs
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
nx= int((self.xmax-self.xmin)/self.dx)
ny= int((self.ymax-self.ymin)/self.dy)
gx= numpy.linspace(self.xmin+self.dx/2.,self.xmax-self.dx/2.,nx)
gy= numpy.linspace(self.ymin+self.dy/2.,self.ymax-self.dy/2.,ny)
for ii in range(nx):
for jj in range(ny):
yield self(gx[ii],gy[jj])
def callIndx(self,*args,**kwargs):
"""
NAME:
callIndx
PURPOSE:
return index of the part of the sample in an [Fe/H] and [a/Fe] pixel
INPUT:
[Fe/H]
[a/Fe]
OUTPUT:
returns index into data recarray in the bin that [Fe/H] and [a/Fe] are in
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
#Find bin
xbin= int(math.floor((args[0]-self.xmin)/self.dx))
ybin= int(math.floor((args[1]-self.ymin)/self.dy))
#Return data
return (self.x > self.xedges[xbin])\
*(self.x <= self.xedges[xbin+1])\
*(self.y > self.yedges[ybin])\
*(self.y <= self.yedges[ybin+1])
def xindx(self,x):
"""
NAME:
xindx
PURPOSE:
return the index corresponding to a [Fe/H] value
INPUT:
[Fe/H]
OUTPUT:
index
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
return int(math.floor((x-self.xmin)/self.dx))
def yindx(self,y):
"""
NAME:
yindx
PURPOSE:
return the index corresponding to a [a/Fe] value
INPUT:
[a/Fe]
OUTPUT:
index
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
return int(math.floor((y-self.ymin)/self.dy))
def plot(self,quant,func=numpy.median,minnstar=20.,submediany=False,
returnz=False,justcalc=False,
**kwargs):
"""
NAME:
plot
PURPOSE:
make a plot of a quantity as a function of X and Y
INPUT:
quant - the quantity (string that returns the quantity, like
'METALS') or a function of the data
func - function of quantity to plot
minnstar= minimum number of stars (20)
submeany= subtract the median y
justcalc= (False) if True, do not plot
bovy_plot.bovy_dens2d kwargs
OUTPUT:
plot to output device
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
#First create 2D
nx= int((self.xmax-self.xmin)/self.dx)
ny= int((self.ymax-self.ymin)/self.dy)
gx= numpy.linspace(self.xmin+self.dx/2.,self.xmax-self.dx/2.,nx)
gy= numpy.linspace(self.ymin+self.dy/2.,self.ymax-self.dy/2.,ny)
z2d= numpy.empty((nx,ny))
if isinstance(quant,numpy.ndarray):
z2d= numpy.reshape(quant,(nx,ny))
for ii in range(z2d.shape[0]):
for jj in range(z2d.shape[1]):
tdata= self(gx[ii],gy[jj])
if len(tdata) < minnstar:
z2d[ii,jj]= numpy.nan
else:
nbins= 0
for ii in range(z2d.shape[0]):
for jj in range(z2d.shape[1]):
tdata= self(gx[ii],gy[jj])
if len(tdata) < minnstar:
z2d[ii,jj]= numpy.nan
else:
nbins+= 1
if hasattr(quant, '__call__'):
z2d[ii,jj]= func(quant(tdata))
else:
z2d[ii,jj]= func(tdata[quant])
if submediany:
z2d[ii,:]-= \
numpy.median(z2d[ii,True-numpy.isnan(z2d[ii,:])])
if justcalc:
if returnz:
return z2d
else:
return None
#Now plot
xrange= kwargs.pop('xrange',[self.xmin,self.xmax])
yrange= kwargs.pop('yrange',[self.ymin,self.ymax])
if not kwargs.has_key('colorbar'):
kwargs['colorbar']= True
if not kwargs.has_key('shrink'):
kwargs['shrink']= 0.78
if not kwargs.has_key('vmin'):
kwargs['vmin']= numpy.nanmin(z2d)
if not kwargs.has_key('vmax'):
kwargs['vmax']= numpy.nanmax(z2d)
xlabel= r'$[\mathrm{Fe/H}]$'
ylabel= _AFELABEL
cmap= kwargs.pop('cmap','coolwarm')
out= bovy_plot.bovy_dens2d(z2d.T,origin='lower',cmap=cmap,
interpolation='nearest',
xlabel=xlabel,ylabel=ylabel,
xrange=xrange,yrange=yrange,
**kwargs)
if returnz:
return z2d
else:
            return out
import math
import numpy
import statsmodels.api as sm
lowess= sm.nonparametric.lowess
import esutil
from galpy.util import bovy_coords, bovy_plot
from scipy.interpolate import interp1d,UnivariateSpline
import apogee.tools.read as apread
import isodist
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
import apogee.tools.read as apread
from apogee.select import apogeeSelect
from astropy.io import fits
from astropy.table import Table, join
# Assumed solar position in the Galaxy
_R0= 8. # kpc
_Z0= 0.025 # kpc
# Column names used throughout for metallicity and the averaged alpha abundance
_FEHTAG= 'FE_H'
_AFETAG= 'AVG_ALPHAFE'
_AFELABEL= r'$[\left([\mathrm{O+Mg+Si+S+Ca}]/5\right)/\mathrm{Fe}]$'
catpath = '../catalogues/'
selectFile= '../savs/selfunc-nospdata.sav'
# Load the pre-computed APOGEE selection function (trusted local pickle).
# NOTE(review): if selectFile is missing, 'apo' is never defined and
# get_rgbsample will fail later with a NameError -- confirm this is intended.
if os.path.exists(selectFile):
    with open(selectFile,'rb') as savefile:
        apo= pickle.load(savefile)
def get_rgbsample(loggcut = [1.8, 3.0],
                  teffcut = [0, 10000],
                  add_ages = False,
                  agetype='Martig',
                  apply_corrections=False,
                  distance_correction=False,
                  verbose = False):
    """
    Get a clean sample of dr12 APOGEE data with Michael Haydens distances
    ---
    INPUT:
       loggcut - [min, max] log(g) range kept (giants not too high on the RGB)
       teffcut - [min, max] Teff range kept
       add_ages - if True, join the age catalogue selected by agetype
       agetype - 'Martig' or 'Cannon' age catalogue
       apply_corrections - if True, apply a lowess-based correction to the ages
       distance_correction - if True, inflate distances by 5 per cent
       verbose - if True, print sample sizes at each step
    OUTPUT:
       Clean rgb sample with added distances
    HISTORY:
       Started - Mackereth 02/06/16
    """
    #get the allStar catalogue using apogee python (exclude all bad flags etc)
    allStar = apread.allStar(rmcommissioning=True,
                             exclude_star_bad=True,
                             exclude_star_warn=True,
                             main=True,
                             ak=True,
                             adddist=False)
    #cut to a 'sensible' logg range (giants which are not too high on the RGB)
    allStar = allStar[(allStar['LOGG'] > loggcut[0])&(allStar['LOGG'] < loggcut[1])&
                      (allStar['TEFF'] > teffcut[0])&(allStar['TEFF'] < teffcut[1])]
    if verbose == True:
        print str(len(allStar))+' Stars before Distance catalogue join (after Log(g) cut)'
    #load the distance VAC
    dists = fits.open(catpath+'DR12_DIST_R-GC.fits')[1].data
    #convert to astropy Table
    allStar_tab = Table(data=allStar)
    dists_tab = Table(data=dists)
    #join table (inner join on APOGEE_ID; stars without a distance are dropped)
    tab = join(allStar_tab, dists_tab, keys='APOGEE_ID', uniq_col_name='{col_name}{table_name}', table_names=['','2'])
    data = tab.as_array()
    # add empty columns to be filled below
    data= esutil.numpy_util.add_fields(data,[('M_J', float),
                                             ('M_H', float),
                                             ('M_K', float),
                                             ('MH50_DIST', float),
                                             ('MH50_GALR', float),
                                             ('MH50_GALZ', float),
                                             ('MH50_GALPHI', float),
                                             ('AVG_ALPHAFE', float)])
    # distance modulus (50th percentile) -> distance in kpc
    data['MH50_DIST'] = (10**((data['HAYDEN_DISTMOD_50']+5)/5))/1e3
    if distance_correction == True:
        # optional global 5 per cent distance inflation
        data['MH50_DIST'] *= 1.05
    # heliocentric (l, b, d) -> Galactocentric cylindrical (R, phi, Z)
    # with the module's assumed solar position (R0=8 kpc, Z0=25 pc)
    XYZ= bovy_coords.lbd_to_XYZ(data['GLON'],
                                data['GLAT'],
                                data['MH50_DIST'],
                                degree=True)
    RphiZ= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],
                                        XYZ[:,1],
                                        XYZ[:,2],
                                        Xsun=8.,Zsun=0.025)
    data['MH50_GALR']= RphiZ[:,0]
    data['MH50_GALPHI']= RphiZ[:,1]
    data['MH50_GALZ']= RphiZ[:,2]
    # absolute magnitudes from the extinction-corrected apparent magnitudes
    data['M_J'] = data['J0']-data['HAYDEN_DISTMOD_50']
    data['M_H'] = data['H0']-data['HAYDEN_DISTMOD_50']
    data['M_K'] = data['K0']-data['HAYDEN_DISTMOD_50']
    data['AVG_ALPHAFE'] = avg_alphafe_dr12(data)
    # NOTE(review): constant -0.1 dex shift applied to [Fe/H] here -- confirm
    # the calibration origin of this offset (cf. the -0.05 in avg_alphafe_dr12)
    data[_FEHTAG] += -0.1
    #remove locations not in the apogee selection function (FIND OUT WHATS UP HERE)
    data = data[np.in1d(data['LOCATION_ID'], apo.list_fields())]
    # Remove locations outside of the Pan-STARRS dust map
    # In the Southern hemisphere
    data= data[data['LOCATION_ID'] != 4266] #240,-18
    data= data[data['LOCATION_ID'] != 4331] #5.5,-14.2
    data= data[data['LOCATION_ID'] != 4381] #5.2,-12.2
    data= data[data['LOCATION_ID'] != 4332] #1,-4
    data= data[data['LOCATION_ID'] != 4329] #0,-5
    data= data[data['LOCATION_ID'] != 4351] #0,-2
    data= data[data['LOCATION_ID'] != 4353] #358,0
    data= data[data['LOCATION_ID'] != 4385] #358.6,1.4
    # Close to the ecliptic pole where there's no data (is it the ecliptic pole?
    data= data[data['LOCATION_ID'] != 4528] #120,30
    data= data[data['LOCATION_ID'] != 4217] #123,22.4
    #remove any non-finite magnitudes
    data = data[np.isfinite(data['M_H'])]
    if verbose == True:
        print str(len(data))+' Stars with distance measures (and in good fields...)'
    if add_ages == True:
        if agetype == 'Martig':
            ages = fits.open(catpath+'DR12_martigages_vizier.fits')[1].data
            idtag = '2MASS_ID'
        if agetype == 'Cannon':
            ages = fits.open(catpath+'RGB_Cannon_Ages.fits')[1].data
            ages = esutil.numpy_util.add_fields(ages,[('Age', float)])
            # Cannon catalogue stores ln(age); convert to linear age
            ages['Age'] = np.exp(ages['ln_age'])
            idtag = 'ID'
        # rename the ID column so the age catalogue joins on APOGEE_ID
        ages_tab = Table(data=ages)
        ages_tab.rename_column(idtag, 'APOGEE_ID')
        tab = join( ages_tab,data, keys='APOGEE_ID', uniq_col_name='{col_name}{table_name}', table_names=['','2'])
        allStar_full = tab.as_array()
        data = allStar_full
        if verbose == True:
            print str(len(data))+' Stars with ages'
    if apply_corrections == True:
        # lowess correction of Martig output ages against input ages
        #martig1 = np.genfromtxt(catpath+'martig2016_table1.txt', dtype=None, names=True, skip_header=2)
        # NOTE(review): fits.open returns an HDUList; indexing martig1['Age_out']
        # below looks like it needs the '[1].data' applied elsewhere -- confirm
        martig1 = fits.open(catpath+'martig_table1.fits')
        fit = lowess(np.log10(martig1['Age_out']),np.log10(martig1['Age_in']))
        xs = np.linspace(-0.3,1.2,100)
        xsinterpolate = interp1d(xs,xs)
        # offset between the lowess fit and the identity line
        fys = fit[:,0]-xsinterpolate(fit[:,1])
        interp = UnivariateSpline(fit[:,1], fys)
        # apply the offset in log-age, then convert back to linear age
        corr_age = np.log10(data['Age'])+(interp(np.log10(data['Age'])))
        corr_age = 10**corr_age
        data['Age'] = corr_age
    return data
def avg_alphafe_dr12(data):
    """Weighted mean of the five alpha-element [X/H] columns relative to Fe.

    Elements flagged with the -9999.0 sentinel get zero weight.  Returns
    mean([O,S,Si,Ca,Mg]/H) - [Fe/H] - 0.05 (constant offset).
    NOTE(review): if all five abundances are -9999.0 for a star the weight
    sum is zero and the result divides by zero -- confirm upstream cuts
    prevent this.
    """
    weight_o= np.ones(len(data))
    weight_s= np.ones(len(data))
    weight_si= np.ones(len(data))
    weight_ca= np.ones(len(data))
    weight_mg= np.ones(len(data))
    # zero out missing measurements so they drop out of the weighted mean
    weight_o[data['O_H'] == -9999.0]= 0.
    weight_s[data['S_H'] == -9999.0]= 0.
    weight_si[data['SI_H'] == -9999.0]= 0.
    weight_ca[data['CA_H'] == -9999.0]= 0.
    weight_mg[data['MG_H'] == -9999.0]= 0.
    return (weight_o*data['O_H']+weight_s*data['S_H']
            +weight_si*data['SI_H']+weight_ca*data['CA_H']
            +weight_mg*data['MG_H'])/(weight_o+weight_s
                                      +weight_si+weight_ca
                                      +weight_mg)\
                                      -data['FE_H']-0.05
# Define the low-alpha, low-iron sample
def _lowlow_lowfeh(afe):
# The low metallicity edge
return -0.6
def _lowlow_highfeh(afe):
# The high metallicity edge
return -0.25
def _lowlow_lowafe(feh):
# The low alpha edge (-0.15,-0.075) to (-0.5,0)
return (0--0.075)/(-0.5--0.15)*(feh+0.1--0.15)-0.075
def _lowlow_highafe(feh):
# The high alpha edge (-0.15,0.075) to (-0.5,0.15)
return (0.15-0.075)/(-0.5--0.15)*(feh+0.1--0.15)+0.075
def get_lowlowsample():
"""
NAME:
get_lowlowsample
PURPOSE:
get the RGB sample at low alpha, low iron
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
2016-07-02 - modification - Mackereth (LJMU)
"""
# Get the full sample first
data= get_rgbsample()
# Now cut it
lowfeh= _lowlow_lowfeh(0.)
highfeh= _lowlow_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _lowlow_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _lowlow_highafe(data[_FEHTAG]))
return data[indx]
# Define the high-alpha sample
def _highalpha_lowfeh(afe):
# The low metallicity edge
return -0.8
def _highalpha_highfeh(afe):
# The high metallicity edge
return -0.2
def _highalpha_lowafe(feh):
# The low alpha edge (-0.125,0.115) to (-0.6,0.215)
return (0.2-0.1)/(-0.6--0.125)*(feh+0.1--0.125)+0.115
def _highalpha_highafe(feh):
# The high alpha edge (-0.125,0.19) to (-0.6,0.29)
return (0.275-0.175)/(-0.6--0.125)*(feh+0.1--0.125)+0.19
def get_highalphasample():
"""
NAME:
get_highalphasample
PURPOSE:
get the RC sample at high alpha
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-24 - Started - Bovy (IAS)
"""
# Get the full sample first
data= get_rcsample()
# Now cut it
lowfeh= _highalpha_lowfeh(0.)
highfeh= _highalpha_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _highalpha_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _highalpha_highafe(data[_FEHTAG]))
return data[indx]
# Define the solar sample
def _solar_lowfeh(afe):
# The low metallicity edge
return -0.2
def _solar_highfeh(afe):
# The high metallicity edge
return 0.
def _solar_lowafe(feh):
# The low alpha edge (0.1,-0.075) to (-0.1,-0.075)
return -0.075
def _solar_highafe(feh):
# The high alpha edge (-0.15,0.1) to (0.1,0.05)
return (0.1-0.05)/(-0.15-0.1)*(feh+0.1-0.1)+0.05
def get_solarsample():
"""
NAME:
get_solarsample
PURPOSE:
get the RC sample at solar abundances
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
2016-07-02 - modification - Mackereth (LJMU)
"""
# Get the full sample first
data= get_rgbsample()
# Now cut it
lowfeh= _solar_lowfeh(0.)
highfeh= _solar_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _solar_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _solar_highafe(data[_FEHTAG]))
return data[indx]
# Define the high metallicity sample
def _highfeh_lowfeh(afe):
# The low metallicity edge
return 0.05
def _highfeh_highfeh(afe):
# The high metallicity edge
return 0.3
def _highfeh_lowafe(feh):
# The low alpha edge (0.1,-0.075) to (-0.1,-0.075)
return -0.075
def _highfeh_highafe(feh):
# The high alpha edge (-0.15,0.1) to (0.1,0.05)
return 0.05
def get_highfehsample():
"""
NAME:
get_highfehsample
PURPOSE:
get the RC sample at high [Fe/H]
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
2016-07-02 - modification - Mackereth (LJMU)
"""
# Get the full sample first
data= get_rgbsample()
# Now cut it
lowfeh= _highfeh_lowfeh(0.)
highfeh= _highfeh_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _highfeh_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _highfeh_highafe(data[_FEHTAG]))
return data[indx]
def alphaedge(fehs):
edge = np.zeros(len(fehs))
edge[fehs < 0] = (0.12/-0.6)*fehs[fehs < 0]+0.03
edge[fehs >= 0] = 0.03
return edge
def highalphaedge(fehs):
edge = np.zeros(len(fehs))
edge[fehs < 0] = (-0.13/0.6)*fehs[fehs < 0]+0.04
edge[fehs >= 0] = 0.04
return edge
def lowalphaedge(fehs):
edge = np.zeros(len(fehs))
edge[fehs < 0] = (-0.10/0.6)*fehs[fehs < 0]+0.01
edge[fehs >= 0] = 0.01
return edge
def get_fehage(agebin = [0.,1.], fehbin = [0.,0.2], afebin = 'low', dr=None, agetype='Martig', apply_corrections=False, distance_correction=False):
data = get_rgbsample(add_ages=True, agetype=agetype, apply_corrections=apply_corrections, distance_correction=distance_correction)
if afebin == 'low':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] < alphaedge(data[_FEHTAG]))
if afebin == 'high':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] >= alphaedge(data[_FEHTAG]))
if afebin == 'highclean':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] >= highalphaedge(data[_FEHTAG]))
if afebin == 'lowclean':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] <= lowalphaedge(data[_FEHTAG]))
if afebin == 'lownew':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] <= alphaedge(data[_FEHTAG])-0.025)
if afebin == 'highnew':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] >= alphaedge(data[_FEHTAG])+0.025)
if afebin == None:
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])
return data[indx]
def highalphalocus():
data= get_rgbsample()
indx= (data[_AFETAG] > (0.2-0.1)/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] < -0.225)\
+(data[_AFETAG] > 0.05/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] >= -0.225)*(data[_FEHTAG] < 0.125)\
+(data[_FEHTAG] >= 0.125)
return lowess(data[_AFETAG][indx],data[_FEHTAG][indx],frac=0.6)
def lowalphalocus():
data= get_rgbsample()
indx= (data[_AFETAG] > (0.2-0.1)/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] < -0.025)\
+(data[_AFETAG] > 0.05/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] >= -0.225)*(data[_FEHTAG] < 0.125)
return lowess(data[_AFETAG][True-indx],data[_FEHTAG][True-indx],frac=0.6)
class MAPs:
    """Class that pixelizes the data sample in [Fe/H] and [a/Fe]"""
    def __init__(self,data=None,dfeh=0.1,dafe=0.05,fehmin=-0.75,fehmax=0.35,
                 afemin=-0.075,afemax=0.275):
        """
        NAME:
           __init__
        PURPOSE:
           initialize the MAPs
        INPUT:
           data= (None) the data sample; if None, whole stat. RC sample
           dfeh, dafe= pixel size
           fehmin, fehmax, afemin, afemax= minimum and maximum FeH and AFe
        OUTPUT:
           object with pixelized data
        HISTORY:
           2015-04-06 - Written - Bovy (IAS)
        """
        # NOTE(review): get_rcsample is not defined in this module as far as
        # is visible here -- confirm it is available before relying on the
        # data=None default
        if data is None: data= get_rcsample()
        self.data= data
        self.dx= dfeh
        self.dy= dafe
        self.xmin= fehmin
        self.xmax= fehmax
        self.ymin= afemin
        self.ymax= afemax
        # edges in X and Y (+0.01 pads arange so the final edge is included)
        self.xedges= numpy.arange(self.xmin,self.xmax+0.01,self.dx)
        self.yedges= numpy.arange(self.ymin,self.ymax+0.01,self.dy)
        # X and Y
        self.x= data[_FEHTAG]
        self.y= data[_AFETAG]
        return None
    def __call__(self,*args,**kwargs):
        """
        NAME:
           __call__
        PURPOSE:
           return the part of the sample in a (feh,afe) pixel
        INPUT:
           [Fe/H]
           [a/Fe]
        OUTPUT:
           returns data recarray in the bin that feh and afe are in
        HISTORY:
           2015-04-06 - Written - Bovy (IAS)
        """
        #Find bin
        xbin= int(math.floor((args[0]-self.xmin)/self.dx))
        ybin= int(math.floor((args[1]-self.ymin)/self.dy))
        #Return data
        return self.data[(self.x > self.xedges[xbin])\
                             *(self.x <= self.xedges[xbin+1])\
                             *(self.y > self.yedges[ybin])\
                             *(self.y <= self.yedges[ybin+1])]
    def map(self):
        """
        NAME:
           map
        PURPOSE:
           yield a map
        INPUT:
           (none)
        OUTPUT:
           iterates over the MAPs
        HISTORY:
           2015-04-06 - Written - Bovy (IAS)
        """
        nx= int((self.xmax-self.xmin)/self.dx)
        ny= int((self.ymax-self.ymin)/self.dy)
        # pixel centers
        gx= numpy.linspace(self.xmin+self.dx/2.,self.xmax-self.dx/2.,nx)
        gy= numpy.linspace(self.ymin+self.dy/2.,self.ymax-self.dy/2.,ny)
        for ii in range(nx):
            for jj in range(ny):
                yield self(gx[ii],gy[jj])
    def callIndx(self,*args,**kwargs):
        """
        NAME:
           callIndx
        PURPOSE:
           return index of the part of the sample in an [Fe/H] and [a/Fe] pixel
        INPUT:
           [Fe/H]
           [a/Fe]
        OUTPUT:
           returns index into data recarray in the bin that [Fe/H] and [a/Fe] are in
        HISTORY:
           2015-04-06 - Written - Bovy (IAS)
        """
        #Find bin
        xbin= int(math.floor((args[0]-self.xmin)/self.dx))
        ybin= int(math.floor((args[1]-self.ymin)/self.dy))
        #Return data
        return (self.x > self.xedges[xbin])\
            *(self.x <= self.xedges[xbin+1])\
            *(self.y > self.yedges[ybin])\
            *(self.y <= self.yedges[ybin+1])
    def xindx(self,x):
        """
        NAME:
           xindx
        PURPOSE:
           return the index corresponding to a [Fe/H] value
        INPUT:
           [Fe/H]
        OUTPUT:
           index
        HISTORY:
           2015-04-06 - Written - Bovy (IAS)
        """
        return int(math.floor((x-self.xmin)/self.dx))
    def yindx(self,y):
        """
        NAME:
           yindx
        PURPOSE:
           return the index corresponding to a [a/Fe] value
        INPUT:
           [a/Fe]
        OUTPUT:
           index
        HISTORY:
           2015-04-06 - Written - Bovy (IAS)
        """
        return int(math.floor((y-self.ymin)/self.dy))
    def plot(self,quant,func=numpy.median,minnstar=20.,submediany=False,
             returnz=False,justcalc=False,
             **kwargs):
        """
        NAME:
           plot
        PURPOSE:
           make a plot of a quantity as a function of X and Y
        INPUT:
           quant - the quantity (string that returns the quantity, like
                   'METALS'), a function of the data, or a precomputed
                   nx*ny ndarray
           func - function of quantity to plot
           minnstar= minimum number of stars (20)
           submediany= subtract the median y
           justcalc= (False) if True, do not plot
           bovy_plot.bovy_dens2d kwargs
        OUTPUT:
           plot to output device (or the z2d array if returnz)
        HISTORY:
           2015-04-06 - Written - Bovy (IAS)
        """
        #First create 2D
        nx= int((self.xmax-self.xmin)/self.dx)
        ny= int((self.ymax-self.ymin)/self.dy)
        gx= numpy.linspace(self.xmin+self.dx/2.,self.xmax-self.dx/2.,nx)
        gy= numpy.linspace(self.ymin+self.dy/2.,self.ymax-self.dy/2.,ny)
        z2d= numpy.empty((nx,ny))
        if isinstance(quant,numpy.ndarray):
            # precomputed grid: just mask under-populated pixels
            z2d= numpy.reshape(quant,(nx,ny))
            for ii in range(z2d.shape[0]):
                for jj in range(z2d.shape[1]):
                    tdata= self(gx[ii],gy[jj])
                    if len(tdata) < minnstar:
                        z2d[ii,jj]= numpy.nan
        else:
            nbins= 0
            for ii in range(z2d.shape[0]):
                for jj in range(z2d.shape[1]):
                    tdata= self(gx[ii],gy[jj])
                    if len(tdata) < minnstar:
                        z2d[ii,jj]= numpy.nan
                    else:
                        nbins+= 1
                        if hasattr(quant, '__call__'):
                            z2d[ii,jj]= func(quant(tdata))
                        else:
                            z2d[ii,jj]= func(tdata[quant])
                if submediany:
                    # ~mask replaces the old 'True-mask' idiom: subtracting a
                    # boolean array raises a TypeError in modern numpy
                    z2d[ii,:]-= \
                        numpy.median(z2d[ii,~numpy.isnan(z2d[ii,:])])
        if justcalc:
            if returnz:
                return z2d
            else:
                return None
        #Now plot
        xrange= kwargs.pop('xrange',[self.xmin,self.xmax])
        yrange= kwargs.pop('yrange',[self.ymin,self.ymax])
        # dict.has_key was removed in Python 3; 'key in dict' works everywhere
        if 'colorbar' not in kwargs:
            kwargs['colorbar']= True
        if 'shrink' not in kwargs:
            kwargs['shrink']= 0.78
        if 'vmin' not in kwargs:
            kwargs['vmin']= numpy.nanmin(z2d)
        if 'vmax' not in kwargs:
            kwargs['vmax']= numpy.nanmax(z2d)
        xlabel= r'$[\mathrm{Fe/H}]$'
        ylabel= _AFELABEL
        cmap= kwargs.pop('cmap','coolwarm')
        out= bovy_plot.bovy_dens2d(z2d.T,origin='lower',cmap=cmap,
                                   interpolation='nearest',
                                   xlabel=xlabel,ylabel=ylabel,
                                   xrange=xrange,yrange=yrange,
                                   **kwargs)
        if returnz:
            return z2d
        else:
            return out
from errno import ESRCH
import yaml
import re
import subprocess
import os
from datetime import datetime
from time import time, sleep
from subprocess import Popen
from subprocess import PIPE
from subprocess import CalledProcessError
from subprocess import TimeoutExpired
from argparse import ArgumentParser
from qcloud_cos import CosConfig
from qcloud_cos import CosS3Client
import sys
import logging
failures = 0
passed = 0
def load_config():
    """Load the perf-suite definitions from ./perfs.yaml.

    Returns the parsed YAML document (a dict with 'config' and 'perfs' keys,
    as consumed by execute()).
    """
    # safe_load never constructs arbitrary Python objects; for a plain config
    # file it parses identically to load(..., Loader=yaml.FullLoader) while
    # being safe against a tampered perfs.yaml
    with open('perfs.yaml','r') as f:
        data = yaml.safe_load(f)
    return data
conf = load_config()
def build_COSclient(secretID, secretKey, Region):
    """Construct a Tencent COS (qcloud_cos) client used to upload results.

    Parameters are the COS credentials and target region; returns a
    CosS3Client configured for HTTPS.
    """
    # NOTE(review): basicConfig is re-invoked on every call; only the first
    # invocation in a process has any effect
    logging.basicConfig(level=logging.INFO, stream=sys.stdout)
    secret_id = secretID
    secret_key = secretKey
    region = Region
    token = None # TODO(zhihanz) support token for client
    scheme = 'https'
    config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token, Scheme=scheme)
    client = CosS3Client(config)
    return client
def execute(suit, bin_path, host, port, concurrency, iterations, output_dir, type, region, bucket, S3path, secretID, secretKey):
    """Run one perf suite with the fuse-benchmark binary, optionally upload
    the JSON result to COS, and update the module-level pass/fail counters.

    suit is one entry of conf['perfs']; command-line overrides (concurrency,
    iterations) take precedence over the suite entry, which in turn falls
    back to conf['config'].  'type' (shadows the builtin) selects the
    storage backend: only "COS" triggers an upload.
    """
    base_cfg = conf['config']
    # empty string means "not given on the command line"
    if iterations == "" :
        iterations = suit.get("iterations", base_cfg['iterations'])
    if concurrency == "":
        concurrency = suit.get("concurrency", base_cfg['concurrency'])
    if bin_path == "":
        print("you should specific path for fuse-benchmark binary file")
        return
    # normalize the suite name into a file-name-safe slug
    suit_name = re.sub(r"\s+", '-', suit['name'])
    file_name = "{}-result.json".format(suit_name)
    json_path = os.path.join(output_dir, file_name)
    # NOTE(review): shell=True with values interpolated into the command
    # string is injection-prone if perfs.yaml is not trusted -- confirm the
    # config file is under repo control
    command = '{} -c {} -i {} -h {} -p {} --query "{}" --json "{}" '.format(bin_path, concurrency, iterations, host, port, suit['query'], json_path)
    print("perf {}, query: {} \n".format(suit_name, suit['query']))
    proc = Popen(command, shell=True, env=os.environ)
    start_time = datetime.now()
    # 10 ms busy-wait until the benchmark process exits
    while proc.poll() is None:
        sleep(0.01)
    # NOTE(review): total_time is computed but never used or reported
    total_time = (datetime.now() - start_time).total_seconds()
    if type == "COS":
        COScli = build_COSclient(secretID, secretKey, region)
        with open(json_path, 'rb') as fp:
            response = COScli.put_object(
                Bucket=bucket,
                Body=fp,
                Key='{}/{}'.format(S3path, file_name),
                StorageClass='STANDARD',
                EnableMD5=False
            )
        print(response['ETag'])
    global failures
    global passed
    # NOTE(review): the poll loop above only exits once returncode is set, so
    # this kill branch looks unreachable -- confirm before relying on it
    if proc.returncode is None:
        try:
            proc.kill()
        except OSError as e:
            # ESRCH: process already gone, which is fine here
            if e.errno != ESRCH:
                raise
        failures += 1
    elif proc.returncode != 0:
        failures += 1
    else:
        passed += 1
if __name__ == '__main__':
parser = ArgumentParser(description='fuse perf tests')
parser.add_argument('-o', '--output', default = ".", help='Perf results directory')
parser.add_argument('-b', '--bin', default = "fuse-benchmark", help='Fuse benchmark binary')
parser.add_argument('--host', default = "127.0.0.1", help='Clickhouse handler Server host')
parser.add_argument('-p', '--port', default = "9001", help='Clickhouse handler Server port')
parser.add_argument('-c', '--concurrency', default = "", help='Set default concurrency for all perf tests')
parser.add_argument('-i', '--iteration', default = "", help='Set default iteration number for each performance tests to run')
parser.add_argument('-t', '--type', default = "local", help='Set storage endpoint for performance testing, support local and COS')
parser.add_argument('--region', default = "", help='Set storage region')
parser.add_argument('--bucket', default = "", help='Set storage bucket')
parser.add_argument('--path', default = "", help='Set absolute path to store objects')
parser.add_argument('--secretID', default = "", help='Set storage secret ID')
parser.add_argument('--secretKey', default = "", help='Set storage secret Key')
args = parser.parse_args()
for suit in conf['perfs']:
execute(suit, args.bin, args.host, args.port, args.concurrency, args.iteration, args.output,
                args.type, args.region, args.bucket, args.path, args.secretID, args.secretKey)
from errno import ESRCH
import yaml
import re
import subprocess
import os
from datetime import datetime
from time import time, sleep
from subprocess import Popen
from subprocess import PIPE
from subprocess import CalledProcessError
from subprocess import TimeoutExpired
from argparse import ArgumentParser
from qcloud_cos import CosConfig
from qcloud_cos import CosS3Client
import sys
import logging
failures = 0
passed = 0
def load_config():
with open('perfs.yaml','r') as f:
data = yaml.load(f, Loader=yaml.FullLoader)
return data
conf = load_config()
def build_COSclient(secretID, secretKey, Region):
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
secret_id = secretID
secret_key = secretKey
region = Region
token = None # TODO(zhihanz) support token for client
scheme = 'https'
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token, Scheme=scheme)
client = CosS3Client(config)
return client
def execute(suit, bin_path, host, port, concurrency, iterations, output_dir, type, region, bucket, S3path, secretID, secretKey):
base_cfg = conf['config']
if iterations == "" :
iterations = suit.get("iterations", base_cfg['iterations'])
if concurrency == "":
concurrency = suit.get("concurrency", base_cfg['concurrency'])
if bin_path == "":
print("you should specific path for fuse-benchmark binary file")
return
suit_name = re.sub(r"\s+", '-', suit['name'])
file_name = "{}-result.json".format(suit_name)
json_path = os.path.join(output_dir, file_name)
command = '{} -c {} -i {} -h {} -p {} --query "{}" --json "{}" '.format(bin_path, concurrency, iterations, host, port, suit['query'], json_path)
print("perf {}, query: {} \n".format(suit_name, suit['query']))
proc = Popen(command, shell=True, env=os.environ)
start_time = datetime.now()
while proc.poll() is None:
sleep(0.01)
total_time = (datetime.now() - start_time).total_seconds()
if type == "COS":
COScli = build_COSclient(secretID, secretKey, region)
with open(json_path, 'rb') as fp:
response = COScli.put_object(
Bucket=bucket,
Body=fp,
Key='{}/{}'.format(S3path, file_name),
StorageClass='STANDARD',
EnableMD5=False
)
print(response['ETag'])
global failures
global passed
if proc.returncode is None:
try:
proc.kill()
except OSError as e:
if e.errno != ESRCH:
raise
failures += 1
elif proc.returncode != 0:
failures += 1
else:
passed += 1
if __name__ == '__main__':
parser = ArgumentParser(description='fuse perf tests')
parser.add_argument('-o', '--output', default = ".", help='Perf results directory')
parser.add_argument('-b', '--bin', default = "fuse-benchmark", help='Fuse benchmark binary')
parser.add_argument('--host', default = "127.0.0.1", help='Clickhouse handler Server host')
parser.add_argument('-p', '--port', default = "9001", help='Clickhouse handler Server port')
parser.add_argument('-c', '--concurrency', default = "", help='Set default concurrency for all perf tests')
parser.add_argument('-i', '--iteration', default = "", help='Set default iteration number for each performance tests to run')
parser.add_argument('-t', '--type', default = "local", help='Set storage endpoint for performance testing, support local and COS')
parser.add_argument('--region', default = "", help='Set storage region')
parser.add_argument('--bucket', default = "", help='Set storage bucket')
parser.add_argument('--path', default = "", help='Set absolute path to store objects')
parser.add_argument('--secretID', default = "", help='Set storage secret ID')
parser.add_argument('--secretKey', default = "", help='Set storage secret Key')
args = parser.parse_args()
for suit in conf['perfs']:
execute(suit, args.bin, args.host, args.port, args.concurrency, args.iteration, args.output,
args.type, args.region, args.bucket, args.path, args.secretID, args.secretKey) | 0.238728 | 0.061059 |
import os
import click
from ggdtrack.duke_dataset import Duke, DukeMini
from ggdtrack.visdrone_dataset import VisDrone
from ggdtrack.mot16_dataset import Mot16
from ggdtrack.eval import prep_eval_graphs, prep_eval_tracks, eval_prepped_tracks, eval_prepped_tracks_joined
from ggdtrack.graph_diff import prep_minimal_graph_diffs, find_minimal_graph_diff
from ggdtrack.klt_det_connect import prep_training_graphs
from ggdtrack.model import NNModelGraphresPerConnection
from ggdtrack.train import train_graphres_minimal
global_skip = {
"LongConnectionOrder",
"LongFalsePositiveTrack"
}
ggd_types = {
'FalsePositive',
'SplitFromFalsePositive',
'ExtraFirst',
'DualFalsePositive',
'DetectionSkipp',
'SkippLast',
'Split',
'SkippFirst',
'SplitToFalsePositive',
'ExtraLast',
'IdSwitch',
'DoubleSplitAndMerge',
'Merge',
'SplitAndMerge',
'TooShortTrack',
'LongTrack',
'LongConnectionOrder',
'LongFalsePositiveTrack',
}
@click.command()
@click.option("--dataset", default="Duke", help="Dataset loader class (Mot16, Duke or VisDrone)")
@click.option("--datadir", default="data", help="Directory into which the Duke dataset will be downloaded")
@click.option("--threads", default=None, type=int, help="The number of threads to use. Default is one per CPU core.")
@click.option("--segment-length", default=10, type=int, help="The length in seconds of video used for each garph")
@click.option("--cachedir", default="cachedir", help="Directory into which intermediate results are cached between runs")
@click.option("--minimal-confidence", default=None, type=float, help="Minimal confidense of detection to consider")
@click.option("--fold", default=None, type=int)
@click.option("--max-connect", default=5, type=int)
@click.option("--max-worse-eval-epochs", default=float('Inf'), type=float)
@click.option("--epochs", default=10, type=int)
@click.option("--too-short-track", default=2, type=int)
@click.option("--logdir-prefix", default="", help="Prepended to logdir path")
def main(dataset, datadir, threads, segment_length, cachedir, minimal_confidence, fold, max_connect,
max_worse_eval_epochs, epochs, too_short_track, logdir_prefix):
opts = dict(cachedir=cachedir, default_min_conf=minimal_confidence)
if fold is not None:
opts['fold'] = fold
dataset = eval(dataset)(datadir, **opts)
dataset.cachedir = cachedir
logdir = logdir_prefix + '/' + dataset.logdir
find_minimal_graph_diff.too_short_track = too_short_track
find_minimal_graph_diff.long_track = too_short_track * 2
for skipped in ggd_types:
if skipped in global_skip:
continue
prep_training_graphs(dataset, cachedir, limit_train_amount=0.1, threads=threads, segment_length_s=segment_length,
worker_params=dict(max_connect=max_connect))
dataset.logdir = logdir + "_skipped_" + skipped
if os.path.exists(dataset.logdir):
continue
print(dataset.logdir)
model = NNModelGraphresPerConnection()
prep_minimal_graph_diffs(dataset, model, threads=threads, skipped_ggd_types=global_skip.union([skipped]))
prep_eval_graphs(dataset, model, threads=threads)
train_graphres_minimal(dataset, model, epochs=epochs, max_worse_eval_epochs=max_worse_eval_epochs)
prep_eval_tracks(dataset, model, 'eval', threads=1)
res, res_int = eval_prepped_tracks(dataset, 'eval')
open(os.path.join(dataset.logdir, "eval_results.txt"), "w").write(res)
open(os.path.join(dataset.logdir, "eval_results_int.txt"), "w").write(res_int)
if __name__ == '__main__':
main() | ablation.py | import os
import click
from ggdtrack.duke_dataset import Duke, DukeMini
from ggdtrack.visdrone_dataset import VisDrone
from ggdtrack.mot16_dataset import Mot16
from ggdtrack.eval import prep_eval_graphs, prep_eval_tracks, eval_prepped_tracks, eval_prepped_tracks_joined
from ggdtrack.graph_diff import prep_minimal_graph_diffs, find_minimal_graph_diff
from ggdtrack.klt_det_connect import prep_training_graphs
from ggdtrack.model import NNModelGraphresPerConnection
from ggdtrack.train import train_graphres_minimal
global_skip = {
"LongConnectionOrder",
"LongFalsePositiveTrack"
}
ggd_types = {
'FalsePositive',
'SplitFromFalsePositive',
'ExtraFirst',
'DualFalsePositive',
'DetectionSkipp',
'SkippLast',
'Split',
'SkippFirst',
'SplitToFalsePositive',
'ExtraLast',
'IdSwitch',
'DoubleSplitAndMerge',
'Merge',
'SplitAndMerge',
'TooShortTrack',
'LongTrack',
'LongConnectionOrder',
'LongFalsePositiveTrack',
}
@click.command()
@click.option("--dataset", default="Duke", help="Dataset loader class (Mot16, Duke or VisDrone)")
@click.option("--datadir", default="data", help="Directory into which the Duke dataset will be downloaded")
@click.option("--threads", default=None, type=int, help="The number of threads to use. Default is one per CPU core.")
@click.option("--segment-length", default=10, type=int, help="The length in seconds of video used for each garph")
@click.option("--cachedir", default="cachedir", help="Directory into which intermediate results are cached between runs")
@click.option("--minimal-confidence", default=None, type=float, help="Minimal confidense of detection to consider")
@click.option("--fold", default=None, type=int)
@click.option("--max-connect", default=5, type=int)
@click.option("--max-worse-eval-epochs", default=float('Inf'), type=float)
@click.option("--epochs", default=10, type=int)
@click.option("--too-short-track", default=2, type=int)
@click.option("--logdir-prefix", default="", help="Prepended to logdir path")
def main(dataset, datadir, threads, segment_length, cachedir, minimal_confidence, fold, max_connect,
max_worse_eval_epochs, epochs, too_short_track, logdir_prefix):
opts = dict(cachedir=cachedir, default_min_conf=minimal_confidence)
if fold is not None:
opts['fold'] = fold
dataset = eval(dataset)(datadir, **opts)
dataset.cachedir = cachedir
logdir = logdir_prefix + '/' + dataset.logdir
find_minimal_graph_diff.too_short_track = too_short_track
find_minimal_graph_diff.long_track = too_short_track * 2
for skipped in ggd_types:
if skipped in global_skip:
continue
prep_training_graphs(dataset, cachedir, limit_train_amount=0.1, threads=threads, segment_length_s=segment_length,
worker_params=dict(max_connect=max_connect))
dataset.logdir = logdir + "_skipped_" + skipped
if os.path.exists(dataset.logdir):
continue
print(dataset.logdir)
model = NNModelGraphresPerConnection()
prep_minimal_graph_diffs(dataset, model, threads=threads, skipped_ggd_types=global_skip.union([skipped]))
prep_eval_graphs(dataset, model, threads=threads)
train_graphres_minimal(dataset, model, epochs=epochs, max_worse_eval_epochs=max_worse_eval_epochs)
prep_eval_tracks(dataset, model, 'eval', threads=1)
res, res_int = eval_prepped_tracks(dataset, 'eval')
open(os.path.join(dataset.logdir, "eval_results.txt"), "w").write(res)
open(os.path.join(dataset.logdir, "eval_results_int.txt"), "w").write(res_int)
if __name__ == '__main__':
main() | 0.317744 | 0.136839 |
import logging
from typing import Union
from easydict import EasyDict as edict
from xviz.message import XVIZMessage
from xviz.v2.session_pb2 import Metadata, StreamMetadata
from xviz.v2.style_pb2 import StyleStreamValue
ANNOTATION_TYPES = StreamMetadata.AnnotationType
CATEGORY = StreamMetadata.Category
COORDINATE_TYPES = StreamMetadata.CoordinateType
SCALAR_TYPE = StreamMetadata.ScalarType
PRIMITIVE_TYPES = StreamMetadata.PrimitiveType
UIPRIMITIVE_TYPES = StreamMetadata.UIPrimitiveType
PRIMITIVE_STYLE_MAP = dict([
(PRIMITIVE_TYPES.CIRCLE, [
'opacity',
'stroked',
'filled',
'stroke_color',
'fill_color',
'radius',
'radius_min_pixels',
'radius_max_pixels',
'stroke_width',
'stroke_width_min_pixels',
'stroke_width_max_pixels'
]),
(PRIMITIVE_TYPES.POINT, [
'opacity',
'fill_color',
'radius_pixels',
# TODO: Following two are not listed in protobuf
# 'point_color_mode',
# 'point_color_domain'
]),
(PRIMITIVE_TYPES.POLYGON, [
'stroke_color',
'fill_color',
'stroke_width',
'stroke_width_min_pixels',
'stroke_width_max_pixels',
'height',
'opacity',
'stroked',
'filled',
'extruded'
]),
(PRIMITIVE_TYPES.TEXT, [
'opacity',
'font_family',
'font_weight',
'text_size',
'text_rotation',
'text_anchor',
'text_baseline',
'fill_color'
]),
(PRIMITIVE_TYPES.POLYLINE, [
'opacity',
'stroke_color',
'stroke_width',
'stroke_width_min_pixels',
'stroke_width_max_pixels'
]),
(PRIMITIVE_TYPES.STADIUM, [
'opacity',
'fill_color',
'radius',
'radius_min_pixels',
'radius_max_pixels'
])
])
# Test whether the keys are correct
for fields in PRIMITIVE_STYLE_MAP.values():
for f in fields:
assert f in StyleStreamValue.__dict__
class XVIZBaseBuilder:
"""
# Reference
[@xviz/builder/xviz-base-builder]/(https://github.com/uber/xviz/blob/master/modules/builder/src/builders/xviz-base-builder.js)
"""
def __init__(self, category, metadata: Union[Metadata, XVIZMessage], logger=None):
self._stream_id = None
self._category = category
self._metadata = metadata.data if isinstance(metadata, XVIZMessage) else metadata
self._logger = logger or logging.getLogger("xviz")
def stream(self, stream_id):
if self._stream_id:
self._flush()
self._stream_id = stream_id
return self
@property
def stream_id(self):
return self._stream_id
@property
def category(self):
return self._category
@property
def metadata(self):
return self._metadata
def _flush(self):
raise NotImplementedError("Derived class should implement this method")
def reset(self):
self._category = None
def _validate_has_prop(self, name):
if not hasattr(self, name) or not getattr(self, name):
self._logger.warning("Stream %s: %s is missing", self.stream_id, name)
def _validate_prop_set_once(self, prop, msg=None):
if not hasattr(self, prop):
return
val = getattr(self, prop)
if not val:
return
if isinstance(val, list) and len(val) == 0:
return
self._logger.warning(msg or "Stream {}: {} has been already set."\
.format(self.stream_id, prop))
def _validate_match_metadata(self):
if not self._metadata:
self._logger.warning("Metadata is missing.")
elif self._stream_id not in self._metadata.streams:
self._logger.warning("%s is not defined in metadata.", self._stream_id)
else:
metastream = self._metadata.streams[self._stream_id]
if self._category != metastream.category:
self._logger.warning(
"Stream %s category '%s' does not match metadata definition (%s).",
self._stream_id,
CATEGORY.Name(self._category),
CATEGORY.Name(metastream.category)
)
def _validate(self):
self._validate_has_prop('_stream_id')
self._validate_has_prop('_category')
self._validate_match_metadata()
import array
from xviz.v2.style_pb2 import StyleObjectValue, StyleStreamValue
def build_object_style(style):
'''
Create StyleObjectValue from dictionary. It basically deal with list of bytes.
'''
if 'fill_color' in style.keys():
style['fill_color'] = bytes(style['fill_color'])
if 'stroke_color' in style.keys():
style['stroke_color'] = bytes(style['stroke_color'])
return StyleObjectValue(**style)
def build_stream_style(style):
'''
Create StyleStreamValue from dictionary. It basically deal with list of bytes.
'''
if 'fill_color' in style.keys():
style['fill_color'] = bytes(style['fill_color'])
if 'stroke_color' in style.keys():
style['stroke_color'] = bytes(style['stroke_color'])
return StyleStreamValue(**style) | python/xviz/builder/base_builder.py | import logging
from typing import Union
from easydict import EasyDict as edict
from xviz.message import XVIZMessage
from xviz.v2.session_pb2 import Metadata, StreamMetadata
from xviz.v2.style_pb2 import StyleStreamValue
ANNOTATION_TYPES = StreamMetadata.AnnotationType
CATEGORY = StreamMetadata.Category
COORDINATE_TYPES = StreamMetadata.CoordinateType
SCALAR_TYPE = StreamMetadata.ScalarType
PRIMITIVE_TYPES = StreamMetadata.PrimitiveType
UIPRIMITIVE_TYPES = StreamMetadata.UIPrimitiveType
PRIMITIVE_STYLE_MAP = dict([
(PRIMITIVE_TYPES.CIRCLE, [
'opacity',
'stroked',
'filled',
'stroke_color',
'fill_color',
'radius',
'radius_min_pixels',
'radius_max_pixels',
'stroke_width',
'stroke_width_min_pixels',
'stroke_width_max_pixels'
]),
(PRIMITIVE_TYPES.POINT, [
'opacity',
'fill_color',
'radius_pixels',
# TODO: Following two are not listed in protobuf
# 'point_color_mode',
# 'point_color_domain'
]),
(PRIMITIVE_TYPES.POLYGON, [
'stroke_color',
'fill_color',
'stroke_width',
'stroke_width_min_pixels',
'stroke_width_max_pixels',
'height',
'opacity',
'stroked',
'filled',
'extruded'
]),
(PRIMITIVE_TYPES.TEXT, [
'opacity',
'font_family',
'font_weight',
'text_size',
'text_rotation',
'text_anchor',
'text_baseline',
'fill_color'
]),
(PRIMITIVE_TYPES.POLYLINE, [
'opacity',
'stroke_color',
'stroke_width',
'stroke_width_min_pixels',
'stroke_width_max_pixels'
]),
(PRIMITIVE_TYPES.STADIUM, [
'opacity',
'fill_color',
'radius',
'radius_min_pixels',
'radius_max_pixels'
])
])
# Test whether the keys are correct
for fields in PRIMITIVE_STYLE_MAP.values():
for f in fields:
assert f in StyleStreamValue.__dict__
class XVIZBaseBuilder:
"""
# Reference
[@xviz/builder/xviz-base-builder]/(https://github.com/uber/xviz/blob/master/modules/builder/src/builders/xviz-base-builder.js)
"""
def __init__(self, category, metadata: Union[Metadata, XVIZMessage], logger=None):
self._stream_id = None
self._category = category
self._metadata = metadata.data if isinstance(metadata, XVIZMessage) else metadata
self._logger = logger or logging.getLogger("xviz")
def stream(self, stream_id):
if self._stream_id:
self._flush()
self._stream_id = stream_id
return self
@property
def stream_id(self):
return self._stream_id
@property
def category(self):
return self._category
@property
def metadata(self):
return self._metadata
def _flush(self):
raise NotImplementedError("Derived class should implement this method")
def reset(self):
self._category = None
def _validate_has_prop(self, name):
if not hasattr(self, name) or not getattr(self, name):
self._logger.warning("Stream %s: %s is missing", self.stream_id, name)
def _validate_prop_set_once(self, prop, msg=None):
if not hasattr(self, prop):
return
val = getattr(self, prop)
if not val:
return
if isinstance(val, list) and len(val) == 0:
return
self._logger.warning(msg or "Stream {}: {} has been already set."\
.format(self.stream_id, prop))
def _validate_match_metadata(self):
if not self._metadata:
self._logger.warning("Metadata is missing.")
elif self._stream_id not in self._metadata.streams:
self._logger.warning("%s is not defined in metadata.", self._stream_id)
else:
metastream = self._metadata.streams[self._stream_id]
if self._category != metastream.category:
self._logger.warning(
"Stream %s category '%s' does not match metadata definition (%s).",
self._stream_id,
CATEGORY.Name(self._category),
CATEGORY.Name(metastream.category)
)
def _validate(self):
self._validate_has_prop('_stream_id')
self._validate_has_prop('_category')
self._validate_match_metadata()
import array
from xviz.v2.style_pb2 import StyleObjectValue, StyleStreamValue
def build_object_style(style):
'''
Create StyleObjectValue from dictionary. It basically deal with list of bytes.
'''
if 'fill_color' in style.keys():
style['fill_color'] = bytes(style['fill_color'])
if 'stroke_color' in style.keys():
style['stroke_color'] = bytes(style['stroke_color'])
return StyleObjectValue(**style)
def build_stream_style(style):
'''
Create StyleStreamValue from dictionary. It basically deal with list of bytes.
'''
if 'fill_color' in style.keys():
style['fill_color'] = bytes(style['fill_color'])
if 'stroke_color' in style.keys():
style['stroke_color'] = bytes(style['stroke_color'])
return StyleStreamValue(**style) | 0.613352 | 0.258642 |
Langs = {'en': u'Английский',
'ja': u'Японский',
'ru': u'Русский',
'auto': u'Авто',
'sq': u'Албанский',
# 'ar': u'Арабский',
'af': u'Африкаанс',
'be': u'Белорусский',
'bg': u'Болгарский',
'cy': u'Валлийский',
'hu': u'Венгерский',
'vi': u'Вьетнамский',
'gl': u'Галисийский',
'nl': u'Голландский',
'el': u'Греческий',
'da': u'Датский',
'iw': u'Иврит',
'yi': u'Идиш',
'id': u'Индонезийский',
'ga': u'Ирландский',
'is': u'Исландский',
'es': u'Испанский',
'it': u'Итальянский',
'ca': u'Каталанский',
'zh-CN': u'Китайский',
'ko': u'Корейский',
'lv': u'Латышский',
'lt': u'Литовский',
'mk': u'Македонский',
'ms': u'Малайский',
'mt': u'мальтийский',
'de': u'Немецкий',
'no': u'Норвежский',
'fa': u'Персидский',
'pl': u'Польский',
'pt': u'Португальский',
'ro': u'Румынский',
'sr': u'Сербский',
'sk': u'Словацкий',
'sl': u'Словенский',
'sw': u'Суахили',
'tl': u'Тагальский',
'th': u'Тайский',
'tr': u'Турецкий',
'uk': u'Украинский',
'fi': u'Финский',
'fr': u'Французский',
'hi': u'Хинди',
'hr': u'Хорватский',
'cs': u'Чешский',
'sv': u'Шведский',
'et': u'Эстонский'}
import re
from urllib2 import quote
def gTrans(fLang, tLang, text):
url = "http://translate.google.ru/m?hl=ru&sl=%(fLang)s&tl=%(tLang)s&ie=UTF-8&prev=_m&q=%(text)s"
text = quote(text.encode("utf-8"))
try:
html = read_url(url % vars(), UserAgents["OperaMini"])
return uHTML(re_search(html, 'class="t0">', "</div>"))
except Exception, e:
return "%s: %s" % (e.__class__.__name__, e.message)
def gAutoTrans(mType, source, text):
if text:
repl = gTrans("auto", "ru", text)
if text == repl:
repl = u"Перевод %s => %s:\n%s" % ("auto", "en", gTrans("auto", "en", text))
else:
repl = u"Перевод %s => %s:\n%s" % ("auto", "ru", repl)
else:
repl = u"Недостаточно параметров."
reply(mType, source, repl)
def gTransHandler(mType, source, args):
if args and len(args.split()) > 2:
(fLang, tLang, text) = args.split(None, 2)
reply(mType, source, u"Перевод %s => %s:\n%s" % (fLang, tLang, gTrans(fLang, tLang, text)))
else:
answer = u"\nДоступные языки:\n"
for a, b in enumerate(sorted([x + u" — " + y for x, y in Langs.iteritems()])):
answer += u"%i. %s.\n" % (a + 1, b)
reply(mType, source, answer.encode("utf-8"))
command_handler(gTransHandler, 10, "trans")
command_handler(gAutoTrans, 10, "trans") | extensions/trans.py |
Langs = {'en': u'Английский',
'ja': u'Японский',
'ru': u'Русский',
'auto': u'Авто',
'sq': u'Албанский',
# 'ar': u'Арабский',
'af': u'Африкаанс',
'be': u'Белорусский',
'bg': u'Болгарский',
'cy': u'Валлийский',
'hu': u'Венгерский',
'vi': u'Вьетнамский',
'gl': u'Галисийский',
'nl': u'Голландский',
'el': u'Греческий',
'da': u'Датский',
'iw': u'Иврит',
'yi': u'Идиш',
'id': u'Индонезийский',
'ga': u'Ирландский',
'is': u'Исландский',
'es': u'Испанский',
'it': u'Итальянский',
'ca': u'Каталанский',
'zh-CN': u'Китайский',
'ko': u'Корейский',
'lv': u'Латышский',
'lt': u'Литовский',
'mk': u'Македонский',
'ms': u'Малайский',
'mt': u'мальтийский',
'de': u'Немецкий',
'no': u'Норвежский',
'fa': u'Персидский',
'pl': u'Польский',
'pt': u'Португальский',
'ro': u'Румынский',
'sr': u'Сербский',
'sk': u'Словацкий',
'sl': u'Словенский',
'sw': u'Суахили',
'tl': u'Тагальский',
'th': u'Тайский',
'tr': u'Турецкий',
'uk': u'Украинский',
'fi': u'Финский',
'fr': u'Французский',
'hi': u'Хинди',
'hr': u'Хорватский',
'cs': u'Чешский',
'sv': u'Шведский',
'et': u'Эстонский'}
import re
from urllib2 import quote
def gTrans(fLang, tLang, text):
url = "http://translate.google.ru/m?hl=ru&sl=%(fLang)s&tl=%(tLang)s&ie=UTF-8&prev=_m&q=%(text)s"
text = quote(text.encode("utf-8"))
try:
html = read_url(url % vars(), UserAgents["OperaMini"])
return uHTML(re_search(html, 'class="t0">', "</div>"))
except Exception, e:
return "%s: %s" % (e.__class__.__name__, e.message)
def gAutoTrans(mType, source, text):
if text:
repl = gTrans("auto", "ru", text)
if text == repl:
repl = u"Перевод %s => %s:\n%s" % ("auto", "en", gTrans("auto", "en", text))
else:
repl = u"Перевод %s => %s:\n%s" % ("auto", "ru", repl)
else:
repl = u"Недостаточно параметров."
reply(mType, source, repl)
def gTransHandler(mType, source, args):
if args and len(args.split()) > 2:
(fLang, tLang, text) = args.split(None, 2)
reply(mType, source, u"Перевод %s => %s:\n%s" % (fLang, tLang, gTrans(fLang, tLang, text)))
else:
answer = u"\nДоступные языки:\n"
for a, b in enumerate(sorted([x + u" — " + y for x, y in Langs.iteritems()])):
answer += u"%i. %s.\n" % (a + 1, b)
reply(mType, source, answer.encode("utf-8"))
command_handler(gTransHandler, 10, "trans")
command_handler(gAutoTrans, 10, "trans") | 0.121009 | 0.133726 |
from mitmproxy import contentviews
from mitmproxy.test import tflow
from mitmproxy.test import tutils
from mitmproxy.test import taddons
from mitmproxy.net.http import Headers
from ..mitmproxy import tservers
example_dir = tutils.test_data.push("../examples")
class TestScripts(tservers.MasterTest):
def test_add_header(self):
with taddons.context() as tctx:
a = tctx.script(example_dir.path("simple/add_header.py"))
f = tflow.tflow(resp=tutils.tresp())
a.response(f)
assert f.response.headers["newheader"] == "foo"
def test_custom_contentviews(self):
with taddons.context() as tctx:
tctx.script(example_dir.path("simple/custom_contentview.py"))
swapcase = contentviews.get("swapcase")
_, fmt = swapcase(b"<html>Test!</html>")
assert any(b'tEST!' in val[0][1] for val in fmt)
def test_iframe_injector(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("simple/modify_body_inject_iframe.py"))
tctx.configure(
sc,
iframe = "http://example.org/evil_iframe"
)
f = tflow.tflow(
resp=tutils.tresp(content=b"<html><body>mitmproxy</body></html>")
)
tctx.master.addons.invoke_addon(sc, "response", f)
content = f.response.content
assert b'iframe' in content and b'evil_iframe' in content
def test_modify_form(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("simple/modify_form.py"))
form_header = Headers(content_type="application/x-www-form-urlencoded")
f = tflow.tflow(req=tutils.treq(headers=form_header))
sc.request(f)
assert f.request.urlencoded_form["mitmproxy"] == "rocks"
f.request.headers["content-type"] = ""
sc.request(f)
assert list(f.request.urlencoded_form.items()) == [("foo", "bar")]
def test_modify_querystring(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("simple/modify_querystring.py"))
f = tflow.tflow(req=tutils.treq(path="/search?q=term"))
sc.request(f)
assert f.request.query["mitmproxy"] == "rocks"
f.request.path = "/"
sc.request(f)
assert f.request.query["mitmproxy"] == "rocks"
def test_redirect_requests(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("simple/redirect_requests.py"))
f = tflow.tflow(req=tutils.treq(host="example.org"))
sc.request(f)
assert f.request.host == "mitmproxy.org"
def test_send_reply_from_proxy(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("simple/send_reply_from_proxy.py"))
f = tflow.tflow(req=tutils.treq(host="example.com", port=80))
sc.request(f)
assert f.response.content == b"Hello World"
def test_dns_spoofing(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("complex/dns_spoofing.py"))
original_host = "example.com"
host_header = Headers(host=original_host)
f = tflow.tflow(req=tutils.treq(headers=host_header, port=80))
tctx.master.addons.invoke_addon(sc, "requestheaders", f)
# Rewrite by reverse proxy mode
f.request.scheme = "https"
f.request.port = 443
tctx.master.addons.invoke_addon(sc, "request", f)
assert f.request.scheme == "http"
assert f.request.port == 80
assert f.request.headers["Host"] == original_host | test/examples/test_examples.py | from mitmproxy import contentviews
from mitmproxy.test import tflow
from mitmproxy.test import tutils
from mitmproxy.test import taddons
from mitmproxy.net.http import Headers
from ..mitmproxy import tservers
example_dir = tutils.test_data.push("../examples")
class TestScripts(tservers.MasterTest):
def test_add_header(self):
with taddons.context() as tctx:
a = tctx.script(example_dir.path("simple/add_header.py"))
f = tflow.tflow(resp=tutils.tresp())
a.response(f)
assert f.response.headers["newheader"] == "foo"
def test_custom_contentviews(self):
with taddons.context() as tctx:
tctx.script(example_dir.path("simple/custom_contentview.py"))
swapcase = contentviews.get("swapcase")
_, fmt = swapcase(b"<html>Test!</html>")
assert any(b'tEST!' in val[0][1] for val in fmt)
def test_iframe_injector(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("simple/modify_body_inject_iframe.py"))
tctx.configure(
sc,
iframe = "http://example.org/evil_iframe"
)
f = tflow.tflow(
resp=tutils.tresp(content=b"<html><body>mitmproxy</body></html>")
)
tctx.master.addons.invoke_addon(sc, "response", f)
content = f.response.content
assert b'iframe' in content and b'evil_iframe' in content
def test_modify_form(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("simple/modify_form.py"))
form_header = Headers(content_type="application/x-www-form-urlencoded")
f = tflow.tflow(req=tutils.treq(headers=form_header))
sc.request(f)
assert f.request.urlencoded_form["mitmproxy"] == "rocks"
f.request.headers["content-type"] = ""
sc.request(f)
assert list(f.request.urlencoded_form.items()) == [("foo", "bar")]
def test_modify_querystring(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("simple/modify_querystring.py"))
f = tflow.tflow(req=tutils.treq(path="/search?q=term"))
sc.request(f)
assert f.request.query["mitmproxy"] == "rocks"
f.request.path = "/"
sc.request(f)
assert f.request.query["mitmproxy"] == "rocks"
def test_redirect_requests(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("simple/redirect_requests.py"))
f = tflow.tflow(req=tutils.treq(host="example.org"))
sc.request(f)
assert f.request.host == "mitmproxy.org"
def test_send_reply_from_proxy(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("simple/send_reply_from_proxy.py"))
f = tflow.tflow(req=tutils.treq(host="example.com", port=80))
sc.request(f)
assert f.response.content == b"Hello World"
def test_dns_spoofing(self):
with taddons.context() as tctx:
sc = tctx.script(example_dir.path("complex/dns_spoofing.py"))
original_host = "example.com"
host_header = Headers(host=original_host)
f = tflow.tflow(req=tutils.treq(headers=host_header, port=80))
tctx.master.addons.invoke_addon(sc, "requestheaders", f)
# Rewrite by reverse proxy mode
f.request.scheme = "https"
f.request.port = 443
tctx.master.addons.invoke_addon(sc, "request", f)
assert f.request.scheme == "http"
assert f.request.port == 80
assert f.request.headers["Host"] == original_host | 0.451085 | 0.339937 |
import logging
import tornado.web
from sqlalchemy.orm import scoped_session
import bbtornado.models
from bbtornado.handlers import ThreadRequestContext
log = logging.getLogger('bbtornado.web')
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
class Application(tornado.web.Application):
"""
The main Application, your application object is an instance of this class
"""
def __init__(self, handlers=None, default_host='', transforms=None, wsgi=False, user_model=None, domain=None, init_db=True,
sessionmaker_settings={},
create_engine_settings={},
**settings):
tornado_opts = bbtornado.config.tornado
if handlers: # append base url to handlers
base = tornado_opts.server.base
handlers = [(base + x[0],) + x[1:] for x in handlers]
# Init app settings with config.tornado and add override by passed args.
app_settings = {}
app_settings.update(tornado_opts.app_settings)
app_settings.update(settings)
super(Application, self).__init__(handlers=handlers, default_host=default_host,
transforms=transforms, wsgi=wsgi, **app_settings)
# Init engine settings with config.db and add overrider by passed args.
_create_engine_settings = {}
_create_engine_settings.update(bbtornado.config.db)
_create_engine_settings.update(create_engine_settings)
# Handle db_uri explicitely
db_uri = bbtornado.config.db.uri
_create_engine_settings.pop('uri', None)
# setup database engine
log.info('Using database from %s'%db_uri)
self.engine = create_engine(db_uri,
convert_unicode=True,
**_create_engine_settings)
if init_db:
bbtornado.models.init_db(self.engine)
self.Session = scoped_session(sessionmaker(bind=self.engine, **sessionmaker_settings), scopefunc=lambda: ThreadRequestContext.data.get('request', None))
# this allows the BaseHandler to get and set a model for self.current_user
self.user_model = user_model
# you can set this to override the domain for secure cookies
self.domain = domain | bbtornado/web.py | import logging
import tornado.web
from sqlalchemy.orm import scoped_session
import bbtornado.models
from bbtornado.handlers import ThreadRequestContext
log = logging.getLogger('bbtornado.web')
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
class Application(tornado.web.Application):
"""
The main Application, your application object is an instance of this class
"""
def __init__(self, handlers=None, default_host='', transforms=None, wsgi=False, user_model=None, domain=None, init_db=True,
sessionmaker_settings={},
create_engine_settings={},
**settings):
tornado_opts = bbtornado.config.tornado
if handlers: # append base url to handlers
base = tornado_opts.server.base
handlers = [(base + x[0],) + x[1:] for x in handlers]
# Init app settings with config.tornado and add override by passed args.
app_settings = {}
app_settings.update(tornado_opts.app_settings)
app_settings.update(settings)
super(Application, self).__init__(handlers=handlers, default_host=default_host,
transforms=transforms, wsgi=wsgi, **app_settings)
# Init engine settings with config.db and add overrider by passed args.
_create_engine_settings = {}
_create_engine_settings.update(bbtornado.config.db)
_create_engine_settings.update(create_engine_settings)
# Handle db_uri explicitely
db_uri = bbtornado.config.db.uri
_create_engine_settings.pop('uri', None)
# setup database engine
log.info('Using database from %s'%db_uri)
self.engine = create_engine(db_uri,
convert_unicode=True,
**_create_engine_settings)
if init_db:
bbtornado.models.init_db(self.engine)
self.Session = scoped_session(sessionmaker(bind=self.engine, **sessionmaker_settings), scopefunc=lambda: ThreadRequestContext.data.get('request', None))
# this allows the BaseHandler to get and set a model for self.current_user
self.user_model = user_model
# you can set this to override the domain for secure cookies
self.domain = domain | 0.539226 | 0.054299 |
from odoo import fields, models
from odoo.tools import float_compare
class PurchaseOrderLine(models.Model):
    _inherit = 'purchase.order.line'

    # Redeclared so the compute method can be extended below for kit
    # (phantom-BoM) products.
    qty_received = fields.Float(compute='_compute_qty_received', string="Received Qty", store=True)

    def _compute_qty_received(self):
        """Extend the standard compute: for kit lines, derive the received
        quantity from the exploded component moves instead."""
        super(PurchaseOrderLine, self)._compute_qty_received()
        # A line whose moves carry products other than the line's own product
        # is a kit line: the kit was exploded into component moves on receipt.
        for line in self.filtered(lambda x: x.move_ids and x.product_id.id not in x.move_ids.mapped('product_id').ids):
            bom = self.env['mrp.bom']._bom_find(product=line.product_id, company_id=line.company_id.id)
            if bom and bom.type == 'phantom':
                line.qty_received = line._get_bom_delivered(bom=bom)

    def _get_bom_delivered(self, bom=False):
        """Return the received qty for a kit line (all-or-nothing policy).

        Returns the full line quantity when every component of *bom* was
        received in sufficient quantity, 0.0 when a BoM was found but some
        component is missing. Implicitly returns None when *bom* is falsy --
        NOTE(review): callers apparently only pass a real BoM; confirm.
        """
        self.ensure_one()
        precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
        # In the case of a kit, we need to check if all components are received or not.
        # nothing policy. A product can have several BoMs, we don't know which one was used when the
        # receipt was created.
        bom_delivered = {}
        if bom:
            bom_delivered[bom.id] = False
            # Convert the ordered qty into the BoM's UoM before exploding.
            product_uom_qty_bom = self.product_uom._compute_quantity(self.product_qty, bom.product_uom_id) / bom.product_qty
            boms, lines = bom.explode(self.product_id, product_uom_qty_bom)
            for bom_line, data in lines:
                qty = 0.0
                for move in self.move_ids.filtered(lambda x: x.state == 'done' and x.product_id == bom_line.product_id):
                    qty += move.product_uom._compute_quantity(move.product_uom_qty, bom_line.product_uom_id)
                if float_compare(qty, data['qty'], precision_digits=precision) < 0:
                    bom_delivered[bom.id] = False
                    break
                else:
                    bom_delivered[bom.id] = True
        if bom_delivered and any(bom_delivered.values()):
            return self.product_qty
        elif bom_delivered:
            return 0.0
from odoo import fields, models
from odoo.tools import float_compare
# NOTE(review): this class duplicates the PurchaseOrderLine defined earlier
# in this file (extraction artifact); the same notes apply.
class PurchaseOrderLine(models.Model):
    _inherit = 'purchase.order.line'

    qty_received = fields.Float(compute='_compute_qty_received', string="Received Qty", store=True)

    def _compute_qty_received(self):
        """Extend the standard compute: for kit (phantom-BoM) lines, derive
        the received quantity from the exploded component moves."""
        super(PurchaseOrderLine, self)._compute_qty_received()
        for line in self.filtered(lambda x: x.move_ids and x.product_id.id not in x.move_ids.mapped('product_id').ids):
            bom = self.env['mrp.bom']._bom_find(product=line.product_id, company_id=line.company_id.id)
            if bom and bom.type == 'phantom':
                line.qty_received = line._get_bom_delivered(bom=bom)

    def _get_bom_delivered(self, bom=False):
        """Return the received qty for a kit line (all-or-nothing)."""
        self.ensure_one()
        precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
        # In the case of a kit, we need to check if all components are received or not.
        # nothing policy. A product can have several BoMs, we don't know which one was used when the
        # receipt was created.
        bom_delivered = {}
        if bom:
            bom_delivered[bom.id] = False
            product_uom_qty_bom = self.product_uom._compute_quantity(self.product_qty, bom.product_uom_id) / bom.product_qty
            boms, lines = bom.explode(self.product_id, product_uom_qty_bom)
            for bom_line, data in lines:
                qty = 0.0
                for move in self.move_ids.filtered(lambda x: x.state == 'done' and x.product_id == bom_line.product_id):
                    qty += move.product_uom._compute_quantity(move.product_uom_qty, bom_line.product_uom_id)
                if float_compare(qty, data['qty'], precision_digits=precision) < 0:
                    bom_delivered[bom.id] = False
                    break
                else:
                    bom_delivered[bom.id] = True
        if bom_delivered and any(bom_delivered.values()):
            return self.product_qty
        elif bom_delivered:
            return 0.0
import itertools as it
def run_opcode(code_list):
    """Run an intcode program in place and return the final memory.

    Opcodes: 1 adds, 2 multiplies (both read two positions and store the
    result at a third); 99 halts.

    Fixes over the previous version:
    - 99 is checked *before* executing an instruction, so a program that
      starts with 99 no longer raises NameError on an unbound ``output``,
    - the pointer never reads one instruction group past the program end,
    - an unknown opcode raises instead of silently reusing a stale result.

    Parameters
    ----------
    code_list : list
        The opcode program; mutated in place and also returned.
    """
    pointer = 0
    while code_list[pointer] != 99:
        opcode, pos0, pos1, posout = code_list[pointer:pointer + 4]
        if opcode == 1:
            code_list[posout] = code_list[pos0] + code_list[pos1]
        elif opcode == 2:
            code_list[posout] = code_list[pos0] * code_list[pos1]
        else:
            raise ValueError(
                'unknown opcode %d at position %d' % (opcode, pointer))
        pointer += 4
    return code_list
def load_computer_data(fname):
    """Parse the puzzle input file into a list of integer opcodes.

    Parameters
    ----------
    fname : string
        File provided by advent of code competition
    """
    tokens = []
    # Each line is a comma-separated run of integers; collect them all.
    with open(fname, 'r') as handle:
        for raw_line in handle:
            tokens.extend(raw_line.split(','))
    # int() tolerates the stray newlines left on the last token of a line.
    return [int(token) for token in tokens]
def adjust_data(code_list, noun=12, verb=2):
    """Write *noun* and *verb* into memory slots 1 and 2 and return the list.

    Parameters
    ----------
    code_list : list
        opcode as provided by advent of code (mutated in place)
    noun : int, optional
        the first parameter (in position 1), by default 12
    verb : int, optional
        the second parameter (in position 2), by default 2
    """
    code_list[1], code_list[2] = noun, verb
    return code_list
def find_noun_verb(code_list, output=19690720):
    """Search all noun/verb pairs in [0, 99] for the one producing *output*.

    Bug fix: the previous version ignored its *code_list* argument and
    re-read 'day02/input.txt' from a hard-coded relative path on every
    iteration (breaking when run from another directory). Each attempt now
    restarts from a fresh copy of the supplied program instead.

    Parameters
    ----------
    code_list : list
        the opcode program to search over (left unmodified)
    output : int, optional
        the target value at position 0, by default 19690720

    Returns
    -------
    tuple or None
        (noun, verb, final_memory) for the first match, or None when no
        pair produces *output*.
    """
    for noun, verb in it.product(range(100), range(100)):
        # Work on a copy so a failed attempt never corrupts the template.
        candidate = adjust_data(list(code_list), noun=noun, verb=verb)
        candidate = run_opcode(candidate)
        if candidate[0] == output:
            return noun, verb, candidate
    return None
if __name__ == "__main__":
"""Load in the data, adjust it to the state before the computer caught fire,
then run the opcode and print the value in position 0 to the screen.
"""
code_list = load_computer_data('day02/input.txt')
noun, verb, code_list = find_noun_verb(code_list, output=19690720)
print('\n---- Day 2, Puzzle 2 ----')
print(
f'For output: {code_list[0]}\n'
f' Noun: {noun}\n'
f' Verb: {verb}\n'
        f'Answer to puzzle: {noun*100 + verb}')


import itertools as it
# NOTE(review): everything below duplicates the day02 definitions earlier in
# this file (extraction artifact); the same review notes apply.
def run_opcode(code_list):
    """Run the opcode as determined by the values in code_list

    Before you enter the next loop, check to see if the opcode
    (the first number in the sequence of 4) is 99. If it is, then
    you can stop and return the code as it stands.

    Parameters
    ----------
    code_list : list
        The opcode
    """
    # NOTE(review): if the very first opcode is 99, `output` below is still
    # unbound (NameError); `code_list[(i+1)*4]` can also read past the end
    # of the program.
    opcode, pos0, pos1, posout = 0, 0, 0, 0
    for i in range(len(code_list) // 4):
        # Read in the next 4 digits of the opcode
        opcode, pos0, pos1, posout = code_list[i*4:(i+1)*4:]
        # Add or multiply the values at positions 0 and 1 together
        if opcode == 1:
            output = code_list[pos0] + code_list[pos1]
        elif opcode == 2:
            output = code_list[pos0] * code_list[pos1]
        # Put the output value in the output position
        code_list[posout] = output
        # Get the next round's opcode
        opcode = code_list[(i+1)*4]
        # Don't do anything if the opcode is 99.
        # The code has stopped so you can stop!
        if opcode == 99:
            return code_list


def load_computer_data(fname):
    """Read in input file with the computer's opcode as provided.

    Parameters
    ----------
    fname : string
        File provided by advent of code competition
    """
    # Create empty code list
    code_list = []
    # Read in each line, and split by comma
    with open(fname, 'r') as f:
        for line in f:
            code_list += line.split(',')
    # Convert all items to integer
    code_list = [int(item) for item in code_list]
    return code_list


def adjust_data(code_list, noun=12, verb=2):
    """Set the computer to a desired state by adjusting the noun and verb
    parameters.

    Parameters
    ----------
    code_list : list
        opcode as provided by advent of code
    noun : int, optional
        the first parameter (in position 1), by default 12
    verb : int, optional
        the second parameter (in position 2), by default 2
    """
    code_list[1] = noun
    code_list[2] = verb
    return code_list


def find_noun_verb(code_list, output=19690720):
    """Loop over lots of different pairs of nouns and verbs (the first two
    parameters given to an opcode) to find the given output (the first value
    in the whole list). Nouns and verbs are always between 0 and 99 inclusive

    Parameters
    ----------
    code_list : list
        the opcode as given by advent of code
    output : int, optional
        the first value in the list, by default 19690720
    """
    for noun, verb in it.product(range(100), range(100)):
        # NOTE(review): the code_list argument is ignored; the program is
        # re-read from a hard-coded relative path on every iteration.
        code_list = load_computer_data('day02/input.txt')
        code_list = adjust_data(code_list, noun=noun, verb=verb)
        code_list = run_opcode(code_list)
        if code_list[0] == output:
            return noun, verb, code_list


if __name__ == "__main__":
    """Load in the data, adjust it to the state before the computer caught fire,
    then run the opcode and print the value in position 0 to the screen.
    """
    code_list = load_computer_data('day02/input.txt')
    noun, verb, code_list = find_noun_verb(code_list, output=19690720)
    print('\n---- Day 2, Puzzle 2 ----')
    print(
        f'For output: {code_list[0]}\n'
        f' Noun: {noun}\n'
        f' Verb: {verb}\n'
        f'Answer to puzzle: {noun*100 + verb}')
import os
import random
import string
from typing import Optional
from idact.core.auth import KeyType
from idact.detail.auth.get_public_key_location import get_public_key_location
from idact.detail.log.get_logger import get_logger
KEY_NAME_PREFIX = {KeyType.RSA: 'id_rsa_'}
KEY_NAME_SUFFIX_LENGTH = 2
KEY_NAME_SUFFIX_RETRIES = 4
KEY_NAME_SUFFIX_MAX_LENGTH = 32
def get_key_suffix(length: int) -> str:
    """Return a pseudo-random string of lowercase letters and digits.

    :param length: Generated string length.
    """
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def get_key_path(location: str,
                 prefix: str,
                 suffix: str):
    """Join *location* with the key file name built from prefix + suffix.

    :param location: Key directory.
    :param prefix: Key name prefix.
    :param suffix: Key name suffix.
    """
    filename = prefix + suffix
    return os.path.join(location, filename)
def try_generate_unique_path(suffix_length: int,
                             location: str,
                             prefix: str) -> Optional[str]:
    """Attempt to pick an unused key file name.

    :param suffix_length: File name suffix length.
    :param location: File parent dir.
    :param prefix: File name prefix.
    :return: The candidate private key path, or None when either the private
        key file or its derived public key file already exists.
    """
    log = get_logger(__name__)
    candidate = get_key_path(location=location,
                             prefix=prefix,
                             suffix=get_key_suffix(length=suffix_length))
    if os.path.isfile(candidate):
        log.warning("File exists: '%s'.", candidate)
        return None
    public_candidate = get_public_key_location(
        private_key_location=candidate)
    if os.path.isfile(public_candidate):
        log.warning("File exists: '%s'.", public_candidate)
        return None
    return candidate
def get_free_private_key_location(key_type: KeyType) -> str:
    """Return a path for a new private key.

    The parent directory is determined by the environment variable
    `IDACT_KEY_LOCATION`. If it's not set, `~/.ssh` is used.

    :param key_type: Generated key type.

    :raises RuntimeError: When no unique file name could be found even with
        the longest allowed suffix.
    """
    location = os.environ.get('IDACT_KEY_LOCATION',
                              default=os.path.expanduser('~/.ssh'))
    os.makedirs(location, exist_ok=True)
    prefix = KEY_NAME_PREFIX[key_type]

    # Retry a few times per suffix length, doubling the length after each
    # round of collisions to shrink the collision probability.
    suffix_length = KEY_NAME_SUFFIX_LENGTH
    while suffix_length <= KEY_NAME_SUFFIX_MAX_LENGTH:
        for _ in range(KEY_NAME_SUFFIX_RETRIES):
            key_path = try_generate_unique_path(suffix_length=suffix_length,
                                                location=location,
                                                prefix=prefix)
            if key_path is not None:
                return key_path
        suffix_length *= 2
    raise RuntimeError("Unable to generate unique key filename.")
import random
import string
from typing import Optional
from idact.core.auth import KeyType
from idact.detail.auth.get_public_key_location import get_public_key_location
from idact.detail.log.get_logger import get_logger
# NOTE(review): this whole section duplicates the key-location helpers
# defined earlier in this file (extraction artifact).
KEY_NAME_PREFIX = {KeyType.RSA: 'id_rsa_'}  # file-name prefix per key type
KEY_NAME_SUFFIX_LENGTH = 2                  # initial random suffix length
KEY_NAME_SUFFIX_RETRIES = 4                 # attempts per suffix length
KEY_NAME_SUFFIX_MAX_LENGTH = 32             # give up past this length


def get_key_suffix(length: int) -> str:
    """Returns a pseudo-random lowercase string.

    :param length: Generated string length.
    """
    return ''.join(random.choice(string.ascii_lowercase + string.digits)
                   for _ in range(length))


def get_key_path(location: str,
                 prefix: str,
                 suffix: str):
    """Constructs the key path from components.

    :param location: Key directory.
    :param prefix: Key name prefix.
    :param suffix: Key name suffix.
    """
    return os.path.join(location, prefix + suffix)


def try_generate_unique_path(suffix_length: int,
                             location: str,
                             prefix: str) -> Optional[str]:
    """Tries to generate a unique file name.

    Returns None if the file already exists.

    :param suffix_length: File name suffix length.
    :param location: File parent dir.
    :param prefix: File name prefix.
    """
    log = get_logger(__name__)
    suffix = get_key_suffix(length=suffix_length)
    private_key_path = get_key_path(location=location,
                                    prefix=prefix,
                                    suffix=suffix)
    if os.path.isfile(private_key_path):
        log.warning("File exists: '%s'.", private_key_path)
        return None
    public_key_path = get_public_key_location(
        private_key_location=private_key_path)
    if os.path.isfile(public_key_path):
        log.warning("File exists: '%s'.", public_key_path)
        return None
    return private_key_path


def get_free_private_key_location(key_type: KeyType) -> str:
    """Returns a path for a new private key.

    The parent directory is determined by the environment variable
    `IDACT_KEY_LOCATION`. If it's not set, `~/.ssh` is used.

    :param key_type: Generated key type.
    """
    location = os.environ.get('IDACT_KEY_LOCATION',
                              default=os.path.expanduser('~/.ssh'))
    os.makedirs(location, exist_ok=True)
    prefix = KEY_NAME_PREFIX[key_type]
    key_path = None
    suffix_length = KEY_NAME_SUFFIX_LENGTH
    while suffix_length <= KEY_NAME_SUFFIX_MAX_LENGTH:
        for _ in range(0, KEY_NAME_SUFFIX_RETRIES):
            key_path = try_generate_unique_path(suffix_length=suffix_length,
                                                location=location,
                                                prefix=prefix)
            if key_path is not None:
                break
        if key_path is not None:
            break
        # Double the suffix length to shrink collision probability.
        suffix_length *= 2
    if key_path is None:
        raise RuntimeError("Unable to generate unique key filename.")
    return key_path
from __future__ import annotations
import requests
from pydantic import BaseModel # , validator
class OlId(BaseModel):
    # An Open Library key reference, e.g. {"key": "/authors/OL34184A"}.
    key: str


class OlType(BaseModel):
    # A typed value pair, e.g. {"type": "/type/datetime", "value": "..."}.
    type: str
    value: str


class Identifier(BaseModel):
    # External identifiers attached to an edition.
    goodreads: list[str]
    librarything: list[str]


class Work(BaseModel):
    # Shape of an Open Library edition record (e.g. /isbn/0140328726.json).
    authors: list[OlId]
    classifications: dict
    contributions: list[str]
    covers: list[int]
    created: OlType
    first_sentence: OlType
    identifiers: Identifier
    isbn_10: list[str]
    isbn_13: list[str]
    key: str
    languages: list[OlId]
    last_modified: OlType
    latest_revision: int
    local_id: list[str]
    number_of_pages: int
    ocaid: str
    publish_date: str
    publishers: list[str]
    revision: int
    source_records: list[str]
    title: str
    type: OlId  # NOTE(review): shadows the builtin `type`; kept for API fidelity
    works: list[OlId]
def get_openlibrary_data(olid: str) -> dict:
    """
    Given an 'isbn/0140328726', return book data from Open Library as a Python dict.
    Given an '/authors/OL34184A', return authors data as a Python dict.
    This code must work for olids with or without a leading slash ('/').

    Raises ValueError when *olid* is not of the form 'prefix/identifier'.

    # Comment out doctests if they take too long or have results that may change
    # >>> get_openlibrary_data(olid='isbn/0140328726') # doctest: +ELLIPSIS
    {'publishers': ['Puffin'], 'number_of_pages': 96, 'isbn_10': ['0140328726'], ...
    # >>> get_openlibrary_data(olid='/authors/OL7353617A') # doctest: +ELLIPSIS
    {'name': '<NAME>', 'created': {'type': '/type/datetime', ...
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        raise ValueError(f"{olid} is not a valid Open Library olid")
    url = f"https://openlibrary.org/{new_olid}.json"
    print(f"Gathering data from {url}.")
    # NOTE: json.JSONDecodeError may be raised if the record cannot be found.
    # Fix: a finite timeout so a stalled connection cannot hang the caller
    # forever (requests has no default timeout).
    return requests.get(url, timeout=30).json()
if __name__ == "__main__": # https://openlibrary.org/works/OL19545135W.json
# Create and Open Library Work from a dict returned from requests
work = Work(**get_openlibrary_data("isbn/0140328726")) # isbn/0425016013"
print(work)
print(f"\nLazy loading {len(work.authors)} authors...\n")
for i, author_olid in enumerate(work.authors):
work.authors[i] = get_openlibrary_data(author_olid.key) # type: ignore
print(work)
print(f"\nLazy loading {len(work.languages)} languages...\n")
for i, language_olid in enumerate(work.languages):
work.languages[i] = get_openlibrary_data(language_olid.key) # type: ignore
print(work)
# If is a bad idea to shadow a Python builtin like `type`
# print(f"\nLazy loading the type...\n")
# work.type = get_openlibrary_data(type.key)
    # print(work)


from __future__ import annotations
import requests
from pydantic import BaseModel # , validator
# NOTE(review): this whole section duplicates the Open Library models and
# helpers defined earlier in this file (extraction artifact).
class OlId(BaseModel):
    # An Open Library key reference, e.g. {"key": "/authors/OL34184A"}.
    key: str


class OlType(BaseModel):
    type: str
    value: str


class Identifier(BaseModel):
    goodreads: list[str]
    librarything: list[str]


class Work(BaseModel):
    # Shape of an Open Library edition record.
    authors: list[OlId]
    classifications: dict
    contributions: list[str]
    covers: list[int]
    created: OlType
    first_sentence: OlType
    identifiers: Identifier
    isbn_10: list[str]
    isbn_13: list[str]
    key: str
    languages: list[OlId]
    last_modified: OlType
    latest_revision: int
    local_id: list[str]
    number_of_pages: int
    ocaid: str
    publish_date: str
    publishers: list[str]
    revision: int
    source_records: list[str]
    title: str
    type: OlId
    works: list[OlId]


def get_openlibrary_data(olid: str) -> dict:
    """
    Given an 'isbn/0140328726', return book data from Open Library as a Python dict.
    Given an '/authors/OL34184A', return authors data as a Python dict.
    This code must work for olids with or without a leading slash ('/').

    # Comment out doctests if they take too long or have results that may change
    # >>> get_openlibrary_data(olid='isbn/0140328726') # doctest: +ELLIPSIS
    {'publishers': ['Puffin'], 'number_of_pages': 96, 'isbn_10': ['0140328726'], ...
    # >>> get_openlibrary_data(olid='/authors/OL7353617A') # doctest: +ELLIPSIS
    {'name': '<NAME>', 'created': {'type': '/type/datetime', ...
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        raise ValueError(f"{olid} is not a valid Open Library olid")
    url = f"https://openlibrary.org/{new_olid}.json"
    print(f"Gathering data from {url}.")
    # NOTE: json.JSONDecodeError may be raised if the record cannot be found.
    # NOTE(review): requests.get without a timeout can hang indefinitely.
    return requests.get(url).json()


if __name__ == "__main__":  # https://openlibrary.org/works/OL19545135W.json
    # Create and Open Library Work from a dict returned from requests
    work = Work(**get_openlibrary_data("isbn/0140328726"))  # isbn/0425016013"
    print(work)
    print(f"\nLazy loading {len(work.authors)} authors...\n")
    for i, author_olid in enumerate(work.authors):
        work.authors[i] = get_openlibrary_data(author_olid.key)  # type: ignore
    print(work)
    print(f"\nLazy loading {len(work.languages)} languages...\n")
    for i, language_olid in enumerate(work.languages):
        work.languages[i] = get_openlibrary_data(language_olid.key)  # type: ignore
    print(work)
    # If is a bad idea to shadow a Python builtin like `type`
    # print(f"\nLazy loading the type...\n")
    # work.type = get_openlibrary_data(type.key)
    # print(work)
import logging
import sys
class Error(Exception):
    """Base class for Telemetry exceptions."""

    def __init__(self, msg=''):
        super(Error, self).__init__(msg)
        self._debugging_messages = []

    def AddDebuggingMessage(self, msg):
        """Adds a message to the description of the exception.

        Many Telemetry exceptions arise from failures in another application.
        These failures are difficult to pinpoint. This method allows Telemetry
        classes to append useful debugging information to the exception, and
        records the location it was called from.
        """
        caller = sys._getframe(1)
        call_site = '%s:%s %s' % (caller.f_code.co_filename,
                                  caller.f_lineno,
                                  caller.f_code.co_name)
        self._debugging_messages.append('(%s) %s' % (call_site, msg))

    def __str__(self):
        # Base message first, then each debugging note behind a '*' divider.
        divider = '\n' + '*' * 80 + '\n'
        parts = [super(Error, self).__str__()]
        parts.extend(self._debugging_messages)
        return divider.join(parts)
class PlatformError(Error):
    """ Represents an exception thrown when constructing platform. """


class TimeoutException(Error):
    """The operation failed to complete because of a timeout.

    It is possible that waiting for a longer period of time would result in a
    successful operation.
    """
    pass
class AppCrashException(Error):
    """Raised when the application under test crashes.

    Diagnostics (system log, stack trace, minidump path, stdout) are
    collected best-effort from *app* at construction time; each probe is
    individually guarded so a failing probe never masks the crash itself.
    """

    def __init__(self, app=None, msg=''):
        super(AppCrashException, self).__init__(msg)
        self._msg = msg
        self._is_valid_dump = False
        self._stack_trace = []
        self._app_stdout = []
        self._minidump_path = ''
        self._system_log = '(Not implemented)'
        if app:
            try:
                system_log = app.platform.GetSystemLog()
                if system_log:
                    self._system_log = system_log
            except Exception:  # pylint: disable=broad-except
                logging.exception('Problem when trying to gather system log:')
            try:
                self._is_valid_dump, trace_output = app.GetStackTrace()
                self._stack_trace = trace_output.splitlines()
                self._minidump_path = app.GetMostRecentMinidumpPath()
            except Exception:  # pylint: disable=broad-except
                logging.exception('Problem when trying to gather stack trace:')
            try:
                self._app_stdout = app.GetStandardOutput().splitlines()
            except Exception:  # pylint: disable=broad-except
                logging.exception('Problem when trying to gather standard output:')

    @property
    def stack_trace(self):
        return self._stack_trace

    @property
    def minidump_path(self):
        return self._minidump_path

    @property
    def is_valid_dump(self):
        return self._is_valid_dump

    def __str__(self):
        # Multi-section report: base message, stack trace, app stdout, then
        # the system log, with '*' dividers between sections.
        divider = '*' * 80
        debug_messages = []
        debug_messages.append(super(AppCrashException, self).__str__())
        debug_messages.append('Found Minidump: %s' % self._is_valid_dump)
        debug_messages.append('Stack Trace:')
        debug_messages.append(divider)
        debug_messages.extend(('\t%s' % l) for l in self._stack_trace)
        debug_messages.append(divider)
        debug_messages.append('Standard output:')
        debug_messages.append(divider)
        debug_messages.extend(('\t%s' % l) for l in self._app_stdout)
        debug_messages.append(divider)
        debug_messages.append('System log:')
        debug_messages.append(self._system_log)
        return '\n'.join(debug_messages)
class DevtoolsTargetCrashException(AppCrashException):
    """Represents a crash of the current devtools target but not the overall app.

    This can be a tab or a WebView. In this state, the tab/WebView is
    gone, but the underlying browser is still alive.
    """

    def __init__(self, app, msg='Devtools target crashed'):
        super(DevtoolsTargetCrashException, self).__init__(app, msg)


class BrowserGoneException(AppCrashException):
    """Represents a crash of the entire browser.

    In this state, all bets are pretty much off."""

    def __init__(self, app, msg='Browser crashed'):
        super(BrowserGoneException, self).__init__(app, msg)


class BrowserConnectionGoneException(BrowserGoneException):
    """Represents a browser that still exists but cannot be reached."""

    def __init__(self, app, msg='Browser exists but the connection is gone'):
        super(BrowserConnectionGoneException, self).__init__(app, msg)
class TabMissingError(Error):
    """Represents an error when an expected browser tab is not found."""


class ProcessGoneException(Error):
    """Represents a process that no longer exists for an unknown reason."""


class IntentionalException(Error):
    """Represent an exception raised by a unittest which is not printed."""


class InitializationError(Error):
    def __init__(self, string):
        super(InitializationError, self).__init__(string)


class LoginException(Error):
    pass
class EvaluateException(Error):
    """Raised when an evaluation fails; carries the remote class name and
    description of the failure when available."""

    def __init__(self, text='', class_name='', description=None):
        super(EvaluateException, self).__init__(text)
        self._class_name = class_name
        self._description = description

    def __str__(self):
        base = super(EvaluateException, self).__str__()
        # Only append details when both pieces are present.
        if not (self._class_name and self._description):
            return base
        return '%s%s:\n%s' % (base, self._class_name, self._description)
class StoryActionError(Error):
    """Represents an error when trying to perform an action on a story."""


class TracingException(Error):
    """Represents an error that ocurred while collecting or flushing traces."""


class PathMissingError(Error):
    """Represents an exception thrown when an expected path doesn't exist."""


class UnknownPackageError(Error):
    """Represents an exception when encountering an unsupported Android APK."""


class PackageDetectionError(Error):
    """Represents an error when parsing an Android APK's package."""
class AndroidDeviceParsingError(Error):
    """Represents an error when parsing output from an android device."""


import logging
import sys
# NOTE(review): this whole section duplicates the Telemetry exception
# hierarchy defined earlier in this file (extraction artifact).
class Error(Exception):
    """Base class for Telemetry exceptions."""

    def __init__(self, msg=''):
        super(Error, self).__init__(msg)
        self._debugging_messages = []

    def AddDebuggingMessage(self, msg):
        """Adds a message to the description of the exception.

        Many Telemetry exceptions arise from failures in another application. These
        failures are difficult to pinpoint. This method allows Telemetry classes to
        append useful debugging information to the exception. This method also logs
        information about the location from where it was called.
        """
        frame = sys._getframe(1)
        line_number = frame.f_lineno
        file_name = frame.f_code.co_filename
        function_name = frame.f_code.co_name
        call_site = '%s:%s %s' % (file_name, line_number, function_name)
        annotated_message = '(%s) %s' % (call_site, msg)
        self._debugging_messages.append(annotated_message)

    def __str__(self):
        divider = '\n' + '*' * 80 + '\n'
        output = super(Error, self).__str__()
        for message in self._debugging_messages:
            output += divider
            output += message
        return output


class PlatformError(Error):
    """ Represents an exception thrown when constructing platform. """


class TimeoutException(Error):
    """The operation failed to complete because of a timeout.

    It is possible that waiting for a longer period of time would result in a
    successful operation.
    """
    pass


class AppCrashException(Error):
    # Collects best-effort crash diagnostics from *app* at construction time.
    def __init__(self, app=None, msg=''):
        super(AppCrashException, self).__init__(msg)
        self._msg = msg
        self._is_valid_dump = False
        self._stack_trace = []
        self._app_stdout = []
        self._minidump_path = ''
        self._system_log = '(Not implemented)'
        if app:
            try:
                system_log = app.platform.GetSystemLog()
                if system_log:
                    self._system_log = system_log
            except Exception:  # pylint: disable=broad-except
                logging.exception('Problem when trying to gather system log:')
            try:
                self._is_valid_dump, trace_output = app.GetStackTrace()
                self._stack_trace = trace_output.splitlines()
                self._minidump_path = app.GetMostRecentMinidumpPath()
            except Exception:  # pylint: disable=broad-except
                logging.exception('Problem when trying to gather stack trace:')
            try:
                self._app_stdout = app.GetStandardOutput().splitlines()
            except Exception:  # pylint: disable=broad-except
                logging.exception('Problem when trying to gather standard output:')

    @property
    def stack_trace(self):
        return self._stack_trace

    @property
    def minidump_path(self):
        return self._minidump_path

    @property
    def is_valid_dump(self):
        return self._is_valid_dump

    def __str__(self):
        divider = '*' * 80
        debug_messages = []
        debug_messages.append(super(AppCrashException, self).__str__())
        debug_messages.append('Found Minidump: %s' % self._is_valid_dump)
        debug_messages.append('Stack Trace:')
        debug_messages.append(divider)
        debug_messages.extend(('\t%s' % l) for l in self._stack_trace)
        debug_messages.append(divider)
        debug_messages.append('Standard output:')
        debug_messages.append(divider)
        debug_messages.extend(('\t%s' % l) for l in self._app_stdout)
        debug_messages.append(divider)
        debug_messages.append('System log:')
        debug_messages.append(self._system_log)
        return '\n'.join(debug_messages)


class DevtoolsTargetCrashException(AppCrashException):
    """Represents a crash of the current devtools target but not the overall app.

    This can be a tab or a WebView. In this state, the tab/WebView is
    gone, but the underlying browser is still alive.
    """

    def __init__(self, app, msg='Devtools target crashed'):
        super(DevtoolsTargetCrashException, self).__init__(app, msg)


class BrowserGoneException(AppCrashException):
    """Represents a crash of the entire browser.

    In this state, all bets are pretty much off."""

    def __init__(self, app, msg='Browser crashed'):
        super(BrowserGoneException, self).__init__(app, msg)


class BrowserConnectionGoneException(BrowserGoneException):
    """Represents a browser that still exists but cannot be reached."""

    def __init__(self, app, msg='Browser exists but the connection is gone'):
        super(BrowserConnectionGoneException, self).__init__(app, msg)


class TabMissingError(Error):
    """Represents an error when an expected browser tab is not found."""


class ProcessGoneException(Error):
    """Represents a process that no longer exists for an unknown reason."""


class IntentionalException(Error):
    """Represent an exception raised by a unittest which is not printed."""


class InitializationError(Error):
    def __init__(self, string):
        super(InitializationError, self).__init__(string)


class LoginException(Error):
    pass


class EvaluateException(Error):
    def __init__(self, text='', class_name='', description=None):
        super(EvaluateException, self).__init__(text)
        self._class_name = class_name
        self._description = description

    def __str__(self):
        output = super(EvaluateException, self).__str__()
        if self._class_name and self._description:
            output += '%s:\n%s' % (self._class_name, self._description)
        return output


class StoryActionError(Error):
    """Represents an error when trying to perform an action on a story."""


class TracingException(Error):
    """Represents an error that ocurred while collecting or flushing traces."""


class PathMissingError(Error):
    """Represents an exception thrown when an expected path doesn't exist."""


class UnknownPackageError(Error):
    """Represents an exception when encountering an unsupported Android APK."""


class PackageDetectionError(Error):
    """Represents an error when parsing an Android APK's package."""


class AndroidDeviceParsingError(Error):
    """Represents an error when parsing output from an android device."""
from charms.reactive import is_state, remove_state, set_state, when, when_any, when_none, when_not
from charmhelpers.core import hookenv
from charms.layer.apache_bigtop_base import Bigtop, get_hadoop_version
@when('hadoop-plugin.joined')
@when_not('namenode.joined')
def blocked(principal):
    # Nothing useful can happen without a namenode; surface it to the operator.
    hookenv.status_set('blocked', 'missing required namenode relation')


@when('bigtop.available', 'hadoop-plugin.joined', 'namenode.joined')
@when_not('apache-bigtop-plugin.hdfs.installed')
def install_hadoop_client_hdfs(principal, namenode):
    """Install if the namenode has sent its FQDN.

    We only need the namenode FQDN to perform the plugin install, so poll for
    namenodes() data whenever we have a namenode relation. This allows us to
    install asap, even if 'namenode.ready' is not set yet.
    """
    if namenode.namenodes():
        hookenv.status_set('maintenance', 'installing plugin (hdfs)')
        nn_host = namenode.namenodes()[0]
        bigtop = Bigtop()
        hosts = {'namenode': nn_host}
        bigtop.render_site_yaml(hosts=hosts, roles='hadoop-client')
        bigtop.trigger_puppet()
        set_state('apache-bigtop-plugin.hdfs.installed')
        hookenv.application_version_set(get_hadoop_version())
        hookenv.status_set('maintenance', 'plugin (hdfs) installed')
    else:
        hookenv.status_set('waiting', 'waiting for namenode fqdn')


@when('apache-bigtop-plugin.hdfs.installed')
@when('hadoop-plugin.joined', 'namenode.joined')
@when_not('namenode.ready')
def send_nn_spec(principal, namenode):
    """Send our plugin spec so the namenode can become ready."""
    bigtop = Bigtop()
    # Send plugin spec (must match NN spec for 'namenode.ready' to be set)
    namenode.set_local_spec(bigtop.spec())


@when('apache-bigtop-plugin.hdfs.installed')
@when('hadoop-plugin.joined', 'namenode.ready')
@when_not('apache-bigtop-plugin.hdfs.ready')
def send_principal_hdfs_info(principal, namenode):
    """Send HDFS data when the namenode becomes ready."""
    principal.set_installed(get_hadoop_version())
    principal.set_hdfs_ready(namenode.namenodes(), namenode.port())
    set_state('apache-bigtop-plugin.hdfs.ready')


@when('apache-bigtop-plugin.hdfs.ready')
@when('hadoop-plugin.joined')
@when_not('namenode.ready')
def clear_hdfs_ready(principal):
    # Namenode went away: retract readiness so the install re-runs later.
    principal.clear_hdfs_ready()
    remove_state('apache-bigtop-plugin.hdfs.ready')
    remove_state('apache-bigtop-plugin.hdfs.installed')
@when('bigtop.available', 'hadoop-plugin.joined', 'namenode.joined', 'resourcemanager.joined')
@when_not('apache-bigtop-plugin.yarn.installed')
def install_hadoop_client_yarn(principal, namenode, resourcemanager):
if namenode.namenodes() and resourcemanager.resourcemanagers():
hookenv.status_set('maintenance', 'installing plugin (yarn)')
nn_host = namenode.namenodes()[0]
rm_host = resourcemanager.resourcemanagers()[0]
bigtop = Bigtop()
hosts = {'namenode': nn_host, 'resourcemanager': rm_host}
bigtop.render_site_yaml(hosts=hosts, roles='hadoop-client')
bigtop.trigger_puppet()
set_state('apache-bigtop-plugin.yarn.installed')
hookenv.status_set('maintenance', 'plugin (yarn) installed')
else:
hookenv.status_set('waiting', 'waiting for master fqdns')
@when('apache-bigtop-plugin.yarn.installed')
@when('hadoop-plugin.joined', 'resourcemanager.joined')
@when_not('resourcemanager.ready')
def send_rm_spec(principal, resourcemanager):
"""Send our plugin spec so the resourcemanager can become ready."""
bigtop = Bigtop()
resourcemanager.set_local_spec(bigtop.spec())
@when('apache-bigtop-plugin.yarn.installed')
@when('hadoop-plugin.joined', 'resourcemanager.ready')
@when_not('apache-bigtop-plugin.yarn.ready')
def send_principal_yarn_info(principal, resourcemanager):
"""Send YARN data when the resourcemanager becomes ready."""
principal.set_installed(get_hadoop_version())
principal.set_yarn_ready(
resourcemanager.resourcemanagers(), resourcemanager.port(),
resourcemanager.hs_http(), resourcemanager.hs_ipc())
set_state('apache-bigtop-plugin.yarn.ready')
@when('apache-bigtop-plugin.yarn.ready')
@when('hadoop-plugin.joined')
@when_not('resourcemanager.ready')
def clear_yarn_ready(principal):
principal.clear_yarn_ready()
remove_state('apache-bigtop-plugin.yarn.ready')
remove_state('apache-bigtop-plugin.yarn.installed')
@when_any('apache-bigtop-plugin.hdfs.installed', 'apache-bigtop-plugin.yarn.installed')
@when('hadoop-plugin.joined')
@when_none('namenode.spec.mismatch', 'resourcemanager.spec.mismatch')
def update_status(principal):
hdfs_rel = is_state('namenode.joined')
yarn_rel = is_state('resourcemanager.joined')
hdfs_ready = is_state('namenode.ready')
yarn_ready = is_state('resourcemanager.ready')
if not (hdfs_rel or yarn_rel):
hookenv.status_set('blocked',
'missing namenode and/or resourcemanager relation')
elif hdfs_rel and not hdfs_ready:
hookenv.status_set('waiting', 'waiting for hdfs')
elif yarn_rel and not yarn_ready:
hookenv.status_set('waiting', 'waiting for yarn')
else:
ready = []
if hdfs_ready:
ready.append('hdfs')
if yarn_ready:
ready.append('yarn')
hookenv.status_set('active', 'ready ({})'.format(' & '.join(ready))) | bigtop-packages/src/charm/hadoop/layer-hadoop-plugin/reactive/apache_bigtop_plugin.py |
from charms.reactive import is_state, remove_state, set_state, when, when_any, when_none, when_not
from charmhelpers.core import hookenv
from charms.layer.apache_bigtop_base import Bigtop, get_hadoop_version
@when('hadoop-plugin.joined')
@when_not('namenode.joined')
def blocked(principal):
hookenv.status_set('blocked', 'missing required namenode relation')
@when('bigtop.available', 'hadoop-plugin.joined', 'namenode.joined')
@when_not('apache-bigtop-plugin.hdfs.installed')
def install_hadoop_client_hdfs(principal, namenode):
"""Install if the namenode has sent its FQDN.
We only need the namenode FQDN to perform the plugin install, so poll for
namenodes() data whenever we have a namenode relation. This allows us to
install asap, even if 'namenode.ready' is not set yet.
"""
if namenode.namenodes():
hookenv.status_set('maintenance', 'installing plugin (hdfs)')
nn_host = namenode.namenodes()[0]
bigtop = Bigtop()
hosts = {'namenode': nn_host}
bigtop.render_site_yaml(hosts=hosts, roles='hadoop-client')
bigtop.trigger_puppet()
set_state('apache-bigtop-plugin.hdfs.installed')
hookenv.application_version_set(get_hadoop_version())
hookenv.status_set('maintenance', 'plugin (hdfs) installed')
else:
hookenv.status_set('waiting', 'waiting for namenode fqdn')
@when('apache-bigtop-plugin.hdfs.installed')
@when('hadoop-plugin.joined', 'namenode.joined')
@when_not('namenode.ready')
def send_nn_spec(principal, namenode):
"""Send our plugin spec so the namenode can become ready."""
bigtop = Bigtop()
# Send plugin spec (must match NN spec for 'namenode.ready' to be set)
namenode.set_local_spec(bigtop.spec())
@when('apache-bigtop-plugin.hdfs.installed')
@when('hadoop-plugin.joined', 'namenode.ready')
@when_not('apache-bigtop-plugin.hdfs.ready')
def send_principal_hdfs_info(principal, namenode):
"""Send HDFS data when the namenode becomes ready."""
principal.set_installed(get_hadoop_version())
principal.set_hdfs_ready(namenode.namenodes(), namenode.port())
set_state('apache-bigtop-plugin.hdfs.ready')
@when('apache-bigtop-plugin.hdfs.ready')
@when('hadoop-plugin.joined')
@when_not('namenode.ready')
def clear_hdfs_ready(principal):
principal.clear_hdfs_ready()
remove_state('apache-bigtop-plugin.hdfs.ready')
remove_state('apache-bigtop-plugin.hdfs.installed')
@when('bigtop.available', 'hadoop-plugin.joined', 'namenode.joined', 'resourcemanager.joined')
@when_not('apache-bigtop-plugin.yarn.installed')
def install_hadoop_client_yarn(principal, namenode, resourcemanager):
if namenode.namenodes() and resourcemanager.resourcemanagers():
hookenv.status_set('maintenance', 'installing plugin (yarn)')
nn_host = namenode.namenodes()[0]
rm_host = resourcemanager.resourcemanagers()[0]
bigtop = Bigtop()
hosts = {'namenode': nn_host, 'resourcemanager': rm_host}
bigtop.render_site_yaml(hosts=hosts, roles='hadoop-client')
bigtop.trigger_puppet()
set_state('apache-bigtop-plugin.yarn.installed')
hookenv.status_set('maintenance', 'plugin (yarn) installed')
else:
hookenv.status_set('waiting', 'waiting for master fqdns')
@when('apache-bigtop-plugin.yarn.installed')
@when('hadoop-plugin.joined', 'resourcemanager.joined')
@when_not('resourcemanager.ready')
def send_rm_spec(principal, resourcemanager):
"""Send our plugin spec so the resourcemanager can become ready."""
bigtop = Bigtop()
resourcemanager.set_local_spec(bigtop.spec())
@when('apache-bigtop-plugin.yarn.installed')
@when('hadoop-plugin.joined', 'resourcemanager.ready')
@when_not('apache-bigtop-plugin.yarn.ready')
def send_principal_yarn_info(principal, resourcemanager):
"""Send YARN data when the resourcemanager becomes ready."""
principal.set_installed(get_hadoop_version())
principal.set_yarn_ready(
resourcemanager.resourcemanagers(), resourcemanager.port(),
resourcemanager.hs_http(), resourcemanager.hs_ipc())
set_state('apache-bigtop-plugin.yarn.ready')
@when('apache-bigtop-plugin.yarn.ready')
@when('hadoop-plugin.joined')
@when_not('resourcemanager.ready')
def clear_yarn_ready(principal):
principal.clear_yarn_ready()
remove_state('apache-bigtop-plugin.yarn.ready')
remove_state('apache-bigtop-plugin.yarn.installed')
@when_any('apache-bigtop-plugin.hdfs.installed', 'apache-bigtop-plugin.yarn.installed')
@when('hadoop-plugin.joined')
@when_none('namenode.spec.mismatch', 'resourcemanager.spec.mismatch')
def update_status(principal):
hdfs_rel = is_state('namenode.joined')
yarn_rel = is_state('resourcemanager.joined')
hdfs_ready = is_state('namenode.ready')
yarn_ready = is_state('resourcemanager.ready')
if not (hdfs_rel or yarn_rel):
hookenv.status_set('blocked',
'missing namenode and/or resourcemanager relation')
elif hdfs_rel and not hdfs_ready:
hookenv.status_set('waiting', 'waiting for hdfs')
elif yarn_rel and not yarn_ready:
hookenv.status_set('waiting', 'waiting for yarn')
else:
ready = []
if hdfs_ready:
ready.append('hdfs')
if yarn_ready:
ready.append('yarn')
hookenv.status_set('active', 'ready ({})'.format(' & '.join(ready))) | 0.51562 | 0.113236 |
import gym
import numpy as np
import cv2
from gym import spaces
class MLToGymEnv(gym.Env):
def __init__(self, env, train_mode, reward_range=(-np.inf, np.inf)):
"""Wraps UnityEnvironment of ML-Agents to be used by baselines algorithms
"""
gym.Env.__init__(self)
self.unityEnv = env
self.train_mode = train_mode
self.reward_range = reward_range
assert self.unityEnv.number_external_brains > 0, "No external brains defined in unityEnv"
self.__externalBrainName = self.unityEnv.external_brain_names[0]
externalBrain = self.unityEnv.brains[self.__externalBrainName]
actionSpaceSize = externalBrain.vector_action_space_size
assert actionSpaceSize > 0
self.action_space = spaces.Discrete(actionSpaceSize)
self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8) # TODO actually read dimensions from brain info
# TODO set observation space according to brain
def step(self, action):
action_vector = {}
action_vector[self.__externalBrainName] = [action] # needs to be list in case of multiple agents, TODO: support more than one agent
brain_infos = self.unityEnv.step(action_vector)
brain_info = brain_infos[self.__externalBrainName]
obs = brain_info.visual_observations[0][0]
reward = brain_info.rewards[0]
done = brain_info.local_done[0]
info = None
return obs, reward, done, info
def reset(self):
obs_dict = self.unityEnv.reset(train_mode=self.train_mode)
# observations of used external brain -> visual observation -> of camera 0 of agent 0
return obs_dict[self.__externalBrainName].visual_observations[0][0]
def render(self, mode='human'):
raise NotImplementedError
def close(self):
return self.unityEnv.close()
def seed(self, seed=None):
raise NotImplementedError
class FloatToUInt8Frame(gym.ObservationWrapper):
def __init__(self, env):
"""Convert observation image from float64 to uint8"""
gym.ObservationWrapper.__init__(self, env)
def observation(self, frame):
# convert from float64, range 0 - 1 to uint8, range 0 - 255
frame = 255 * frame
frame = frame.astype(np.uint8)
frame = frame[...,::-1] #convert to bgr for opencv imshow
return frame | run/baselines_wrapper.py | import gym
import numpy as np
import cv2
from gym import spaces
class MLToGymEnv(gym.Env):
def __init__(self, env, train_mode, reward_range=(-np.inf, np.inf)):
"""Wraps UnityEnvironment of ML-Agents to be used by baselines algorithms
"""
gym.Env.__init__(self)
self.unityEnv = env
self.train_mode = train_mode
self.reward_range = reward_range
assert self.unityEnv.number_external_brains > 0, "No external brains defined in unityEnv"
self.__externalBrainName = self.unityEnv.external_brain_names[0]
externalBrain = self.unityEnv.brains[self.__externalBrainName]
actionSpaceSize = externalBrain.vector_action_space_size
assert actionSpaceSize > 0
self.action_space = spaces.Discrete(actionSpaceSize)
self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8) # TODO actually read dimensions from brain info
# TODO set observation space according to brain
def step(self, action):
action_vector = {}
action_vector[self.__externalBrainName] = [action] # needs to be list in case of multiple agents, TODO: support more than one agent
brain_infos = self.unityEnv.step(action_vector)
brain_info = brain_infos[self.__externalBrainName]
obs = brain_info.visual_observations[0][0]
reward = brain_info.rewards[0]
done = brain_info.local_done[0]
info = None
return obs, reward, done, info
def reset(self):
obs_dict = self.unityEnv.reset(train_mode=self.train_mode)
# observations of used external brain -> visual observation -> of camera 0 of agent 0
return obs_dict[self.__externalBrainName].visual_observations[0][0]
def render(self, mode='human'):
raise NotImplementedError
def close(self):
return self.unityEnv.close()
def seed(self, seed=None):
raise NotImplementedError
class FloatToUInt8Frame(gym.ObservationWrapper):
def __init__(self, env):
"""Convert observation image from float64 to uint8"""
gym.ObservationWrapper.__init__(self, env)
def observation(self, frame):
# convert from float64, range 0 - 1 to uint8, range 0 - 255
frame = 255 * frame
frame = frame.astype(np.uint8)
frame = frame[...,::-1] #convert to bgr for opencv imshow
return frame | 0.380414 | 0.445952 |
from pathlib import Path
import argparse
import pickle
import pandas as pd
import numpy as np
def softmax(x):
"""
>>> res = softmax(np.array([0, 200, 10]))
>>> np.sum(res)
1.0
>>> np.all(np.abs(res - np.array([0, 1, 0])) < 0.0001)
True
>>> res = softmax(np.array([[0, 200, 10], [0, 10, 200], [200, 0, 10]]))
>>> np.sum(res, axis=1)
array([ 1., 1., 1.])
>>> res = softmax(np.array([[0, 200, 10], [0, 10, 200]]))
>>> np.sum(res, axis=1)
array([ 1., 1.])
"""
if x.ndim == 1:
x = x.reshape((1, -1))
max_x = np.max(x, axis=1).reshape((-1, 1))
exp_x = np.exp(x - max_x)
return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))
def fuse_scores(scores_dict):
modalities_combinations = [('rgb', 'flow'), ('rgb', 'spec'),
('flow', 'spec'), ('rgb', 'flow', 'spec')]
fused_scores = {}
for mod_comb in modalities_combinations:
name = '_'.join(mod_comb)
fused_scores[name] = {'scores': {}}
for task in ['verb', 'noun']:
scores_list = [scores_dict[m]['scores'][task] for m in mod_comb]
scores_list = [softmax(scores.mean(axis=(1, 2))) for scores in scores_list]
fused_scores[name]['scores'][task] = np.mean(scores_list, axis=0)
return fused_scores
def main(args):
for split in ['seen', 'unseen']:
rgb_scores = pd.read_pickle(args.rgb / ('test_' + split + '.pkl'))
flow_scores = pd.read_pickle(args.flow / ('test_' + split + '.pkl'))
spec_scores = pd.read_pickle(args.spec / ('test_' + split + '.pkl'))
scores_dict = {'rgb': rgb_scores, 'flow': flow_scores, 'spec': spec_scores}
fused_scores = fuse_scores(scores_dict)
for key in fused_scores.keys():
output_dir = args.scores_root / key / ('test_' + split + '.pkl')
output_dir.parent.mkdir(parents=True, exist_ok=True)
with open(output_dir, 'wb') as f:
pickle.dump(fused_scores[key], f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('scores_root', type=Path)
parser.add_argument('--rgb', type=Path, help='Directory of the RGB scores')
parser.add_argument('--flow', type=Path, help='Directory of the Flow scores')
parser.add_argument('--spec', type=Path, help='Directory of the Spectrogram scores')
args = parser.parse_args()
main(args) | fuse_results_epic.py | from pathlib import Path
import argparse
import pickle
import pandas as pd
import numpy as np
def softmax(x):
"""
>>> res = softmax(np.array([0, 200, 10]))
>>> np.sum(res)
1.0
>>> np.all(np.abs(res - np.array([0, 1, 0])) < 0.0001)
True
>>> res = softmax(np.array([[0, 200, 10], [0, 10, 200], [200, 0, 10]]))
>>> np.sum(res, axis=1)
array([ 1., 1., 1.])
>>> res = softmax(np.array([[0, 200, 10], [0, 10, 200]]))
>>> np.sum(res, axis=1)
array([ 1., 1.])
"""
if x.ndim == 1:
x = x.reshape((1, -1))
max_x = np.max(x, axis=1).reshape((-1, 1))
exp_x = np.exp(x - max_x)
return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))
def fuse_scores(scores_dict):
modalities_combinations = [('rgb', 'flow'), ('rgb', 'spec'),
('flow', 'spec'), ('rgb', 'flow', 'spec')]
fused_scores = {}
for mod_comb in modalities_combinations:
name = '_'.join(mod_comb)
fused_scores[name] = {'scores': {}}
for task in ['verb', 'noun']:
scores_list = [scores_dict[m]['scores'][task] for m in mod_comb]
scores_list = [softmax(scores.mean(axis=(1, 2))) for scores in scores_list]
fused_scores[name]['scores'][task] = np.mean(scores_list, axis=0)
return fused_scores
def main(args):
for split in ['seen', 'unseen']:
rgb_scores = pd.read_pickle(args.rgb / ('test_' + split + '.pkl'))
flow_scores = pd.read_pickle(args.flow / ('test_' + split + '.pkl'))
spec_scores = pd.read_pickle(args.spec / ('test_' + split + '.pkl'))
scores_dict = {'rgb': rgb_scores, 'flow': flow_scores, 'spec': spec_scores}
fused_scores = fuse_scores(scores_dict)
for key in fused_scores.keys():
output_dir = args.scores_root / key / ('test_' + split + '.pkl')
output_dir.parent.mkdir(parents=True, exist_ok=True)
with open(output_dir, 'wb') as f:
pickle.dump(fused_scores[key], f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('scores_root', type=Path)
parser.add_argument('--rgb', type=Path, help='Directory of the RGB scores')
parser.add_argument('--flow', type=Path, help='Directory of the Flow scores')
parser.add_argument('--spec', type=Path, help='Directory of the Spectrogram scores')
args = parser.parse_args()
main(args) | 0.777553 | 0.360517 |
import os, sys
from pathlib import Path
import configparser
from .Logger import LOGGER
import uuid
import base64
#just for test
current_dir = os.getcwd()
sys.path.append(current_dir)
#just for test
try:
from Fcp.Node import Node
except ModuleNotFoundError:
raise ModuleNotFoundError('Fcp module is required')
CONFIG_DIR = '{0}/.config/freesnake/freewebsite'.format(str(Path.home()))
CONFIG_FILE = '{0}/conf'.format(CONFIG_DIR)
DB_FILENAME = '{0}/freewebsite.db'.format(CONFIG_DIR)
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = '9481'
DEFAULT_ENGINE_MODE = 'socket'
DEFAULT_LOG = 'file'
class Core(object):
'''
'''
def __init__(self):
Path(CONFIG_DIR).mkdir(parents=True, exist_ok=True)
if not Path(CONFIG_FILE).exists():
self.set_config(
host = DEFAULT_HOST,
port = DEFAULT_PORT,
name_of_connection = 'freesite_{0}'.format(self.get_a_uuid()),
engine_mode = DEFAULT_ENGINE_MODE,
log = DEFAULT_LOG)
self.node = Node()
def get_config(self):
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
return config['DEFAULT']
def set_config(self, **config_data):
config_file = Path(CONFIG_FILE)
config = configparser.ConfigParser()
config['DEFAULT'] = { 'HOST' : config_data['host'],
'PORT' : config_data['port'],
'NAME_OF_CONNECTION' : config_data['name_of_connection'],
'ENGINE_MODE' : config_data['engine_mode'],
'LOG' : config_data['log']
}
with open(str(config_file), 'w') as configfile:
config.write(configfile)
def connect_to_node(self):
self.node.peer_addr = self.get_config()['HOST']
self.node.peer_port = int(self.get_config()['PORT'])
self.node.name_of_connection = self.get_config()['NAME_OF_CONNECTION']
self.node.engine_mode = self.get_config()['ENGINE_MODE']
self.node.log = self.get_config()['LOG']
self.node.connect_to_node()
def disconnect_from_node(self):
self.node.disconnect_from_node()
def get_a_uuid(self, round = 3):
r_uuid = base64.urlsafe_b64encode(uuid.uuid4().bytes)
key = ''
for i in range(round):
key += r_uuid.decode().replace('=', '')
return 'website_{0}'.format(key) | Fsite/Base/Core.py | import os, sys
from pathlib import Path
import configparser
from .Logger import LOGGER
import uuid
import base64
#just for test
current_dir = os.getcwd()
sys.path.append(current_dir)
#just for test
try:
from Fcp.Node import Node
except ModuleNotFoundError:
raise ModuleNotFoundError('Fcp module is required')
CONFIG_DIR = '{0}/.config/freesnake/freewebsite'.format(str(Path.home()))
CONFIG_FILE = '{0}/conf'.format(CONFIG_DIR)
DB_FILENAME = '{0}/freewebsite.db'.format(CONFIG_DIR)
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = '9481'
DEFAULT_ENGINE_MODE = 'socket'
DEFAULT_LOG = 'file'
class Core(object):
'''
'''
def __init__(self):
Path(CONFIG_DIR).mkdir(parents=True, exist_ok=True)
if not Path(CONFIG_FILE).exists():
self.set_config(
host = DEFAULT_HOST,
port = DEFAULT_PORT,
name_of_connection = 'freesite_{0}'.format(self.get_a_uuid()),
engine_mode = DEFAULT_ENGINE_MODE,
log = DEFAULT_LOG)
self.node = Node()
def get_config(self):
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
return config['DEFAULT']
def set_config(self, **config_data):
config_file = Path(CONFIG_FILE)
config = configparser.ConfigParser()
config['DEFAULT'] = { 'HOST' : config_data['host'],
'PORT' : config_data['port'],
'NAME_OF_CONNECTION' : config_data['name_of_connection'],
'ENGINE_MODE' : config_data['engine_mode'],
'LOG' : config_data['log']
}
with open(str(config_file), 'w') as configfile:
config.write(configfile)
def connect_to_node(self):
self.node.peer_addr = self.get_config()['HOST']
self.node.peer_port = int(self.get_config()['PORT'])
self.node.name_of_connection = self.get_config()['NAME_OF_CONNECTION']
self.node.engine_mode = self.get_config()['ENGINE_MODE']
self.node.log = self.get_config()['LOG']
self.node.connect_to_node()
def disconnect_from_node(self):
self.node.disconnect_from_node()
def get_a_uuid(self, round = 3):
r_uuid = base64.urlsafe_b64encode(uuid.uuid4().bytes)
key = ''
for i in range(round):
key += r_uuid.decode().replace('=', '')
return 'website_{0}'.format(key) | 0.197135 | 0.049154 |
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_dingtalk.yida_1_0 import models as dingtalkyida__1__0_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
"""
*\
"""
def __init__(
self,
config: open_api_models.Config,
):
super().__init__(config)
self._endpoint_rule = ''
if UtilClient.empty(self._endpoint):
self._endpoint = 'api.dingtalk.com'
def update_status(
self,
request: dingtalkyida__1__0_models.UpdateStatusRequest,
) -> dingtalkyida__1__0_models.UpdateStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateStatusHeaders()
return self.update_status_with_options(request, headers, runtime)
async def update_status_async(
self,
request: dingtalkyida__1__0_models.UpdateStatusRequest,
) -> dingtalkyida__1__0_models.UpdateStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateStatusHeaders()
return await self.update_status_with_options_async(request, headers, runtime)
def update_status_with_options(
self,
request: dingtalkyida__1__0_models.UpdateStatusRequest,
headers: dingtalkyida__1__0_models.UpdateStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateStatusResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.import_sequence):
body['importSequence'] = request.import_sequence
if not UtilClient.is_unset(request.error_lines):
body['errorLines'] = request.error_lines
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateStatusResponse(),
self.do_roarequest('UpdateStatus', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/forms/status', 'none', req, runtime)
)
async def update_status_with_options_async(
self,
request: dingtalkyida__1__0_models.UpdateStatusRequest,
headers: dingtalkyida__1__0_models.UpdateStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateStatusResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.import_sequence):
body['importSequence'] = request.import_sequence
if not UtilClient.is_unset(request.error_lines):
body['errorLines'] = request.error_lines
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateStatusResponse(),
await self.do_roarequest_async('UpdateStatus', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/forms/status', 'none', req, runtime)
)
def get_instances_by_id_list(
self,
request: dingtalkyida__1__0_models.GetInstancesByIdListRequest,
) -> dingtalkyida__1__0_models.GetInstancesByIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstancesByIdListHeaders()
return self.get_instances_by_id_list_with_options(request, headers, runtime)
async def get_instances_by_id_list_async(
self,
request: dingtalkyida__1__0_models.GetInstancesByIdListRequest,
) -> dingtalkyida__1__0_models.GetInstancesByIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstancesByIdListHeaders()
return await self.get_instances_by_id_list_with_options_async(request, headers, runtime)
def get_instances_by_id_list_with_options(
self,
request: dingtalkyida__1__0_models.GetInstancesByIdListRequest,
headers: dingtalkyida__1__0_models.GetInstancesByIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstancesByIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_ids):
query['processInstanceIds'] = request.process_instance_ids
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstancesByIdListResponse(),
self.do_roarequest('GetInstancesByIdList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/instances/searchWithIds', 'json', req, runtime)
)
async def get_instances_by_id_list_with_options_async(
self,
request: dingtalkyida__1__0_models.GetInstancesByIdListRequest,
headers: dingtalkyida__1__0_models.GetInstancesByIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstancesByIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_ids):
query['processInstanceIds'] = request.process_instance_ids
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstancesByIdListResponse(),
await self.do_roarequest_async('GetInstancesByIdList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/instances/searchWithIds', 'json', req, runtime)
)
def save_form_remark(
self,
request: dingtalkyida__1__0_models.SaveFormRemarkRequest,
) -> dingtalkyida__1__0_models.SaveFormRemarkResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SaveFormRemarkHeaders()
return self.save_form_remark_with_options(request, headers, runtime)
async def save_form_remark_async(
self,
request: dingtalkyida__1__0_models.SaveFormRemarkRequest,
) -> dingtalkyida__1__0_models.SaveFormRemarkResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SaveFormRemarkHeaders()
return await self.save_form_remark_with_options_async(request, headers, runtime)
def save_form_remark_with_options(
self,
request: dingtalkyida__1__0_models.SaveFormRemarkRequest,
headers: dingtalkyida__1__0_models.SaveFormRemarkHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SaveFormRemarkResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.reply_id):
body['replyId'] = request.reply_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
body['formInstanceId'] = request.form_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.at_user_id):
body['atUserId'] = request.at_user_id
if not UtilClient.is_unset(request.content):
body['content'] = request.content
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SaveFormRemarkResponse(),
self.do_roarequest('SaveFormRemark', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/remarks', 'json', req, runtime)
)
async def save_form_remark_with_options_async(
self,
request: dingtalkyida__1__0_models.SaveFormRemarkRequest,
headers: dingtalkyida__1__0_models.SaveFormRemarkHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SaveFormRemarkResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.reply_id):
body['replyId'] = request.reply_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
body['formInstanceId'] = request.form_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.at_user_id):
body['atUserId'] = request.at_user_id
if not UtilClient.is_unset(request.content):
body['content'] = request.content
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SaveFormRemarkResponse(),
await self.do_roarequest_async('SaveFormRemark', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/remarks', 'json', req, runtime)
)
def list_table_data_by_form_instance_id_table_id(
self,
form_instance_id: str,
request: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdRequest,
) -> dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdHeaders()
return self.list_table_data_by_form_instance_id_table_id_with_options(form_instance_id, request, headers, runtime)
async def list_table_data_by_form_instance_id_table_id_async(
self,
form_instance_id: str,
request: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdRequest,
) -> dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdHeaders()
return await self.list_table_data_by_form_instance_id_table_id_with_options_async(form_instance_id, request, headers, runtime)
    def list_table_data_by_form_instance_id_table_id_with_options(
        self,
        form_instance_id: str,
        request: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdRequest,
        headers: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse:
        """List inner-table rows of a form instance (GET /v1.0/yida/forms/innerTables/{form_instance_id}).

        Args:
            form_instance_id: identifier interpolated into the request path.
            request: table/paging parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            ListTableDataByFormInstanceIdTableIdResponse parsed from the JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.form_uuid):
            query['formUuid'] = request.form_uuid
        if not UtilClient.is_unset(request.table_field_id):
            query['tableFieldId'] = request.table_field_id
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        if not UtilClient.is_unset(request.system_token):
            query['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse(),
            self.do_roarequest('ListTableDataByFormInstanceIdTableId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/innerTables/{form_instance_id}', 'json', req, runtime)
        )
    async def list_table_data_by_form_instance_id_table_id_with_options_async(
        self,
        form_instance_id: str,
        request: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdRequest,
        headers: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse:
        """Async variant: list inner-table rows of a form instance (GET /v1.0/yida/forms/innerTables/{form_instance_id}).

        Args:
            form_instance_id: identifier interpolated into the request path.
            request: table/paging parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            ListTableDataByFormInstanceIdTableIdResponse parsed from the JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.form_uuid):
            query['formUuid'] = request.form_uuid
        if not UtilClient.is_unset(request.table_field_id):
            query['tableFieldId'] = request.table_field_id
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        if not UtilClient.is_unset(request.system_token):
            query['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse(),
            await self.do_roarequest_async('ListTableDataByFormInstanceIdTableId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/innerTables/{form_instance_id}', 'json', req, runtime)
        )
def get_task_copies(
self,
request: dingtalkyida__1__0_models.GetTaskCopiesRequest,
) -> dingtalkyida__1__0_models.GetTaskCopiesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetTaskCopiesHeaders()
return self.get_task_copies_with_options(request, headers, runtime)
async def get_task_copies_async(
self,
request: dingtalkyida__1__0_models.GetTaskCopiesRequest,
) -> dingtalkyida__1__0_models.GetTaskCopiesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetTaskCopiesHeaders()
return await self.get_task_copies_with_options_async(request, headers, runtime)
    def get_task_copies_with_options(
        self,
        request: dingtalkyida__1__0_models.GetTaskCopiesRequest,
        headers: dingtalkyida__1__0_models.GetTaskCopiesHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.GetTaskCopiesResponse:
        """Query carbon-copied workflow tasks (GET /v1.0/yida/tasks/taskCopies).

        Args:
            request: filter/paging parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            GetTaskCopiesResponse parsed from the service's JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.app_type):
            query['appType'] = request.app_type
        if not UtilClient.is_unset(request.system_token):
            query['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        if not UtilClient.is_unset(request.language):
            query['language'] = request.language
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        if not UtilClient.is_unset(request.keyword):
            query['keyword'] = request.keyword
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        if not UtilClient.is_unset(request.process_codes):
            query['processCodes'] = request.process_codes
        if not UtilClient.is_unset(request.create_from_time_gmt):
            query['createFromTimeGMT'] = request.create_from_time_gmt
        if not UtilClient.is_unset(request.create_to_time_gmt):
            query['createToTimeGMT'] = request.create_to_time_gmt
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.GetTaskCopiesResponse(),
            self.do_roarequest('GetTaskCopies', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/taskCopies', 'json', req, runtime)
        )
    async def get_task_copies_with_options_async(
        self,
        request: dingtalkyida__1__0_models.GetTaskCopiesRequest,
        headers: dingtalkyida__1__0_models.GetTaskCopiesHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.GetTaskCopiesResponse:
        """Async variant: query carbon-copied workflow tasks (GET /v1.0/yida/tasks/taskCopies).

        Args:
            request: filter/paging parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            GetTaskCopiesResponse parsed from the service's JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.app_type):
            query['appType'] = request.app_type
        if not UtilClient.is_unset(request.system_token):
            query['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        if not UtilClient.is_unset(request.language):
            query['language'] = request.language
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        if not UtilClient.is_unset(request.keyword):
            query['keyword'] = request.keyword
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        if not UtilClient.is_unset(request.process_codes):
            query['processCodes'] = request.process_codes
        if not UtilClient.is_unset(request.create_from_time_gmt):
            query['createFromTimeGMT'] = request.create_from_time_gmt
        if not UtilClient.is_unset(request.create_to_time_gmt):
            query['createToTimeGMT'] = request.create_to_time_gmt
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.GetTaskCopiesResponse(),
            await self.do_roarequest_async('GetTaskCopies', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/taskCopies', 'json', req, runtime)
        )
def get_running_tasks(
self,
request: dingtalkyida__1__0_models.GetRunningTasksRequest,
) -> dingtalkyida__1__0_models.GetRunningTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetRunningTasksHeaders()
return self.get_running_tasks_with_options(request, headers, runtime)
async def get_running_tasks_async(
self,
request: dingtalkyida__1__0_models.GetRunningTasksRequest,
) -> dingtalkyida__1__0_models.GetRunningTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetRunningTasksHeaders()
return await self.get_running_tasks_with_options_async(request, headers, runtime)
    def get_running_tasks_with_options(
        self,
        request: dingtalkyida__1__0_models.GetRunningTasksRequest,
        headers: dingtalkyida__1__0_models.GetRunningTasksHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.GetRunningTasksResponse:
        """Fetch running tasks of a process instance (GET /v1.0/yida/processes/tasks/getRunningTasks).

        Args:
            request: instance/auth parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            GetRunningTasksResponse parsed from the service's JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.process_instance_id):
            query['processInstanceId'] = request.process_instance_id
        if not UtilClient.is_unset(request.app_type):
            query['appType'] = request.app_type
        if not UtilClient.is_unset(request.system_token):
            query['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.language):
            query['language'] = request.language
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.GetRunningTasksResponse(),
            self.do_roarequest('GetRunningTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/tasks/getRunningTasks', 'json', req, runtime)
        )
    async def get_running_tasks_with_options_async(
        self,
        request: dingtalkyida__1__0_models.GetRunningTasksRequest,
        headers: dingtalkyida__1__0_models.GetRunningTasksHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.GetRunningTasksResponse:
        """Async variant: fetch running tasks of a process instance (GET /v1.0/yida/processes/tasks/getRunningTasks).

        Args:
            request: instance/auth parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            GetRunningTasksResponse parsed from the service's JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.process_instance_id):
            query['processInstanceId'] = request.process_instance_id
        if not UtilClient.is_unset(request.app_type):
            query['appType'] = request.app_type
        if not UtilClient.is_unset(request.system_token):
            query['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.language):
            query['language'] = request.language
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.GetRunningTasksResponse(),
            await self.do_roarequest_async('GetRunningTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/tasks/getRunningTasks', 'json', req, runtime)
        )
def list_navigation_by_form_type(
self,
request: dingtalkyida__1__0_models.ListNavigationByFormTypeRequest,
) -> dingtalkyida__1__0_models.ListNavigationByFormTypeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListNavigationByFormTypeHeaders()
return self.list_navigation_by_form_type_with_options(request, headers, runtime)
async def list_navigation_by_form_type_async(
self,
request: dingtalkyida__1__0_models.ListNavigationByFormTypeRequest,
) -> dingtalkyida__1__0_models.ListNavigationByFormTypeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListNavigationByFormTypeHeaders()
return await self.list_navigation_by_form_type_with_options_async(request, headers, runtime)
    def list_navigation_by_form_type_with_options(
        self,
        request: dingtalkyida__1__0_models.ListNavigationByFormTypeRequest,
        headers: dingtalkyida__1__0_models.ListNavigationByFormTypeHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.ListNavigationByFormTypeResponse:
        """List app navigation entries by form type (GET /v1.0/yida/apps/navigations).

        Args:
            request: app/auth/formType parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            ListNavigationByFormTypeResponse parsed from the service's JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.app_type):
            query['appType'] = request.app_type
        if not UtilClient.is_unset(request.system_token):
            query['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        if not UtilClient.is_unset(request.language):
            query['language'] = request.language
        if not UtilClient.is_unset(request.form_type):
            query['formType'] = request.form_type
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.ListNavigationByFormTypeResponse(),
            self.do_roarequest('ListNavigationByFormType', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/navigations', 'json', req, runtime)
        )
    async def list_navigation_by_form_type_with_options_async(
        self,
        request: dingtalkyida__1__0_models.ListNavigationByFormTypeRequest,
        headers: dingtalkyida__1__0_models.ListNavigationByFormTypeHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.ListNavigationByFormTypeResponse:
        """Async variant: list app navigation entries by form type (GET /v1.0/yida/apps/navigations).

        Args:
            request: app/auth/formType parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            ListNavigationByFormTypeResponse parsed from the service's JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.app_type):
            query['appType'] = request.app_type
        if not UtilClient.is_unset(request.system_token):
            query['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        if not UtilClient.is_unset(request.language):
            query['language'] = request.language
        if not UtilClient.is_unset(request.form_type):
            query['formType'] = request.form_type
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.ListNavigationByFormTypeResponse(),
            await self.do_roarequest_async('ListNavigationByFormType', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/navigations', 'json', req, runtime)
        )
def terminate_instance(
self,
request: dingtalkyida__1__0_models.TerminateInstanceRequest,
) -> dingtalkyida__1__0_models.TerminateInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.TerminateInstanceHeaders()
return self.terminate_instance_with_options(request, headers, runtime)
async def terminate_instance_async(
self,
request: dingtalkyida__1__0_models.TerminateInstanceRequest,
) -> dingtalkyida__1__0_models.TerminateInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.TerminateInstanceHeaders()
return await self.terminate_instance_with_options_async(request, headers, runtime)
    def terminate_instance_with_options(
        self,
        request: dingtalkyida__1__0_models.TerminateInstanceRequest,
        headers: dingtalkyida__1__0_models.TerminateInstanceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.TerminateInstanceResponse:
        """Terminate a process instance (PUT /v1.0/yida/processes/instances/terminate).

        Note the body style is 'none': this endpoint carries everything in the
        query string and returns no JSON payload to parse.

        Args:
            request: app/auth/instance parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            TerminateInstanceResponse built from the (body-less) reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.app_type):
            query['appType'] = request.app_type
        if not UtilClient.is_unset(request.system_token):
            query['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        if not UtilClient.is_unset(request.language):
            query['language'] = request.language
        if not UtilClient.is_unset(request.process_instance_id):
            query['processInstanceId'] = request.process_instance_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.TerminateInstanceResponse(),
            self.do_roarequest('TerminateInstance', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/processes/instances/terminate', 'none', req, runtime)
        )
    async def terminate_instance_with_options_async(
        self,
        request: dingtalkyida__1__0_models.TerminateInstanceRequest,
        headers: dingtalkyida__1__0_models.TerminateInstanceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.TerminateInstanceResponse:
        """Async variant: terminate a process instance (PUT /v1.0/yida/processes/instances/terminate).

        Note the body style is 'none': this endpoint carries everything in the
        query string and returns no JSON payload to parse.

        Args:
            request: app/auth/instance parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            TerminateInstanceResponse built from the (body-less) reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.app_type):
            query['appType'] = request.app_type
        if not UtilClient.is_unset(request.system_token):
            query['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        if not UtilClient.is_unset(request.language):
            query['language'] = request.language
        if not UtilClient.is_unset(request.process_instance_id):
            query['processInstanceId'] = request.process_instance_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.TerminateInstanceResponse(),
            await self.do_roarequest_async('TerminateInstance', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/processes/instances/terminate', 'none', req, runtime)
        )
def check_cloud_account_status(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.CheckCloudAccountStatusRequest,
) -> dingtalkyida__1__0_models.CheckCloudAccountStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.CheckCloudAccountStatusHeaders()
return self.check_cloud_account_status_with_options(caller_uid, request, headers, runtime)
async def check_cloud_account_status_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.CheckCloudAccountStatusRequest,
) -> dingtalkyida__1__0_models.CheckCloudAccountStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.CheckCloudAccountStatusHeaders()
return await self.check_cloud_account_status_with_options_async(caller_uid, request, headers, runtime)
def check_cloud_account_status_with_options(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.CheckCloudAccountStatusRequest,
headers: dingtalkyida__1__0_models.CheckCloudAccountStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.CheckCloudAccountStatusResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.CheckCloudAccountStatusResponse(),
self.do_roarequest('CheckCloudAccountStatus', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/cloudAccountStatus/{caller_uid}', 'json', req, runtime)
)
async def check_cloud_account_status_with_options_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.CheckCloudAccountStatusRequest,
headers: dingtalkyida__1__0_models.CheckCloudAccountStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.CheckCloudAccountStatusResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.CheckCloudAccountStatusResponse(),
await self.do_roarequest_async('CheckCloudAccountStatus', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/cloudAccountStatus/{caller_uid}', 'json', req, runtime)
)
def get_corp_accomplishment_tasks(
self,
corp_id: str,
user_id: str,
request: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksRequest,
) -> dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpAccomplishmentTasksHeaders()
return self.get_corp_accomplishment_tasks_with_options(corp_id, user_id, request, headers, runtime)
async def get_corp_accomplishment_tasks_async(
self,
corp_id: str,
user_id: str,
request: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksRequest,
) -> dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpAccomplishmentTasksHeaders()
return await self.get_corp_accomplishment_tasks_with_options_async(corp_id, user_id, request, headers, runtime)
    def get_corp_accomplishment_tasks_with_options(
        self,
        corp_id: str,
        user_id: str,
        request: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksRequest,
        headers: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse:
        """Query completed tasks (GET /v1.0/yida/tasks/completedTasks/{corp_id}/{user_id}).

        Args:
            corp_id: corp identifier interpolated into the request path.
            user_id: user identifier interpolated into the request path.
            request: filter/paging parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            GetCorpAccomplishmentTasksResponse parsed from the JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        if not UtilClient.is_unset(request.language):
            query['language'] = request.language
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        if not UtilClient.is_unset(request.keyword):
            query['keyword'] = request.keyword
        if not UtilClient.is_unset(request.app_types):
            query['appTypes'] = request.app_types
        if not UtilClient.is_unset(request.process_codes):
            query['processCodes'] = request.process_codes
        if not UtilClient.is_unset(request.create_from_time_gmt):
            query['createFromTimeGMT'] = request.create_from_time_gmt
        if not UtilClient.is_unset(request.create_to_time_gmt):
            query['createToTimeGMT'] = request.create_to_time_gmt
        if not UtilClient.is_unset(request.token):
            query['token'] = request.token
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse(),
            self.do_roarequest('GetCorpAccomplishmentTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/completedTasks/{corp_id}/{user_id}', 'json', req, runtime)
        )
    async def get_corp_accomplishment_tasks_with_options_async(
        self,
        corp_id: str,
        user_id: str,
        request: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksRequest,
        headers: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse:
        """Async variant: query completed tasks (GET /v1.0/yida/tasks/completedTasks/{corp_id}/{user_id}).

        Args:
            corp_id: corp identifier interpolated into the request path.
            user_id: user identifier interpolated into the request path.
            request: filter/paging parameters; unset fields are omitted from the query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            GetCorpAccomplishmentTasksResponse parsed from the JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        if not UtilClient.is_unset(request.language):
            query['language'] = request.language
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        if not UtilClient.is_unset(request.keyword):
            query['keyword'] = request.keyword
        if not UtilClient.is_unset(request.app_types):
            query['appTypes'] = request.app_types
        if not UtilClient.is_unset(request.process_codes):
            query['processCodes'] = request.process_codes
        if not UtilClient.is_unset(request.create_from_time_gmt):
            query['createFromTimeGMT'] = request.create_from_time_gmt
        if not UtilClient.is_unset(request.create_to_time_gmt):
            query['createToTimeGMT'] = request.create_to_time_gmt
        if not UtilClient.is_unset(request.token):
            query['token'] = request.token
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse(),
            await self.do_roarequest_async('GetCorpAccomplishmentTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/completedTasks/{corp_id}/{user_id}', 'json', req, runtime)
        )
def get_instances(
self,
request: dingtalkyida__1__0_models.GetInstancesRequest,
) -> dingtalkyida__1__0_models.GetInstancesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstancesHeaders()
return self.get_instances_with_options(request, headers, runtime)
async def get_instances_async(
self,
request: dingtalkyida__1__0_models.GetInstancesRequest,
) -> dingtalkyida__1__0_models.GetInstancesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstancesHeaders()
return await self.get_instances_with_options_async(request, headers, runtime)
    def get_instances_with_options(
        self,
        request: dingtalkyida__1__0_models.GetInstancesRequest,
        headers: dingtalkyida__1__0_models.GetInstancesHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.GetInstancesResponse:
        """Search process instances (POST /v1.0/yida/processes/instances).

        Paging parameters travel in the query string; all filter criteria go
        in the JSON body. Unset fields of *request* are omitted from both.

        Args:
            request: paging + search criteria for the instance query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            GetInstancesResponse parsed from the service's JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        body = {}
        if not UtilClient.is_unset(request.app_type):
            body['appType'] = request.app_type
        if not UtilClient.is_unset(request.system_token):
            body['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.user_id):
            body['userId'] = request.user_id
        if not UtilClient.is_unset(request.language):
            body['language'] = request.language
        if not UtilClient.is_unset(request.form_uuid):
            body['formUuid'] = request.form_uuid
        if not UtilClient.is_unset(request.search_field_json):
            body['searchFieldJson'] = request.search_field_json
        if not UtilClient.is_unset(request.originator_id):
            body['originatorId'] = request.originator_id
        if not UtilClient.is_unset(request.create_from_time_gmt):
            body['createFromTimeGMT'] = request.create_from_time_gmt
        if not UtilClient.is_unset(request.create_to_time_gmt):
            body['createToTimeGMT'] = request.create_to_time_gmt
        if not UtilClient.is_unset(request.modified_from_time_gmt):
            body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
        if not UtilClient.is_unset(request.modified_to_time_gmt):
            body['modifiedToTimeGMT'] = request.modified_to_time_gmt
        if not UtilClient.is_unset(request.task_id):
            body['taskId'] = request.task_id
        if not UtilClient.is_unset(request.instance_status):
            body['instanceStatus'] = request.instance_status
        if not UtilClient.is_unset(request.approved_result):
            body['approvedResult'] = request.approved_result
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query),
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.GetInstancesResponse(),
            self.do_roarequest('GetInstances', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instances', 'json', req, runtime)
        )
    async def get_instances_with_options_async(
        self,
        request: dingtalkyida__1__0_models.GetInstancesRequest,
        headers: dingtalkyida__1__0_models.GetInstancesHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.GetInstancesResponse:
        """Async variant: search process instances (POST /v1.0/yida/processes/instances).

        Paging parameters travel in the query string; all filter criteria go
        in the JSON body. Unset fields of *request* are omitted from both.

        Args:
            request: paging + search criteria for the instance query.
            headers: optional common headers plus the DingTalk access token.
            runtime: transport options forwarded to the HTTP layer.

        Returns:
            GetInstancesResponse parsed from the service's JSON reply.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        body = {}
        if not UtilClient.is_unset(request.app_type):
            body['appType'] = request.app_type
        if not UtilClient.is_unset(request.system_token):
            body['systemToken'] = request.system_token
        if not UtilClient.is_unset(request.user_id):
            body['userId'] = request.user_id
        if not UtilClient.is_unset(request.language):
            body['language'] = request.language
        if not UtilClient.is_unset(request.form_uuid):
            body['formUuid'] = request.form_uuid
        if not UtilClient.is_unset(request.search_field_json):
            body['searchFieldJson'] = request.search_field_json
        if not UtilClient.is_unset(request.originator_id):
            body['originatorId'] = request.originator_id
        if not UtilClient.is_unset(request.create_from_time_gmt):
            body['createFromTimeGMT'] = request.create_from_time_gmt
        if not UtilClient.is_unset(request.create_to_time_gmt):
            body['createToTimeGMT'] = request.create_to_time_gmt
        if not UtilClient.is_unset(request.modified_from_time_gmt):
            body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
        if not UtilClient.is_unset(request.modified_to_time_gmt):
            body['modifiedToTimeGMT'] = request.modified_to_time_gmt
        if not UtilClient.is_unset(request.task_id):
            body['taskId'] = request.task_id
        if not UtilClient.is_unset(request.instance_status):
            body['instanceStatus'] = request.instance_status
        if not UtilClient.is_unset(request.approved_result):
            body['approvedResult'] = request.approved_result
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): binds, not copies — adding the token below mutates
            # the caller's common_headers dict; confirm this is intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query),
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.GetInstancesResponse(),
            await self.do_roarequest_async('GetInstances', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instances', 'json', req, runtime)
        )
def list_application_authorization_service_connector_information(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationHeaders()
return self.list_application_authorization_service_connector_information_with_options(instance_id, request, headers, runtime)
async def list_application_authorization_service_connector_information_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationHeaders()
return await self.list_application_authorization_service_connector_information_with_options_async(instance_id, request, headers, runtime)
def list_application_authorization_service_connector_information_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse(),
self.do_roarequest('ListApplicationAuthorizationServiceConnectorInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applicationAuthorizations/plugs/{instance_id}', 'json', req, runtime)
)
async def list_application_authorization_service_connector_information_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse(),
await self.do_roarequest_async('ListApplicationAuthorizationServiceConnectorInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applicationAuthorizations/plugs/{instance_id}', 'json', req, runtime)
)
def validate_order_buy(
self,
request: dingtalkyida__1__0_models.ValidateOrderBuyRequest,
) -> dingtalkyida__1__0_models.ValidateOrderBuyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderBuyHeaders()
return self.validate_order_buy_with_options(request, headers, runtime)
async def validate_order_buy_async(
self,
request: dingtalkyida__1__0_models.ValidateOrderBuyRequest,
) -> dingtalkyida__1__0_models.ValidateOrderBuyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderBuyHeaders()
return await self.validate_order_buy_with_options_async(request, headers, runtime)
def validate_order_buy_with_options(
self,
request: dingtalkyida__1__0_models.ValidateOrderBuyRequest,
headers: dingtalkyida__1__0_models.ValidateOrderBuyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderBuyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderBuyResponse(),
self.do_roarequest('ValidateOrderBuy', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/orderBuy/validate', 'json', req, runtime)
)
async def validate_order_buy_with_options_async(
self,
request: dingtalkyida__1__0_models.ValidateOrderBuyRequest,
headers: dingtalkyida__1__0_models.ValidateOrderBuyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderBuyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderBuyResponse(),
await self.do_roarequest_async('ValidateOrderBuy', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/orderBuy/validate', 'json', req, runtime)
)
def renew_tenant_order(
self,
request: dingtalkyida__1__0_models.RenewTenantOrderRequest,
) -> dingtalkyida__1__0_models.RenewTenantOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenewTenantOrderHeaders()
return self.renew_tenant_order_with_options(request, headers, runtime)
async def renew_tenant_order_async(
self,
request: dingtalkyida__1__0_models.RenewTenantOrderRequest,
) -> dingtalkyida__1__0_models.RenewTenantOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenewTenantOrderHeaders()
return await self.renew_tenant_order_with_options_async(request, headers, runtime)
def renew_tenant_order_with_options(
self,
request: dingtalkyida__1__0_models.RenewTenantOrderRequest,
headers: dingtalkyida__1__0_models.RenewTenantOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenewTenantOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenewTenantOrderResponse(),
self.do_roarequest('RenewTenantOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/tenants/reorder', 'json', req, runtime)
)
async def renew_tenant_order_with_options_async(
self,
request: dingtalkyida__1__0_models.RenewTenantOrderRequest,
headers: dingtalkyida__1__0_models.RenewTenantOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenewTenantOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenewTenantOrderResponse(),
await self.do_roarequest_async('RenewTenantOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/tenants/reorder', 'json', req, runtime)
)
def get_print_dictionary(
self,
request: dingtalkyida__1__0_models.GetPrintDictionaryRequest,
) -> dingtalkyida__1__0_models.GetPrintDictionaryResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPrintDictionaryHeaders()
return self.get_print_dictionary_with_options(request, headers, runtime)
async def get_print_dictionary_async(
self,
request: dingtalkyida__1__0_models.GetPrintDictionaryRequest,
) -> dingtalkyida__1__0_models.GetPrintDictionaryResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPrintDictionaryHeaders()
return await self.get_print_dictionary_with_options_async(request, headers, runtime)
def get_print_dictionary_with_options(
self,
request: dingtalkyida__1__0_models.GetPrintDictionaryRequest,
headers: dingtalkyida__1__0_models.GetPrintDictionaryHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPrintDictionaryResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.form_uuid):
query['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.version):
query['version'] = request.version
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPrintDictionaryResponse(),
self.do_roarequest('GetPrintDictionary', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/printTemplates/printDictionaries', 'json', req, runtime)
)
async def get_print_dictionary_with_options_async(
self,
request: dingtalkyida__1__0_models.GetPrintDictionaryRequest,
headers: dingtalkyida__1__0_models.GetPrintDictionaryHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPrintDictionaryResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.form_uuid):
query['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.version):
query['version'] = request.version
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPrintDictionaryResponse(),
await self.do_roarequest_async('GetPrintDictionary', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/printTemplates/printDictionaries', 'json', req, runtime)
)
def update_instance(
self,
request: dingtalkyida__1__0_models.UpdateInstanceRequest,
) -> dingtalkyida__1__0_models.UpdateInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateInstanceHeaders()
return self.update_instance_with_options(request, headers, runtime)
async def update_instance_async(
self,
request: dingtalkyida__1__0_models.UpdateInstanceRequest,
) -> dingtalkyida__1__0_models.UpdateInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateInstanceHeaders()
return await self.update_instance_with_options_async(request, headers, runtime)
def update_instance_with_options(
self,
request: dingtalkyida__1__0_models.UpdateInstanceRequest,
headers: dingtalkyida__1__0_models.UpdateInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateInstanceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.update_form_data_json):
body['updateFormDataJson'] = request.update_form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateInstanceResponse(),
self.do_roarequest('UpdateInstance', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/processes/instances', 'none', req, runtime)
)
async def update_instance_with_options_async(
self,
request: dingtalkyida__1__0_models.UpdateInstanceRequest,
headers: dingtalkyida__1__0_models.UpdateInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateInstanceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.update_form_data_json):
body['updateFormDataJson'] = request.update_form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateInstanceResponse(),
await self.do_roarequest_async('UpdateInstance', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/processes/instances', 'none', req, runtime)
)
def buy_authorization_order(
self,
request: dingtalkyida__1__0_models.BuyAuthorizationOrderRequest,
) -> dingtalkyida__1__0_models.BuyAuthorizationOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.BuyAuthorizationOrderHeaders()
return self.buy_authorization_order_with_options(request, headers, runtime)
async def buy_authorization_order_async(
self,
request: dingtalkyida__1__0_models.BuyAuthorizationOrderRequest,
) -> dingtalkyida__1__0_models.BuyAuthorizationOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.BuyAuthorizationOrderHeaders()
return await self.buy_authorization_order_with_options_async(request, headers, runtime)
def buy_authorization_order_with_options(
self,
request: dingtalkyida__1__0_models.BuyAuthorizationOrderRequest,
headers: dingtalkyida__1__0_models.BuyAuthorizationOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.BuyAuthorizationOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.BuyAuthorizationOrderResponse(),
self.do_roarequest('BuyAuthorizationOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/appAuthorizations/order', 'json', req, runtime)
)
async def buy_authorization_order_with_options_async(
self,
request: dingtalkyida__1__0_models.BuyAuthorizationOrderRequest,
headers: dingtalkyida__1__0_models.BuyAuthorizationOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.BuyAuthorizationOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.BuyAuthorizationOrderResponse(),
await self.do_roarequest_async('BuyAuthorizationOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/appAuthorizations/order', 'json', req, runtime)
)
def validate_application_service_order_upgrade(
self,
caller_unionid: str,
request: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeHeaders()
return self.validate_application_service_order_upgrade_with_options(caller_unionid, request, headers, runtime)
async def validate_application_service_order_upgrade_async(
self,
caller_unionid: str,
request: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeHeaders()
return await self.validate_application_service_order_upgrade_with_options_async(caller_unionid, request, headers, runtime)
def validate_application_service_order_upgrade_with_options(
self,
caller_unionid: str,
request: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse(),
self.do_roarequest('ValidateApplicationServiceOrderUpgrade', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applications/orderValidations/{caller_unionid}', 'json', req, runtime)
)
async def validate_application_service_order_upgrade_with_options_async(
self,
caller_unionid: str,
request: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse(),
await self.do_roarequest_async('ValidateApplicationServiceOrderUpgrade', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applications/orderValidations/{caller_unionid}', 'json', req, runtime)
)
def get_corp_tasks(
self,
request: dingtalkyida__1__0_models.GetCorpTasksRequest,
) -> dingtalkyida__1__0_models.GetCorpTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpTasksHeaders()
return self.get_corp_tasks_with_options(request, headers, runtime)
async def get_corp_tasks_async(
self,
request: dingtalkyida__1__0_models.GetCorpTasksRequest,
) -> dingtalkyida__1__0_models.GetCorpTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpTasksHeaders()
return await self.get_corp_tasks_with_options_async(request, headers, runtime)
def get_corp_tasks_with_options(
self,
request: dingtalkyida__1__0_models.GetCorpTasksRequest,
headers: dingtalkyida__1__0_models.GetCorpTasksHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetCorpTasksResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.token):
query['token'] = request.token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetCorpTasksResponse(),
self.do_roarequest('GetCorpTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/corpTasks', 'json', req, runtime)
)
async def get_corp_tasks_with_options_async(
self,
request: dingtalkyida__1__0_models.GetCorpTasksRequest,
headers: dingtalkyida__1__0_models.GetCorpTasksHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetCorpTasksResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.token):
query['token'] = request.token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetCorpTasksResponse(),
await self.do_roarequest_async('GetCorpTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/corpTasks', 'json', req, runtime)
)
def list_commodity(
self,
request: dingtalkyida__1__0_models.ListCommodityRequest,
) -> dingtalkyida__1__0_models.ListCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListCommodityHeaders()
return self.list_commodity_with_options(request, headers, runtime)
async def list_commodity_async(
self,
request: dingtalkyida__1__0_models.ListCommodityRequest,
) -> dingtalkyida__1__0_models.ListCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListCommodityHeaders()
return await self.list_commodity_with_options_async(request, headers, runtime)
def list_commodity_with_options(
self,
request: dingtalkyida__1__0_models.ListCommodityRequest,
headers: dingtalkyida__1__0_models.ListCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListCommodityResponse(),
self.do_roarequest('ListCommodity', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/appAuth/commodities', 'json', req, runtime)
)
async def list_commodity_with_options_async(
self,
request: dingtalkyida__1__0_models.ListCommodityRequest,
headers: dingtalkyida__1__0_models.ListCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListCommodityResponse(),
await self.do_roarequest_async('ListCommodity', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/appAuth/commodities', 'json', req, runtime)
)
def notify_authorization_result(
self,
request: dingtalkyida__1__0_models.NotifyAuthorizationResultRequest,
) -> dingtalkyida__1__0_models.NotifyAuthorizationResultResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.NotifyAuthorizationResultHeaders()
return self.notify_authorization_result_with_options(request, headers, runtime)
async def notify_authorization_result_async(
self,
request: dingtalkyida__1__0_models.NotifyAuthorizationResultRequest,
) -> dingtalkyida__1__0_models.NotifyAuthorizationResultResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.NotifyAuthorizationResultHeaders()
return await self.notify_authorization_result_with_options_async(request, headers, runtime)
def notify_authorization_result_with_options(
self,
request: dingtalkyida__1__0_models.NotifyAuthorizationResultRequest,
headers: dingtalkyida__1__0_models.NotifyAuthorizationResultHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.NotifyAuthorizationResultResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.caller_uid):
body['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.NotifyAuthorizationResultResponse(),
self.do_roarequest('NotifyAuthorizationResult', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/authorizationResults/notify', 'json', req, runtime)
)
async def notify_authorization_result_with_options_async(
self,
request: dingtalkyida__1__0_models.NotifyAuthorizationResultRequest,
headers: dingtalkyida__1__0_models.NotifyAuthorizationResultHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.NotifyAuthorizationResultResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.caller_uid):
body['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.NotifyAuthorizationResultResponse(),
await self.do_roarequest_async('NotifyAuthorizationResult', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/authorizationResults/notify', 'json', req, runtime)
)
def buy_fresh_order(
self,
request: dingtalkyida__1__0_models.BuyFreshOrderRequest,
) -> dingtalkyida__1__0_models.BuyFreshOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.BuyFreshOrderHeaders()
return self.buy_fresh_order_with_options(request, headers, runtime)
async def buy_fresh_order_async(
self,
request: dingtalkyida__1__0_models.BuyFreshOrderRequest,
) -> dingtalkyida__1__0_models.BuyFreshOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.BuyFreshOrderHeaders()
return await self.buy_fresh_order_with_options_async(request, headers, runtime)
def buy_fresh_order_with_options(
self,
request: dingtalkyida__1__0_models.BuyFreshOrderRequest,
headers: dingtalkyida__1__0_models.BuyFreshOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.BuyFreshOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.BuyFreshOrderResponse(),
self.do_roarequest('BuyFreshOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/freshOrders', 'json', req, runtime)
)
async def buy_fresh_order_with_options_async(
self,
request: dingtalkyida__1__0_models.BuyFreshOrderRequest,
headers: dingtalkyida__1__0_models.BuyFreshOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.BuyFreshOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.BuyFreshOrderResponse(),
await self.do_roarequest_async('BuyFreshOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/freshOrders', 'json', req, runtime)
)
def remove_tenant_resource(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.RemoveTenantResourceRequest,
) -> dingtalkyida__1__0_models.RemoveTenantResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RemoveTenantResourceHeaders()
return self.remove_tenant_resource_with_options(caller_uid, request, headers, runtime)
async def remove_tenant_resource_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.RemoveTenantResourceRequest,
) -> dingtalkyida__1__0_models.RemoveTenantResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RemoveTenantResourceHeaders()
return await self.remove_tenant_resource_with_options_async(caller_uid, request, headers, runtime)
def remove_tenant_resource_with_options(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.RemoveTenantResourceRequest,
headers: dingtalkyida__1__0_models.RemoveTenantResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RemoveTenantResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RemoveTenantResourceResponse(),
self.do_roarequest('RemoveTenantResource', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/applications/tenantRelatedResources/{caller_uid}', 'json', req, runtime)
)
async def remove_tenant_resource_with_options_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.RemoveTenantResourceRequest,
headers: dingtalkyida__1__0_models.RemoveTenantResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RemoveTenantResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RemoveTenantResourceResponse(),
await self.do_roarequest_async('RemoveTenantResource', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/applications/tenantRelatedResources/{caller_uid}', 'json', req, runtime)
)
def renew_application_authorization_service_order(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders()
return self.renew_application_authorization_service_order_with_options(request, headers, runtime)
async def renew_application_authorization_service_order_async(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders()
return await self.renew_application_authorization_service_order_with_options_async(request, headers, runtime)
def renew_application_authorization_service_order_with_options(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse(),
self.do_roarequest('RenewApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/orders/renew', 'json', req, runtime)
)
async def renew_application_authorization_service_order_with_options_async(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse(),
await self.do_roarequest_async('RenewApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/orders/renew', 'json', req, runtime)
)
def get_process_definition(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetProcessDefinitionHeaders()
return self.get_process_definition_with_options(process_instance_id, request, headers, runtime)
async def get_process_definition_async(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetProcessDefinitionHeaders()
return await self.get_process_definition_with_options_async(process_instance_id, request, headers, runtime)
def get_process_definition_with_options(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
headers: dingtalkyida__1__0_models.GetProcessDefinitionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.group_id):
query['groupId'] = request.group_id
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.order_number):
query['orderNumber'] = request.order_number
if not UtilClient.is_unset(request.system_type):
query['systemType'] = request.system_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.name_space):
query['nameSpace'] = request.name_space
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetProcessDefinitionResponse(),
self.do_roarequest('GetProcessDefinition', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/definitions/{process_instance_id}', 'json', req, runtime)
)
async def get_process_definition_with_options_async(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
headers: dingtalkyida__1__0_models.GetProcessDefinitionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.group_id):
query['groupId'] = request.group_id
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.order_number):
query['orderNumber'] = request.order_number
if not UtilClient.is_unset(request.system_type):
query['systemType'] = request.system_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.name_space):
query['nameSpace'] = request.name_space
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetProcessDefinitionResponse(),
await self.do_roarequest_async('GetProcessDefinition', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/definitions/{process_instance_id}', 'json', req, runtime)
)
def upgrade_tenant_information(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpgradeTenantInformationHeaders()
return self.upgrade_tenant_information_with_options(request, headers, runtime)
async def upgrade_tenant_information_async(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpgradeTenantInformationHeaders()
return await self.upgrade_tenant_information_with_options_async(request, headers, runtime)
def upgrade_tenant_information_with_options(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
headers: dingtalkyida__1__0_models.UpgradeTenantInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpgradeTenantInformationResponse(),
self.do_roarequest('UpgradeTenantInformation', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/apps/tenantInfos', 'json', req, runtime)
)
async def upgrade_tenant_information_with_options_async(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
headers: dingtalkyida__1__0_models.UpgradeTenantInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpgradeTenantInformationResponse(),
await self.do_roarequest_async('UpgradeTenantInformation', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/apps/tenantInfos', 'json', req, runtime)
)
def get_application_authorization_service_platform_resource(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders()
return self.get_application_authorization_service_platform_resource_with_options(request, headers, runtime)
async def get_application_authorization_service_platform_resource_async(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders()
return await self.get_application_authorization_service_platform_resource_with_options_async(request, headers, runtime)
def get_application_authorization_service_platform_resource_with_options(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
headers: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse(),
self.do_roarequest('GetApplicationAuthorizationServicePlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorization/platformResources', 'json', req, runtime)
)
async def get_application_authorization_service_platform_resource_with_options_async(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
headers: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse(),
await self.do_roarequest_async('GetApplicationAuthorizationServicePlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorization/platformResources', 'json', req, runtime)
)
def list_application_authorization_service_application_information(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders()
return self.list_application_authorization_service_application_information_with_options(instance_id, request, headers, runtime)
async def list_application_authorization_service_application_information_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders()
return await self.list_application_authorization_service_application_information_with_options_async(instance_id, request, headers, runtime)
def list_application_authorization_service_application_information_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse(),
self.do_roarequest('ListApplicationAuthorizationServiceApplicationInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorizations/applicationInfos/{instance_id}', 'json', req, runtime)
)
async def list_application_authorization_service_application_information_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse(),
await self.do_roarequest_async('ListApplicationAuthorizationServiceApplicationInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorizations/applicationInfos/{instance_id}', 'json', req, runtime)
)
def validate_application_authorization_service_order(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders()
return self.validate_application_authorization_service_order_with_options(caller_uid, request, headers, runtime)
async def validate_application_authorization_service_order_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders()
return await self.validate_application_authorization_service_order_with_options_async(caller_uid, request, headers, runtime)
def validate_application_authorization_service_order_with_options(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse(),
self.do_roarequest('ValidateApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/appsAuthorizations/freshOrderInfoReviews/{caller_uid}', 'json', req, runtime)
)
async def validate_application_authorization_service_order_with_options_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse(),
await self.do_roarequest_async('ValidateApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/appsAuthorizations/freshOrderInfoReviews/{caller_uid}', 'json', req, runtime)
)
def get_activity_list(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivityListHeaders()
return self.get_activity_list_with_options(request, headers, runtime)
async def get_activity_list_async(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivityListHeaders()
return await self.get_activity_list_with_options_async(request, headers, runtime)
def get_activity_list_with_options(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
headers: dingtalkyida__1__0_models.GetActivityListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.process_code):
query['processCode'] = request.process_code
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivityListResponse(),
self.do_roarequest('GetActivityList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/activities', 'json', req, runtime)
)
async def get_activity_list_with_options_async(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
headers: dingtalkyida__1__0_models.GetActivityListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.process_code):
query['processCode'] = request.process_code
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivityListResponse(),
await self.do_roarequest_async('GetActivityList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/activities', 'json', req, runtime)
)
def execute_custom_api(
self,
request: dingtalkyida__1__0_models.ExecuteCustomApiRequest,
) -> dingtalkyida__1__0_models.ExecuteCustomApiResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecuteCustomApiHeaders()
return self.execute_custom_api_with_options(request, headers, runtime)
async def execute_custom_api_async(
self,
request: dingtalkyida__1__0_models.ExecuteCustomApiRequest,
) -> dingtalkyida__1__0_models.ExecuteCustomApiResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecuteCustomApiHeaders()
return await self.execute_custom_api_with_options_async(request, headers, runtime)
def execute_custom_api_with_options(
self,
request: dingtalkyida__1__0_models.ExecuteCustomApiRequest,
headers: dingtalkyida__1__0_models.ExecuteCustomApiHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecuteCustomApiResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.data):
query['data'] = request.data
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.service_id):
query['serviceId'] = request.service_id
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecuteCustomApiResponse(),
self.do_roarequest('ExecuteCustomApi', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/customApi/execute', 'json', req, runtime)
)
async def execute_custom_api_with_options_async(
self,
request: dingtalkyida__1__0_models.ExecuteCustomApiRequest,
headers: dingtalkyida__1__0_models.ExecuteCustomApiHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecuteCustomApiResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.data):
query['data'] = request.data
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.service_id):
query['serviceId'] = request.service_id
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecuteCustomApiResponse(),
await self.do_roarequest_async('ExecuteCustomApi', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/customApi/execute', 'json', req, runtime)
)
def login_code_gen(
self,
request: dingtalkyida__1__0_models.LoginCodeGenRequest,
) -> dingtalkyida__1__0_models.LoginCodeGenResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.LoginCodeGenHeaders()
return self.login_code_gen_with_options(request, headers, runtime)
async def login_code_gen_async(
self,
request: dingtalkyida__1__0_models.LoginCodeGenRequest,
) -> dingtalkyida__1__0_models.LoginCodeGenResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.LoginCodeGenHeaders()
return await self.login_code_gen_with_options_async(request, headers, runtime)
def login_code_gen_with_options(
self,
request: dingtalkyida__1__0_models.LoginCodeGenRequest,
headers: dingtalkyida__1__0_models.LoginCodeGenHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.LoginCodeGenResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.LoginCodeGenResponse(),
self.do_roarequest('LoginCodeGen', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/authorizations/loginCodes', 'json', req, runtime)
)
async def login_code_gen_with_options_async(
self,
request: dingtalkyida__1__0_models.LoginCodeGenRequest,
headers: dingtalkyida__1__0_models.LoginCodeGenHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.LoginCodeGenResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.LoginCodeGenResponse(),
await self.do_roarequest_async('LoginCodeGen', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/authorizations/loginCodes', 'json', req, runtime)
)
def terminate_cloud_authorization(
self,
request: dingtalkyida__1__0_models.TerminateCloudAuthorizationRequest,
) -> dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.TerminateCloudAuthorizationHeaders()
return self.terminate_cloud_authorization_with_options(request, headers, runtime)
async def terminate_cloud_authorization_async(
self,
request: dingtalkyida__1__0_models.TerminateCloudAuthorizationRequest,
) -> dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.TerminateCloudAuthorizationHeaders()
return await self.terminate_cloud_authorization_with_options_async(request, headers, runtime)
def terminate_cloud_authorization_with_options(
self,
request: dingtalkyida__1__0_models.TerminateCloudAuthorizationRequest,
headers: dingtalkyida__1__0_models.TerminateCloudAuthorizationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse(),
self.do_roarequest('TerminateCloudAuthorization', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/cloudAuthorizations/terminate', 'json', req, runtime)
)
async def terminate_cloud_authorization_with_options_async(
self,
request: dingtalkyida__1__0_models.TerminateCloudAuthorizationRequest,
headers: dingtalkyida__1__0_models.TerminateCloudAuthorizationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse(),
await self.do_roarequest_async('TerminateCloudAuthorization', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/cloudAuthorizations/terminate', 'json', req, runtime)
)
def get_activity_button_list(
self,
app_type: str,
process_code: str,
activity_id: str,
request: dingtalkyida__1__0_models.GetActivityButtonListRequest,
) -> dingtalkyida__1__0_models.GetActivityButtonListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivityButtonListHeaders()
return self.get_activity_button_list_with_options(app_type, process_code, activity_id, request, headers, runtime)
async def get_activity_button_list_async(
self,
app_type: str,
process_code: str,
activity_id: str,
request: dingtalkyida__1__0_models.GetActivityButtonListRequest,
) -> dingtalkyida__1__0_models.GetActivityButtonListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivityButtonListHeaders()
return await self.get_activity_button_list_with_options_async(app_type, process_code, activity_id, request, headers, runtime)
def get_activity_button_list_with_options(
self,
app_type: str,
process_code: str,
activity_id: str,
request: dingtalkyida__1__0_models.GetActivityButtonListRequest,
headers: dingtalkyida__1__0_models.GetActivityButtonListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivityButtonListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivityButtonListResponse(),
self.do_roarequest('GetActivityButtonList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processDefinitions/buttons/{app_type}/{process_code}/{activity_id}', 'json', req, runtime)
)
async def get_activity_button_list_with_options_async(
self,
app_type: str,
process_code: str,
activity_id: str,
request: dingtalkyida__1__0_models.GetActivityButtonListRequest,
headers: dingtalkyida__1__0_models.GetActivityButtonListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivityButtonListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivityButtonListResponse(),
await self.do_roarequest_async('GetActivityButtonList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processDefinitions/buttons/{app_type}/{process_code}/{activity_id}', 'json', req, runtime)
)
def start_instance(
self,
request: dingtalkyida__1__0_models.StartInstanceRequest,
) -> dingtalkyida__1__0_models.StartInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.StartInstanceHeaders()
return self.start_instance_with_options(request, headers, runtime)
async def start_instance_async(
self,
request: dingtalkyida__1__0_models.StartInstanceRequest,
) -> dingtalkyida__1__0_models.StartInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.StartInstanceHeaders()
return await self.start_instance_with_options_async(request, headers, runtime)
def start_instance_with_options(
self,
request: dingtalkyida__1__0_models.StartInstanceRequest,
headers: dingtalkyida__1__0_models.StartInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.StartInstanceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.process_code):
body['processCode'] = request.process_code
if not UtilClient.is_unset(request.department_id):
body['departmentId'] = request.department_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.StartInstanceResponse(),
self.do_roarequest('StartInstance', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instances/start', 'json', req, runtime)
)
async def start_instance_with_options_async(
self,
request: dingtalkyida__1__0_models.StartInstanceRequest,
headers: dingtalkyida__1__0_models.StartInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.StartInstanceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.process_code):
body['processCode'] = request.process_code
if not UtilClient.is_unset(request.department_id):
body['departmentId'] = request.department_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.StartInstanceResponse(),
await self.do_roarequest_async('StartInstance', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instances/start', 'json', req, runtime)
)
def list_application_information(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationInformationHeaders()
return self.list_application_information_with_options(instance_id, request, headers, runtime)
async def list_application_information_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationInformationHeaders()
return await self.list_application_information_with_options_async(instance_id, request, headers, runtime)
def list_application_information_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationInformationResponse(),
self.do_roarequest('ListApplicationInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/infos/{instance_id}', 'json', req, runtime)
)
async def list_application_information_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationInformationResponse(),
await self.do_roarequest_async('ListApplicationInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/infos/{instance_id}', 'json', req, runtime)
)
def validate_order_upgrade(
self,
request: dingtalkyida__1__0_models.ValidateOrderUpgradeRequest,
) -> dingtalkyida__1__0_models.ValidateOrderUpgradeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderUpgradeHeaders()
return self.validate_order_upgrade_with_options(request, headers, runtime)
async def validate_order_upgrade_async(
self,
request: dingtalkyida__1__0_models.ValidateOrderUpgradeRequest,
) -> dingtalkyida__1__0_models.ValidateOrderUpgradeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderUpgradeHeaders()
return await self.validate_order_upgrade_with_options_async(request, headers, runtime)
def validate_order_upgrade_with_options(
self,
request: dingtalkyida__1__0_models.ValidateOrderUpgradeRequest,
headers: dingtalkyida__1__0_models.ValidateOrderUpgradeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderUpgradeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderUpgradeResponse(),
self.do_roarequest('ValidateOrderUpgrade', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/orderUpgrade/validate', 'json', req, runtime)
)
async def validate_order_upgrade_with_options_async(
self,
request: dingtalkyida__1__0_models.ValidateOrderUpgradeRequest,
headers: dingtalkyida__1__0_models.ValidateOrderUpgradeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderUpgradeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderUpgradeResponse(),
await self.do_roarequest_async('ValidateOrderUpgrade', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/orderUpgrade/validate', 'json', req, runtime)
)
def update_cloud_account_information(
self,
request: dingtalkyida__1__0_models.UpdateCloudAccountInformationRequest,
) -> dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateCloudAccountInformationHeaders()
return self.update_cloud_account_information_with_options(request, headers, runtime)
async def update_cloud_account_information_async(
self,
request: dingtalkyida__1__0_models.UpdateCloudAccountInformationRequest,
) -> dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateCloudAccountInformationHeaders()
return await self.update_cloud_account_information_with_options_async(request, headers, runtime)
def update_cloud_account_information_with_options(
self,
request: dingtalkyida__1__0_models.UpdateCloudAccountInformationRequest,
headers: dingtalkyida__1__0_models.UpdateCloudAccountInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse(),
self.do_roarequest('UpdateCloudAccountInformation', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/apps/cloudAccountInfos', 'json', req, runtime)
)
async def update_cloud_account_information_with_options_async(
self,
request: dingtalkyida__1__0_models.UpdateCloudAccountInformationRequest,
headers: dingtalkyida__1__0_models.UpdateCloudAccountInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse(),
await self.do_roarequest_async('UpdateCloudAccountInformation', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/apps/cloudAccountInfos', 'json', req, runtime)
)
def get_corp_level_by_account_id(
self,
request: dingtalkyida__1__0_models.GetCorpLevelByAccountIdRequest,
) -> dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpLevelByAccountIdHeaders()
return self.get_corp_level_by_account_id_with_options(request, headers, runtime)
async def get_corp_level_by_account_id_async(
self,
request: dingtalkyida__1__0_models.GetCorpLevelByAccountIdRequest,
) -> dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpLevelByAccountIdHeaders()
return await self.get_corp_level_by_account_id_with_options_async(request, headers, runtime)
def get_corp_level_by_account_id_with_options(
self,
request: dingtalkyida__1__0_models.GetCorpLevelByAccountIdRequest,
headers: dingtalkyida__1__0_models.GetCorpLevelByAccountIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.account_id):
query['accountId'] = request.account_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse(),
self.do_roarequest('GetCorpLevelByAccountId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/corpLevel', 'json', req, runtime)
)
async def get_corp_level_by_account_id_with_options_async(
self,
request: dingtalkyida__1__0_models.GetCorpLevelByAccountIdRequest,
headers: dingtalkyida__1__0_models.GetCorpLevelByAccountIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.account_id):
query['accountId'] = request.account_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse(),
await self.do_roarequest_async('GetCorpLevelByAccountId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/corpLevel', 'json', req, runtime)
)
def execute_platform_task(
self,
request: dingtalkyida__1__0_models.ExecutePlatformTaskRequest,
) -> dingtalkyida__1__0_models.ExecutePlatformTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecutePlatformTaskHeaders()
return self.execute_platform_task_with_options(request, headers, runtime)
async def execute_platform_task_async(
self,
request: dingtalkyida__1__0_models.ExecutePlatformTaskRequest,
) -> dingtalkyida__1__0_models.ExecutePlatformTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecutePlatformTaskHeaders()
return await self.execute_platform_task_with_options_async(request, headers, runtime)
def execute_platform_task_with_options(
self,
request: dingtalkyida__1__0_models.ExecutePlatformTaskRequest,
headers: dingtalkyida__1__0_models.ExecutePlatformTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecutePlatformTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.out_result):
body['outResult'] = request.out_result
if not UtilClient.is_unset(request.no_execute_expressions):
body['noExecuteExpressions'] = request.no_execute_expressions
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecutePlatformTaskResponse(),
self.do_roarequest('ExecutePlatformTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/platformTasks/execute', 'none', req, runtime)
)
async def execute_platform_task_with_options_async(
self,
request: dingtalkyida__1__0_models.ExecutePlatformTaskRequest,
headers: dingtalkyida__1__0_models.ExecutePlatformTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecutePlatformTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.out_result):
body['outResult'] = request.out_result
if not UtilClient.is_unset(request.no_execute_expressions):
body['noExecuteExpressions'] = request.no_execute_expressions
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecutePlatformTaskResponse(),
await self.do_roarequest_async('ExecutePlatformTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/platformTasks/execute', 'none', req, runtime)
)
def search_form_datas(
self,
request: dingtalkyida__1__0_models.SearchFormDatasRequest,
) -> dingtalkyida__1__0_models.SearchFormDatasResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchFormDatasHeaders()
return self.search_form_datas_with_options(request, headers, runtime)
async def search_form_datas_async(
self,
request: dingtalkyida__1__0_models.SearchFormDatasRequest,
) -> dingtalkyida__1__0_models.SearchFormDatasResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchFormDatasHeaders()
return await self.search_form_datas_with_options_async(request, headers, runtime)
def search_form_datas_with_options(
self,
request: dingtalkyida__1__0_models.SearchFormDatasRequest,
headers: dingtalkyida__1__0_models.SearchFormDatasHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchFormDatasResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.current_page):
body['currentPage'] = request.current_page
if not UtilClient.is_unset(request.page_size):
body['pageSize'] = request.page_size
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.dynamic_order):
body['dynamicOrder'] = request.dynamic_order
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchFormDatasResponse(),
self.do_roarequest('SearchFormDatas', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances/search', 'json', req, runtime)
)
async def search_form_datas_with_options_async(
self,
request: dingtalkyida__1__0_models.SearchFormDatasRequest,
headers: dingtalkyida__1__0_models.SearchFormDatasHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchFormDatasResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.current_page):
body['currentPage'] = request.current_page
if not UtilClient.is_unset(request.page_size):
body['pageSize'] = request.page_size
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.dynamic_order):
body['dynamicOrder'] = request.dynamic_order
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchFormDatasResponse(),
await self.do_roarequest_async('SearchFormDatas', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances/search', 'json', req, runtime)
)
def search_activation_code(
self,
request: dingtalkyida__1__0_models.SearchActivationCodeRequest,
) -> dingtalkyida__1__0_models.SearchActivationCodeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchActivationCodeHeaders()
return self.search_activation_code_with_options(request, headers, runtime)
async def search_activation_code_async(
self,
request: dingtalkyida__1__0_models.SearchActivationCodeRequest,
) -> dingtalkyida__1__0_models.SearchActivationCodeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchActivationCodeHeaders()
return await self.search_activation_code_with_options_async(request, headers, runtime)
def search_activation_code_with_options(
self,
request: dingtalkyida__1__0_models.SearchActivationCodeRequest,
headers: dingtalkyida__1__0_models.SearchActivationCodeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchActivationCodeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchActivationCodeResponse(),
self.do_roarequest('SearchActivationCode', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/activationCode/information', 'json', req, runtime)
)
async def search_activation_code_with_options_async(
self,
request: dingtalkyida__1__0_models.SearchActivationCodeRequest,
headers: dingtalkyida__1__0_models.SearchActivationCodeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchActivationCodeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchActivationCodeResponse(),
await self.do_roarequest_async('SearchActivationCode', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/activationCode/information', 'json', req, runtime)
)
def save_print_tpl_detail_info(
self,
request: dingtalkyida__1__0_models.SavePrintTplDetailInfoRequest,
) -> dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SavePrintTplDetailInfoHeaders()
return self.save_print_tpl_detail_info_with_options(request, headers, runtime)
async def save_print_tpl_detail_info_async(
self,
request: dingtalkyida__1__0_models.SavePrintTplDetailInfoRequest,
) -> dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SavePrintTplDetailInfoHeaders()
return await self.save_print_tpl_detail_info_with_options_async(request, headers, runtime)
def save_print_tpl_detail_info_with_options(
self,
request: dingtalkyida__1__0_models.SavePrintTplDetailInfoRequest,
headers: dingtalkyida__1__0_models.SavePrintTplDetailInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.vm):
body['vm'] = request.vm
if not UtilClient.is_unset(request.form_version):
body['formVersion'] = request.form_version
if not UtilClient.is_unset(request.template_id):
body['templateId'] = request.template_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.setting):
body['setting'] = request.setting
if not UtilClient.is_unset(request.title):
body['title'] = request.title
if not UtilClient.is_unset(request.description):
body['description'] = request.description
if not UtilClient.is_unset(request.file_name_config):
body['fileNameConfig'] = request.file_name_config
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse(),
self.do_roarequest('SavePrintTplDetailInfo', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/printTemplates/printTplDetailInfos', 'json', req, runtime)
)
async def save_print_tpl_detail_info_with_options_async(
self,
request: dingtalkyida__1__0_models.SavePrintTplDetailInfoRequest,
headers: dingtalkyida__1__0_models.SavePrintTplDetailInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.vm):
body['vm'] = request.vm
if not UtilClient.is_unset(request.form_version):
body['formVersion'] = request.form_version
if not UtilClient.is_unset(request.template_id):
body['templateId'] = request.template_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.setting):
body['setting'] = request.setting
if not UtilClient.is_unset(request.title):
body['title'] = request.title
if not UtilClient.is_unset(request.description):
body['description'] = request.description
if not UtilClient.is_unset(request.file_name_config):
body['fileNameConfig'] = request.file_name_config
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse(),
await self.do_roarequest_async('SavePrintTplDetailInfo', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/printTemplates/printTplDetailInfos', 'json', req, runtime)
)
def search_employee_field_values(
self,
request: dingtalkyida__1__0_models.SearchEmployeeFieldValuesRequest,
) -> dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchEmployeeFieldValuesHeaders()
return self.search_employee_field_values_with_options(request, headers, runtime)
async def search_employee_field_values_async(
self,
request: dingtalkyida__1__0_models.SearchEmployeeFieldValuesRequest,
) -> dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchEmployeeFieldValuesHeaders()
return await self.search_employee_field_values_with_options_async(request, headers, runtime)
def search_employee_field_values_with_options(
self,
request: dingtalkyida__1__0_models.SearchEmployeeFieldValuesRequest,
headers: dingtalkyida__1__0_models.SearchEmployeeFieldValuesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.target_field_json):
body['targetFieldJson'] = request.target_field_json
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse(),
self.do_roarequest('SearchEmployeeFieldValues', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/employeeFields', 'json', req, runtime)
)
async def search_employee_field_values_with_options_async(
self,
request: dingtalkyida__1__0_models.SearchEmployeeFieldValuesRequest,
headers: dingtalkyida__1__0_models.SearchEmployeeFieldValuesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.target_field_json):
body['targetFieldJson'] = request.target_field_json
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse(),
await self.do_roarequest_async('SearchEmployeeFieldValues', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/employeeFields', 'json', req, runtime)
)
def update_form_data(
self,
request: dingtalkyida__1__0_models.UpdateFormDataRequest,
) -> dingtalkyida__1__0_models.UpdateFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateFormDataHeaders()
return self.update_form_data_with_options(request, headers, runtime)
async def update_form_data_async(
self,
request: dingtalkyida__1__0_models.UpdateFormDataRequest,
) -> dingtalkyida__1__0_models.UpdateFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateFormDataHeaders()
return await self.update_form_data_with_options_async(request, headers, runtime)
def update_form_data_with_options(
self,
request: dingtalkyida__1__0_models.UpdateFormDataRequest,
headers: dingtalkyida__1__0_models.UpdateFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateFormDataResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
body['formInstanceId'] = request.form_instance_id
if not UtilClient.is_unset(request.use_latest_version):
body['useLatestVersion'] = request.use_latest_version
if not UtilClient.is_unset(request.update_form_data_json):
body['updateFormDataJson'] = request.update_form_data_json
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateFormDataResponse(),
self.do_roarequest('UpdateFormData', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/forms/instances', 'none', req, runtime)
)
async def update_form_data_with_options_async(
self,
request: dingtalkyida__1__0_models.UpdateFormDataRequest,
headers: dingtalkyida__1__0_models.UpdateFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateFormDataResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
body['formInstanceId'] = request.form_instance_id
if not UtilClient.is_unset(request.use_latest_version):
body['useLatestVersion'] = request.use_latest_version
if not UtilClient.is_unset(request.update_form_data_json):
body['updateFormDataJson'] = request.update_form_data_json
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateFormDataResponse(),
await self.do_roarequest_async('UpdateFormData', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/forms/instances', 'none', req, runtime)
)
def get_instance_id_list(
self,
request: dingtalkyida__1__0_models.GetInstanceIdListRequest,
) -> dingtalkyida__1__0_models.GetInstanceIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstanceIdListHeaders()
return self.get_instance_id_list_with_options(request, headers, runtime)
async def get_instance_id_list_async(
self,
request: dingtalkyida__1__0_models.GetInstanceIdListRequest,
) -> dingtalkyida__1__0_models.GetInstanceIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstanceIdListHeaders()
return await self.get_instance_id_list_with_options_async(request, headers, runtime)
def get_instance_id_list_with_options(
self,
request: dingtalkyida__1__0_models.GetInstanceIdListRequest,
headers: dingtalkyida__1__0_models.GetInstanceIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstanceIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
body = {}
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.instance_status):
body['instanceStatus'] = request.instance_status
if not UtilClient.is_unset(request.approved_result):
body['approvedResult'] = request.approved_result
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstanceIdListResponse(),
self.do_roarequest('GetInstanceIdList', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instanceIds', 'json', req, runtime)
)
async def get_instance_id_list_with_options_async(
self,
request: dingtalkyida__1__0_models.GetInstanceIdListRequest,
headers: dingtalkyida__1__0_models.GetInstanceIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstanceIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
body = {}
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.instance_status):
body['instanceStatus'] = request.instance_status
if not UtilClient.is_unset(request.approved_result):
body['approvedResult'] = request.approved_result
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstanceIdListResponse(),
await self.do_roarequest_async('GetInstanceIdList', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instanceIds', 'json', req, runtime)
)
def get_operation_records(
self,
request: dingtalkyida__1__0_models.GetOperationRecordsRequest,
) -> dingtalkyida__1__0_models.GetOperationRecordsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetOperationRecordsHeaders()
return self.get_operation_records_with_options(request, headers, runtime)
async def get_operation_records_async(
self,
request: dingtalkyida__1__0_models.GetOperationRecordsRequest,
) -> dingtalkyida__1__0_models.GetOperationRecordsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetOperationRecordsHeaders()
return await self.get_operation_records_with_options_async(request, headers, runtime)
def get_operation_records_with_options(
self,
request: dingtalkyida__1__0_models.GetOperationRecordsRequest,
headers: dingtalkyida__1__0_models.GetOperationRecordsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetOperationRecordsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetOperationRecordsResponse(),
self.do_roarequest('GetOperationRecords', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/operationRecords', 'json', req, runtime)
)
async def get_operation_records_with_options_async(
self,
request: dingtalkyida__1__0_models.GetOperationRecordsRequest,
headers: dingtalkyida__1__0_models.GetOperationRecordsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetOperationRecordsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetOperationRecordsResponse(),
await self.do_roarequest_async('GetOperationRecords', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/operationRecords', 'json', req, runtime)
)
def get_platform_resource(
self,
request: dingtalkyida__1__0_models.GetPlatformResourceRequest,
) -> dingtalkyida__1__0_models.GetPlatformResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPlatformResourceHeaders()
return self.get_platform_resource_with_options(request, headers, runtime)
async def get_platform_resource_async(
self,
request: dingtalkyida__1__0_models.GetPlatformResourceRequest,
) -> dingtalkyida__1__0_models.GetPlatformResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPlatformResourceHeaders()
return await self.get_platform_resource_with_options_async(request, headers, runtime)
def get_platform_resource_with_options(
self,
request: dingtalkyida__1__0_models.GetPlatformResourceRequest,
headers: dingtalkyida__1__0_models.GetPlatformResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPlatformResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPlatformResourceResponse(),
self.do_roarequest('GetPlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/platformResources', 'json', req, runtime)
)
async def get_platform_resource_with_options_async(
self,
request: dingtalkyida__1__0_models.GetPlatformResourceRequest,
headers: dingtalkyida__1__0_models.GetPlatformResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPlatformResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPlatformResourceResponse(),
await self.do_roarequest_async('GetPlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/platformResources', 'json', req, runtime)
)
def list_connector_information(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListConnectorInformationHeaders()
return self.list_connector_information_with_options(instance_id, request, headers, runtime)
async def list_connector_information_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListConnectorInformationHeaders()
return await self.list_connector_information_with_options_async(instance_id, request, headers, runtime)
def list_connector_information_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
headers: dingtalkyida__1__0_models.ListConnectorInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListConnectorInformationResponse(),
self.do_roarequest('ListConnectorInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/plugins/infos/{instance_id}', 'json', req, runtime)
)
async def list_connector_information_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
headers: dingtalkyida__1__0_models.ListConnectorInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListConnectorInformationResponse(),
await self.do_roarequest_async('ListConnectorInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/plugins/infos/{instance_id}', 'json', req, runtime)
)
def register_accounts(
self,
request: dingtalkyida__1__0_models.RegisterAccountsRequest,
) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RegisterAccountsHeaders()
return self.register_accounts_with_options(request, headers, runtime)
async def register_accounts_async(
self,
request: dingtalkyida__1__0_models.RegisterAccountsRequest,
) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RegisterAccountsHeaders()
return await self.register_accounts_with_options_async(request, headers, runtime)
def register_accounts_with_options(
self,
request: dingtalkyida__1__0_models.RegisterAccountsRequest,
headers: dingtalkyida__1__0_models.RegisterAccountsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.active_code):
body['activeCode'] = request.active_code
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RegisterAccountsResponse(),
self.do_roarequest('RegisterAccounts', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/accounts/register', 'json', req, runtime)
)
async def register_accounts_with_options_async(
self,
request: dingtalkyida__1__0_models.RegisterAccountsRequest,
headers: dingtalkyida__1__0_models.RegisterAccountsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.active_code):
body['activeCode'] = request.active_code
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RegisterAccountsResponse(),
await self.do_roarequest_async('RegisterAccounts', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/accounts/register', 'json', req, runtime)
)
def get_notify_me(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetNotifyMeHeaders()
return self.get_notify_me_with_options(user_id, request, headers, runtime)
async def get_notify_me_async(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetNotifyMeHeaders()
return await self.get_notify_me_with_options_async(user_id, request, headers, runtime)
def get_notify_me_with_options(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
headers: dingtalkyida__1__0_models.GetNotifyMeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.token):
query['token'] = request.token
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.instance_create_from_time_gmt):
query['instanceCreateFromTimeGMT'] = request.instance_create_from_time_gmt
if not UtilClient.is_unset(request.instance_create_to_time_gmt):
query['instanceCreateToTimeGMT'] = request.instance_create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetNotifyMeResponse(),
self.do_roarequest('GetNotifyMe', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/corpNotifications/{user_id}', 'json', req, runtime)
)
async def get_notify_me_with_options_async(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
headers: dingtalkyida__1__0_models.GetNotifyMeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.token):
query['token'] = request.token
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.instance_create_from_time_gmt):
query['instanceCreateFromTimeGMT'] = request.instance_create_from_time_gmt
if not UtilClient.is_unset(request.instance_create_to_time_gmt):
query['instanceCreateToTimeGMT'] = request.instance_create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetNotifyMeResponse(),
await self.do_roarequest_async('GetNotifyMe', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/corpNotifications/{user_id}', 'json', req, runtime)
)
def expire_commodity(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExpireCommodityHeaders()
return self.expire_commodity_with_options(request, headers, runtime)
async def expire_commodity_async(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExpireCommodityHeaders()
return await self.expire_commodity_with_options_async(request, headers, runtime)
def expire_commodity_with_options(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
headers: dingtalkyida__1__0_models.ExpireCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExpireCommodityResponse(),
self.do_roarequest('ExpireCommodity', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/appAuth/commodities/expire', 'json', req, runtime)
)
async def expire_commodity_with_options_async(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
headers: dingtalkyida__1__0_models.ExpireCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExpireCommodityResponse(),
await self.do_roarequest_async('ExpireCommodity', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/appAuth/commodities/expire', 'json', req, runtime)
)
def get_instance_by_id(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstanceByIdHeaders()
return self.get_instance_by_id_with_options(id, request, headers, runtime)
async def get_instance_by_id_async(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstanceByIdHeaders()
return await self.get_instance_by_id_with_options_async(id, request, headers, runtime)
def get_instance_by_id_with_options(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
headers: dingtalkyida__1__0_models.GetInstanceByIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstanceByIdResponse(),
self.do_roarequest('GetInstanceById', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/instancesInfos/{id}', 'json', req, runtime)
)
async def get_instance_by_id_with_options_async(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
headers: dingtalkyida__1__0_models.GetInstanceByIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstanceByIdResponse(),
await self.do_roarequest_async('GetInstanceById', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/instancesInfos/{id}', 'json', req, runtime)
)
def redirect_task(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RedirectTaskHeaders()
return self.redirect_task_with_options(request, headers, runtime)
async def redirect_task_async(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RedirectTaskHeaders()
return await self.redirect_task_with_options_async(request, headers, runtime)
def redirect_task_with_options(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
headers: dingtalkyida__1__0_models.RedirectTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.by_manager):
body['byManager'] = request.by_manager
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.now_action_executor_id):
body['nowActionExecutorId'] = request.now_action_executor_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RedirectTaskResponse(),
self.do_roarequest('RedirectTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/redirect', 'none', req, runtime)
)
async def redirect_task_with_options_async(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
headers: dingtalkyida__1__0_models.RedirectTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.by_manager):
body['byManager'] = request.by_manager
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.now_action_executor_id):
body['nowActionExecutorId'] = request.now_action_executor_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RedirectTaskResponse(),
await self.do_roarequest_async('RedirectTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/redirect', 'none', req, runtime)
)
def validate_order_update(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateOrderUpdateRequest,
) -> dingtalkyida__1__0_models.ValidateOrderUpdateResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderUpdateHeaders()
return self.validate_order_update_with_options(instance_id, request, headers, runtime)
async def validate_order_update_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateOrderUpdateRequest,
) -> dingtalkyida__1__0_models.ValidateOrderUpdateResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderUpdateHeaders()
return await self.validate_order_update_with_options_async(instance_id, request, headers, runtime)
def validate_order_update_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateOrderUpdateRequest,
headers: dingtalkyida__1__0_models.ValidateOrderUpdateHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderUpdateResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderUpdateResponse(),
self.do_roarequest('ValidateOrderUpdate', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/orders/renewalReviews/{instance_id}', 'json', req, runtime)
)
async def validate_order_update_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateOrderUpdateRequest,
headers: dingtalkyida__1__0_models.ValidateOrderUpdateHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderUpdateResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderUpdateResponse(),
await self.do_roarequest_async('ValidateOrderUpdate', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/orders/renewalReviews/{instance_id}', 'json', req, runtime)
)
def get_form_component_definition_list(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.GetFormComponentDefinitionListRequest,
) -> dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetFormComponentDefinitionListHeaders()
return self.get_form_component_definition_list_with_options(app_type, form_uuid, request, headers, runtime)
async def get_form_component_definition_list_async(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.GetFormComponentDefinitionListRequest,
) -> dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetFormComponentDefinitionListHeaders()
return await self.get_form_component_definition_list_with_options_async(app_type, form_uuid, request, headers, runtime)
def get_form_component_definition_list_with_options(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.GetFormComponentDefinitionListRequest,
headers: dingtalkyida__1__0_models.GetFormComponentDefinitionListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.version):
query['version'] = request.version
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse(),
self.do_roarequest('GetFormComponentDefinitionList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/definitions/{app_type}/{form_uuid}', 'json', req, runtime)
)
async def get_form_component_definition_list_with_options_async(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.GetFormComponentDefinitionListRequest,
headers: dingtalkyida__1__0_models.GetFormComponentDefinitionListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.version):
query['version'] = request.version
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse(),
await self.do_roarequest_async('GetFormComponentDefinitionList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/definitions/{app_type}/{form_uuid}', 'json', req, runtime)
)
def get_print_app_info(
self,
request: dingtalkyida__1__0_models.GetPrintAppInfoRequest,
) -> dingtalkyida__1__0_models.GetPrintAppInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPrintAppInfoHeaders()
return self.get_print_app_info_with_options(request, headers, runtime)
async def get_print_app_info_async(
self,
request: dingtalkyida__1__0_models.GetPrintAppInfoRequest,
) -> dingtalkyida__1__0_models.GetPrintAppInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPrintAppInfoHeaders()
return await self.get_print_app_info_with_options_async(request, headers, runtime)
def get_print_app_info_with_options(
self,
request: dingtalkyida__1__0_models.GetPrintAppInfoRequest,
headers: dingtalkyida__1__0_models.GetPrintAppInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPrintAppInfoResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.name_like):
query['nameLike'] = request.name_like
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPrintAppInfoResponse(),
self.do_roarequest('GetPrintAppInfo', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/printTemplates/printAppInfos', 'json', req, runtime)
)
async def get_print_app_info_with_options_async(
self,
request: dingtalkyida__1__0_models.GetPrintAppInfoRequest,
headers: dingtalkyida__1__0_models.GetPrintAppInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPrintAppInfoResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.name_like):
query['nameLike'] = request.name_like
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPrintAppInfoResponse(),
await self.do_roarequest_async('GetPrintAppInfo', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/printTemplates/printAppInfos', 'json', req, runtime)
)
def save_form_data(
self,
request: dingtalkyida__1__0_models.SaveFormDataRequest,
) -> dingtalkyida__1__0_models.SaveFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SaveFormDataHeaders()
return self.save_form_data_with_options(request, headers, runtime)
async def save_form_data_async(
self,
request: dingtalkyida__1__0_models.SaveFormDataRequest,
) -> dingtalkyida__1__0_models.SaveFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SaveFormDataHeaders()
return await self.save_form_data_with_options_async(request, headers, runtime)
def save_form_data_with_options(
self,
request: dingtalkyida__1__0_models.SaveFormDataRequest,
headers: dingtalkyida__1__0_models.SaveFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SaveFormDataResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SaveFormDataResponse(),
self.do_roarequest('SaveFormData', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances', 'json', req, runtime)
)
async def save_form_data_with_options_async(
self,
request: dingtalkyida__1__0_models.SaveFormDataRequest,
headers: dingtalkyida__1__0_models.SaveFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SaveFormDataResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SaveFormDataResponse(),
await self.do_roarequest_async('SaveFormData', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances', 'json', req, runtime)
)
def get_me_corp_submission(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetMeCorpSubmissionRequest,
) -> dingtalkyida__1__0_models.GetMeCorpSubmissionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetMeCorpSubmissionHeaders()
return self.get_me_corp_submission_with_options(user_id, request, headers, runtime)
async def get_me_corp_submission_async(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetMeCorpSubmissionRequest,
) -> dingtalkyida__1__0_models.GetMeCorpSubmissionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetMeCorpSubmissionHeaders()
return await self.get_me_corp_submission_with_options_async(user_id, request, headers, runtime)
def get_me_corp_submission_with_options(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetMeCorpSubmissionRequest,
headers: dingtalkyida__1__0_models.GetMeCorpSubmissionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetMeCorpSubmissionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.token):
query['token'] = request.token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetMeCorpSubmissionResponse(),
self.do_roarequest('GetMeCorpSubmission', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/myCorpSubmission/{user_id}', 'json', req, runtime)
)
async def get_me_corp_submission_with_options_async(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetMeCorpSubmissionRequest,
headers: dingtalkyida__1__0_models.GetMeCorpSubmissionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetMeCorpSubmissionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.token):
query['token'] = request.token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetMeCorpSubmissionResponse(),
await self.do_roarequest_async('GetMeCorpSubmission', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/myCorpSubmission/{user_id}', 'json', req, runtime)
)
def delete_form_data(
self,
request: dingtalkyida__1__0_models.DeleteFormDataRequest,
) -> dingtalkyida__1__0_models.DeleteFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteFormDataHeaders()
return self.delete_form_data_with_options(request, headers, runtime)
async def delete_form_data_async(
self,
request: dingtalkyida__1__0_models.DeleteFormDataRequest,
) -> dingtalkyida__1__0_models.DeleteFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteFormDataHeaders()
return await self.delete_form_data_with_options_async(request, headers, runtime)
def delete_form_data_with_options(
self,
request: dingtalkyida__1__0_models.DeleteFormDataRequest,
headers: dingtalkyida__1__0_models.DeleteFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteFormDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
query['formInstanceId'] = request.form_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteFormDataResponse(),
self.do_roarequest('DeleteFormData', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/forms/instances', 'none', req, runtime)
)
async def delete_form_data_with_options_async(
self,
request: dingtalkyida__1__0_models.DeleteFormDataRequest,
headers: dingtalkyida__1__0_models.DeleteFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteFormDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
query['formInstanceId'] = request.form_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteFormDataResponse(),
await self.do_roarequest_async('DeleteFormData', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/forms/instances', 'none', req, runtime)
)
def search_form_data_id_list(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.SearchFormDataIdListRequest,
) -> dingtalkyida__1__0_models.SearchFormDataIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchFormDataIdListHeaders()
return self.search_form_data_id_list_with_options(app_type, form_uuid, request, headers, runtime)
async def search_form_data_id_list_async(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.SearchFormDataIdListRequest,
) -> dingtalkyida__1__0_models.SearchFormDataIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchFormDataIdListHeaders()
return await self.search_form_data_id_list_with_options_async(app_type, form_uuid, request, headers, runtime)
def search_form_data_id_list_with_options(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.SearchFormDataIdListRequest,
headers: dingtalkyida__1__0_models.SearchFormDataIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchFormDataIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
body = {}
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchFormDataIdListResponse(),
self.do_roarequest('SearchFormDataIdList', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances/ids/{app_type}/{form_uuid}', 'json', req, runtime)
)
async def search_form_data_id_list_with_options_async(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.SearchFormDataIdListRequest,
headers: dingtalkyida__1__0_models.SearchFormDataIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchFormDataIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
body = {}
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchFormDataIdListResponse(),
await self.do_roarequest_async('SearchFormDataIdList', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances/ids/{app_type}/{form_uuid}', 'json', req, runtime)
)
def get_activation_code_by_caller_union_id(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdRequest,
) -> dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdHeaders()
return self.get_activation_code_by_caller_union_id_with_options(caller_uid, request, headers, runtime)
async def get_activation_code_by_caller_union_id_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdRequest,
) -> dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdHeaders()
return await self.get_activation_code_by_caller_union_id_with_options_async(caller_uid, request, headers, runtime)
def get_activation_code_by_caller_union_id_with_options(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdRequest,
headers: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse(),
self.do_roarequest('GetActivationCodeByCallerUnionId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applications/activationCodes/{caller_uid}', 'json', req, runtime)
)
async def get_activation_code_by_caller_union_id_with_options_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdRequest,
headers: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse(),
await self.do_roarequest_async('GetActivationCodeByCallerUnionId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applications/activationCodes/{caller_uid}', 'json', req, runtime)
)
def get_form_data_by_id(
self,
id: str,
request: dingtalkyida__1__0_models.GetFormDataByIDRequest,
) -> dingtalkyida__1__0_models.GetFormDataByIDResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetFormDataByIDHeaders()
return self.get_form_data_by_idwith_options(id, request, headers, runtime)
async def get_form_data_by_id_async(
self,
id: str,
request: dingtalkyida__1__0_models.GetFormDataByIDRequest,
) -> dingtalkyida__1__0_models.GetFormDataByIDResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetFormDataByIDHeaders()
return await self.get_form_data_by_idwith_options_async(id, request, headers, runtime)
def get_form_data_by_idwith_options(
self,
id: str,
request: dingtalkyida__1__0_models.GetFormDataByIDRequest,
headers: dingtalkyida__1__0_models.GetFormDataByIDHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetFormDataByIDResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetFormDataByIDResponse(),
self.do_roarequest('GetFormDataByID', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/instances/{id}', 'json', req, runtime)
)
async def get_form_data_by_idwith_options_async(
self,
id: str,
request: dingtalkyida__1__0_models.GetFormDataByIDRequest,
headers: dingtalkyida__1__0_models.GetFormDataByIDHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetFormDataByIDResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetFormDataByIDResponse(),
await self.do_roarequest_async('GetFormDataByID', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/instances/{id}', 'json', req, runtime)
)
def refund_commodity(
self,
request: dingtalkyida__1__0_models.RefundCommodityRequest,
) -> dingtalkyida__1__0_models.RefundCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RefundCommodityHeaders()
return self.refund_commodity_with_options(request, headers, runtime)
async def refund_commodity_async(
self,
request: dingtalkyida__1__0_models.RefundCommodityRequest,
) -> dingtalkyida__1__0_models.RefundCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RefundCommodityHeaders()
return await self.refund_commodity_with_options_async(request, headers, runtime)
def refund_commodity_with_options(
self,
request: dingtalkyida__1__0_models.RefundCommodityRequest,
headers: dingtalkyida__1__0_models.RefundCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RefundCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RefundCommodityResponse(),
self.do_roarequest('RefundCommodity', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/appAuth/commodities/refund', 'json', req, runtime)
)
async def refund_commodity_with_options_async(
self,
request: dingtalkyida__1__0_models.RefundCommodityRequest,
headers: dingtalkyida__1__0_models.RefundCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RefundCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RefundCommodityResponse(),
await self.do_roarequest_async('RefundCommodity', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/appAuth/commodities/refund', 'json', req, runtime)
)
def delete_sequence(
self,
request: dingtalkyida__1__0_models.DeleteSequenceRequest,
) -> dingtalkyida__1__0_models.DeleteSequenceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteSequenceHeaders()
return self.delete_sequence_with_options(request, headers, runtime)
async def delete_sequence_async(
self,
request: dingtalkyida__1__0_models.DeleteSequenceRequest,
) -> dingtalkyida__1__0_models.DeleteSequenceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteSequenceHeaders()
return await self.delete_sequence_with_options_async(request, headers, runtime)
def delete_sequence_with_options(
self,
request: dingtalkyida__1__0_models.DeleteSequenceRequest,
headers: dingtalkyida__1__0_models.DeleteSequenceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteSequenceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.sequence):
query['sequence'] = request.sequence
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteSequenceResponse(),
self.do_roarequest('DeleteSequence', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/forms/deleteSequence', 'none', req, runtime)
)
async def delete_sequence_with_options_async(
self,
request: dingtalkyida__1__0_models.DeleteSequenceRequest,
headers: dingtalkyida__1__0_models.DeleteSequenceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteSequenceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.sequence):
query['sequence'] = request.sequence
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteSequenceResponse(),
await self.do_roarequest_async('DeleteSequence', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/forms/deleteSequence', 'none', req, runtime)
)
def release_commodity(
self,
request: dingtalkyida__1__0_models.ReleaseCommodityRequest,
) -> dingtalkyida__1__0_models.ReleaseCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ReleaseCommodityHeaders()
return self.release_commodity_with_options(request, headers, runtime)
async def release_commodity_async(
self,
request: dingtalkyida__1__0_models.ReleaseCommodityRequest,
) -> dingtalkyida__1__0_models.ReleaseCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ReleaseCommodityHeaders()
return await self.release_commodity_with_options_async(request, headers, runtime)
def release_commodity_with_options(
self,
request: dingtalkyida__1__0_models.ReleaseCommodityRequest,
headers: dingtalkyida__1__0_models.ReleaseCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ReleaseCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ReleaseCommodityResponse(),
self.do_roarequest('ReleaseCommodity', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/appAuth/commodities/release', 'json', req, runtime)
)
async def release_commodity_with_options_async(
self,
request: dingtalkyida__1__0_models.ReleaseCommodityRequest,
headers: dingtalkyida__1__0_models.ReleaseCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ReleaseCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ReleaseCommodityResponse(),
await self.do_roarequest_async('ReleaseCommodity', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/appAuth/commodities/release', 'json', req, runtime)
)
def render_batch_callback(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenderBatchCallbackHeaders()
return self.render_batch_callback_with_options(request, headers, runtime)
async def render_batch_callback_async(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenderBatchCallbackHeaders()
return await self.render_batch_callback_with_options_async(request, headers, runtime)
def render_batch_callback_with_options(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
headers: dingtalkyida__1__0_models.RenderBatchCallbackHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.oss_url):
body['ossUrl'] = request.oss_url
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.file_size):
body['fileSize'] = request.file_size
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.namespace):
body['namespace'] = request.namespace
if not UtilClient.is_unset(request.time_zone):
body['timeZone'] = request.time_zone
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.source):
body['source'] = request.source
if not UtilClient.is_unset(request.sequence_id):
body['sequenceId'] = request.sequence_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenderBatchCallbackResponse(),
self.do_roarequest('RenderBatchCallback', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/printings/callbacks/batch', 'none', req, runtime)
)
async def render_batch_callback_with_options_async(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
headers: dingtalkyida__1__0_models.RenderBatchCallbackHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.oss_url):
body['ossUrl'] = request.oss_url
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.file_size):
body['fileSize'] = request.file_size
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.namespace):
body['namespace'] = request.namespace
if not UtilClient.is_unset(request.time_zone):
body['timeZone'] = request.time_zone
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.source):
body['source'] = request.source
if not UtilClient.is_unset(request.sequence_id):
body['sequenceId'] = request.sequence_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenderBatchCallbackResponse(),
await self.do_roarequest_async('RenderBatchCallback', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/printings/callbacks/batch', 'none', req, runtime)
)
def get_open_url(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetOpenUrlHeaders()
return self.get_open_url_with_options(app_type, request, headers, runtime)
async def get_open_url_async(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetOpenUrlHeaders()
return await self.get_open_url_with_options_async(app_type, request, headers, runtime)
def get_open_url_with_options(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
headers: dingtalkyida__1__0_models.GetOpenUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.file_url):
query['fileUrl'] = request.file_url
if not UtilClient.is_unset(request.timeout):
query['timeout'] = request.timeout
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetOpenUrlResponse(),
self.do_roarequest('GetOpenUrl', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/temporaryUrls/{app_type}', 'json', req, runtime)
)
async def get_open_url_with_options_async(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
headers: dingtalkyida__1__0_models.GetOpenUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.file_url):
query['fileUrl'] = request.file_url
if not UtilClient.is_unset(request.timeout):
query['timeout'] = request.timeout
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetOpenUrlResponse(),
await self.do_roarequest_async('GetOpenUrl', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/temporaryUrls/{app_type}', 'json', req, runtime)
)
def get_sale_user_info_by_user_id(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders()
return self.get_sale_user_info_by_user_id_with_options(request, headers, runtime)
async def get_sale_user_info_by_user_id_async(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders()
return await self.get_sale_user_info_by_user_id_with_options_async(request, headers, runtime)
def get_sale_user_info_by_user_id_with_options(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
headers: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.namespace):
query['namespace'] = request.namespace
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse(),
self.do_roarequest('GetSaleUserInfoByUserId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/saleUserInfo', 'json', req, runtime)
)
async def get_sale_user_info_by_user_id_with_options_async(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
headers: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.namespace):
query['namespace'] = request.namespace
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse(),
await self.do_roarequest_async('GetSaleUserInfoByUserId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/saleUserInfo', 'json', req, runtime)
)
def validate_application_authorization_order(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders()
return self.validate_application_authorization_order_with_options(instance_id, request, headers, runtime)
async def validate_application_authorization_order_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders()
return await self.validate_application_authorization_order_with_options_async(instance_id, request, headers, runtime)
def validate_application_authorization_order_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse(),
self.do_roarequest('ValidateApplicationAuthorizationOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applicationOrderUpdateAuthorizations/{instance_id}', 'json', req, runtime)
)
async def validate_application_authorization_order_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse(),
await self.do_roarequest_async('ValidateApplicationAuthorizationOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applicationOrderUpdateAuthorizations/{instance_id}', 'json', req, runtime)
)
def execute_task(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecuteTaskHeaders()
return self.execute_task_with_options(request, headers, runtime)
async def execute_task_async(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecuteTaskHeaders()
return await self.execute_task_with_options_async(request, headers, runtime)
def execute_task_with_options(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
headers: dingtalkyida__1__0_models.ExecuteTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.out_result):
body['outResult'] = request.out_result
if not UtilClient.is_unset(request.no_execute_expressions):
body['noExecuteExpressions'] = request.no_execute_expressions
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecuteTaskResponse(),
self.do_roarequest('ExecuteTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/execute', 'none', req, runtime)
)
async def execute_task_with_options_async(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
headers: dingtalkyida__1__0_models.ExecuteTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.out_result):
body['outResult'] = request.out_result
if not UtilClient.is_unset(request.no_execute_expressions):
body['noExecuteExpressions'] = request.no_execute_expressions
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecuteTaskResponse(),
await self.do_roarequest_async('ExecuteTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/execute', 'none', req, runtime)
)
def delete_instance(
self,
request: dingtalkyida__1__0_models.DeleteInstanceRequest,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteInstanceHeaders()
return self.delete_instance_with_options(request, headers, runtime)
async def delete_instance_async(
self,
request: dingtalkyida__1__0_models.DeleteInstanceRequest,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteInstanceHeaders()
return await self.delete_instance_with_options_async(request, headers, runtime)
def delete_instance_with_options(
self,
request: dingtalkyida__1__0_models.DeleteInstanceRequest,
headers: dingtalkyida__1__0_models.DeleteInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteInstanceResponse(),
self.do_roarequest('DeleteInstance', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/processes/instances', 'none', req, runtime)
)
async def delete_instance_with_options_async(
self,
request: dingtalkyida__1__0_models.DeleteInstanceRequest,
headers: dingtalkyida__1__0_models.DeleteInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteInstanceResponse(),
await self.do_roarequest_async('DeleteInstance', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/processes/instances', 'none', req, runtime)
) | dingtalk/python/alibabacloud_dingtalk/yida_1_0/client.py | from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_dingtalk.yida_1_0 import models as dingtalkyida__1__0_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
"""
*\
"""
def __init__(
self,
config: open_api_models.Config,
):
super().__init__(config)
self._endpoint_rule = ''
if UtilClient.empty(self._endpoint):
self._endpoint = 'api.dingtalk.com'
def update_status(
self,
request: dingtalkyida__1__0_models.UpdateStatusRequest,
) -> dingtalkyida__1__0_models.UpdateStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateStatusHeaders()
return self.update_status_with_options(request, headers, runtime)
async def update_status_async(
self,
request: dingtalkyida__1__0_models.UpdateStatusRequest,
) -> dingtalkyida__1__0_models.UpdateStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateStatusHeaders()
return await self.update_status_with_options_async(request, headers, runtime)
def update_status_with_options(
self,
request: dingtalkyida__1__0_models.UpdateStatusRequest,
headers: dingtalkyida__1__0_models.UpdateStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateStatusResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.import_sequence):
body['importSequence'] = request.import_sequence
if not UtilClient.is_unset(request.error_lines):
body['errorLines'] = request.error_lines
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateStatusResponse(),
self.do_roarequest('UpdateStatus', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/forms/status', 'none', req, runtime)
)
async def update_status_with_options_async(
self,
request: dingtalkyida__1__0_models.UpdateStatusRequest,
headers: dingtalkyida__1__0_models.UpdateStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateStatusResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.import_sequence):
body['importSequence'] = request.import_sequence
if not UtilClient.is_unset(request.error_lines):
body['errorLines'] = request.error_lines
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateStatusResponse(),
await self.do_roarequest_async('UpdateStatus', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/forms/status', 'none', req, runtime)
)
def get_instances_by_id_list(
self,
request: dingtalkyida__1__0_models.GetInstancesByIdListRequest,
) -> dingtalkyida__1__0_models.GetInstancesByIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstancesByIdListHeaders()
return self.get_instances_by_id_list_with_options(request, headers, runtime)
async def get_instances_by_id_list_async(
self,
request: dingtalkyida__1__0_models.GetInstancesByIdListRequest,
) -> dingtalkyida__1__0_models.GetInstancesByIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstancesByIdListHeaders()
return await self.get_instances_by_id_list_with_options_async(request, headers, runtime)
def get_instances_by_id_list_with_options(
self,
request: dingtalkyida__1__0_models.GetInstancesByIdListRequest,
headers: dingtalkyida__1__0_models.GetInstancesByIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstancesByIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_ids):
query['processInstanceIds'] = request.process_instance_ids
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstancesByIdListResponse(),
self.do_roarequest('GetInstancesByIdList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/instances/searchWithIds', 'json', req, runtime)
)
async def get_instances_by_id_list_with_options_async(
self,
request: dingtalkyida__1__0_models.GetInstancesByIdListRequest,
headers: dingtalkyida__1__0_models.GetInstancesByIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstancesByIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_ids):
query['processInstanceIds'] = request.process_instance_ids
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstancesByIdListResponse(),
await self.do_roarequest_async('GetInstancesByIdList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/instances/searchWithIds', 'json', req, runtime)
)
def save_form_remark(
self,
request: dingtalkyida__1__0_models.SaveFormRemarkRequest,
) -> dingtalkyida__1__0_models.SaveFormRemarkResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SaveFormRemarkHeaders()
return self.save_form_remark_with_options(request, headers, runtime)
async def save_form_remark_async(
self,
request: dingtalkyida__1__0_models.SaveFormRemarkRequest,
) -> dingtalkyida__1__0_models.SaveFormRemarkResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SaveFormRemarkHeaders()
return await self.save_form_remark_with_options_async(request, headers, runtime)
def save_form_remark_with_options(
self,
request: dingtalkyida__1__0_models.SaveFormRemarkRequest,
headers: dingtalkyida__1__0_models.SaveFormRemarkHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SaveFormRemarkResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.reply_id):
body['replyId'] = request.reply_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
body['formInstanceId'] = request.form_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.at_user_id):
body['atUserId'] = request.at_user_id
if not UtilClient.is_unset(request.content):
body['content'] = request.content
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SaveFormRemarkResponse(),
self.do_roarequest('SaveFormRemark', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/remarks', 'json', req, runtime)
)
async def save_form_remark_with_options_async(
self,
request: dingtalkyida__1__0_models.SaveFormRemarkRequest,
headers: dingtalkyida__1__0_models.SaveFormRemarkHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SaveFormRemarkResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.reply_id):
body['replyId'] = request.reply_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
body['formInstanceId'] = request.form_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.at_user_id):
body['atUserId'] = request.at_user_id
if not UtilClient.is_unset(request.content):
body['content'] = request.content
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SaveFormRemarkResponse(),
await self.do_roarequest_async('SaveFormRemark', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/remarks', 'json', req, runtime)
)
def list_table_data_by_form_instance_id_table_id(
self,
form_instance_id: str,
request: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdRequest,
) -> dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdHeaders()
return self.list_table_data_by_form_instance_id_table_id_with_options(form_instance_id, request, headers, runtime)
async def list_table_data_by_form_instance_id_table_id_async(
self,
form_instance_id: str,
request: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdRequest,
) -> dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdHeaders()
return await self.list_table_data_by_form_instance_id_table_id_with_options_async(form_instance_id, request, headers, runtime)
def list_table_data_by_form_instance_id_table_id_with_options(
self,
form_instance_id: str,
request: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdRequest,
headers: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.form_uuid):
query['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.table_field_id):
query['tableFieldId'] = request.table_field_id
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse(),
self.do_roarequest('ListTableDataByFormInstanceIdTableId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/innerTables/{form_instance_id}', 'json', req, runtime)
)
async def list_table_data_by_form_instance_id_table_id_with_options_async(
self,
form_instance_id: str,
request: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdRequest,
headers: dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.form_uuid):
query['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.table_field_id):
query['tableFieldId'] = request.table_field_id
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListTableDataByFormInstanceIdTableIdResponse(),
await self.do_roarequest_async('ListTableDataByFormInstanceIdTableId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/innerTables/{form_instance_id}', 'json', req, runtime)
)
def get_task_copies(
self,
request: dingtalkyida__1__0_models.GetTaskCopiesRequest,
) -> dingtalkyida__1__0_models.GetTaskCopiesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetTaskCopiesHeaders()
return self.get_task_copies_with_options(request, headers, runtime)
async def get_task_copies_async(
self,
request: dingtalkyida__1__0_models.GetTaskCopiesRequest,
) -> dingtalkyida__1__0_models.GetTaskCopiesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetTaskCopiesHeaders()
return await self.get_task_copies_with_options_async(request, headers, runtime)
def get_task_copies_with_options(
self,
request: dingtalkyida__1__0_models.GetTaskCopiesRequest,
headers: dingtalkyida__1__0_models.GetTaskCopiesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetTaskCopiesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetTaskCopiesResponse(),
self.do_roarequest('GetTaskCopies', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/taskCopies', 'json', req, runtime)
)
async def get_task_copies_with_options_async(
self,
request: dingtalkyida__1__0_models.GetTaskCopiesRequest,
headers: dingtalkyida__1__0_models.GetTaskCopiesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetTaskCopiesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetTaskCopiesResponse(),
await self.do_roarequest_async('GetTaskCopies', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/taskCopies', 'json', req, runtime)
)
def get_running_tasks(
self,
request: dingtalkyida__1__0_models.GetRunningTasksRequest,
) -> dingtalkyida__1__0_models.GetRunningTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetRunningTasksHeaders()
return self.get_running_tasks_with_options(request, headers, runtime)
async def get_running_tasks_async(
self,
request: dingtalkyida__1__0_models.GetRunningTasksRequest,
) -> dingtalkyida__1__0_models.GetRunningTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetRunningTasksHeaders()
return await self.get_running_tasks_with_options_async(request, headers, runtime)
def get_running_tasks_with_options(
self,
request: dingtalkyida__1__0_models.GetRunningTasksRequest,
headers: dingtalkyida__1__0_models.GetRunningTasksHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetRunningTasksResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetRunningTasksResponse(),
self.do_roarequest('GetRunningTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/tasks/getRunningTasks', 'json', req, runtime)
)
async def get_running_tasks_with_options_async(
self,
request: dingtalkyida__1__0_models.GetRunningTasksRequest,
headers: dingtalkyida__1__0_models.GetRunningTasksHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetRunningTasksResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetRunningTasksResponse(),
await self.do_roarequest_async('GetRunningTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/tasks/getRunningTasks', 'json', req, runtime)
)
def list_navigation_by_form_type(
self,
request: dingtalkyida__1__0_models.ListNavigationByFormTypeRequest,
) -> dingtalkyida__1__0_models.ListNavigationByFormTypeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListNavigationByFormTypeHeaders()
return self.list_navigation_by_form_type_with_options(request, headers, runtime)
async def list_navigation_by_form_type_async(
self,
request: dingtalkyida__1__0_models.ListNavigationByFormTypeRequest,
) -> dingtalkyida__1__0_models.ListNavigationByFormTypeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListNavigationByFormTypeHeaders()
return await self.list_navigation_by_form_type_with_options_async(request, headers, runtime)
def list_navigation_by_form_type_with_options(
self,
request: dingtalkyida__1__0_models.ListNavigationByFormTypeRequest,
headers: dingtalkyida__1__0_models.ListNavigationByFormTypeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListNavigationByFormTypeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.form_type):
query['formType'] = request.form_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListNavigationByFormTypeResponse(),
self.do_roarequest('ListNavigationByFormType', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/navigations', 'json', req, runtime)
)
async def list_navigation_by_form_type_with_options_async(
self,
request: dingtalkyida__1__0_models.ListNavigationByFormTypeRequest,
headers: dingtalkyida__1__0_models.ListNavigationByFormTypeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListNavigationByFormTypeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.form_type):
query['formType'] = request.form_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListNavigationByFormTypeResponse(),
await self.do_roarequest_async('ListNavigationByFormType', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/navigations', 'json', req, runtime)
)
def terminate_instance(
self,
request: dingtalkyida__1__0_models.TerminateInstanceRequest,
) -> dingtalkyida__1__0_models.TerminateInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.TerminateInstanceHeaders()
return self.terminate_instance_with_options(request, headers, runtime)
async def terminate_instance_async(
self,
request: dingtalkyida__1__0_models.TerminateInstanceRequest,
) -> dingtalkyida__1__0_models.TerminateInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.TerminateInstanceHeaders()
return await self.terminate_instance_with_options_async(request, headers, runtime)
def terminate_instance_with_options(
self,
request: dingtalkyida__1__0_models.TerminateInstanceRequest,
headers: dingtalkyida__1__0_models.TerminateInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.TerminateInstanceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.TerminateInstanceResponse(),
self.do_roarequest('TerminateInstance', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/processes/instances/terminate', 'none', req, runtime)
)
async def terminate_instance_with_options_async(
self,
request: dingtalkyida__1__0_models.TerminateInstanceRequest,
headers: dingtalkyida__1__0_models.TerminateInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.TerminateInstanceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.TerminateInstanceResponse(),
await self.do_roarequest_async('TerminateInstance', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/processes/instances/terminate', 'none', req, runtime)
)
def check_cloud_account_status(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.CheckCloudAccountStatusRequest,
) -> dingtalkyida__1__0_models.CheckCloudAccountStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.CheckCloudAccountStatusHeaders()
return self.check_cloud_account_status_with_options(caller_uid, request, headers, runtime)
async def check_cloud_account_status_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.CheckCloudAccountStatusRequest,
) -> dingtalkyida__1__0_models.CheckCloudAccountStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.CheckCloudAccountStatusHeaders()
return await self.check_cloud_account_status_with_options_async(caller_uid, request, headers, runtime)
def check_cloud_account_status_with_options(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.CheckCloudAccountStatusRequest,
headers: dingtalkyida__1__0_models.CheckCloudAccountStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.CheckCloudAccountStatusResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.CheckCloudAccountStatusResponse(),
self.do_roarequest('CheckCloudAccountStatus', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/cloudAccountStatus/{caller_uid}', 'json', req, runtime)
)
async def check_cloud_account_status_with_options_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.CheckCloudAccountStatusRequest,
headers: dingtalkyida__1__0_models.CheckCloudAccountStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.CheckCloudAccountStatusResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.CheckCloudAccountStatusResponse(),
await self.do_roarequest_async('CheckCloudAccountStatus', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/cloudAccountStatus/{caller_uid}', 'json', req, runtime)
)
def get_corp_accomplishment_tasks(
self,
corp_id: str,
user_id: str,
request: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksRequest,
) -> dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpAccomplishmentTasksHeaders()
return self.get_corp_accomplishment_tasks_with_options(corp_id, user_id, request, headers, runtime)
async def get_corp_accomplishment_tasks_async(
self,
corp_id: str,
user_id: str,
request: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksRequest,
) -> dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpAccomplishmentTasksHeaders()
return await self.get_corp_accomplishment_tasks_with_options_async(corp_id, user_id, request, headers, runtime)
def get_corp_accomplishment_tasks_with_options(
self,
corp_id: str,
user_id: str,
request: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksRequest,
headers: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.token):
query['token'] = request.token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse(),
self.do_roarequest('GetCorpAccomplishmentTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/completedTasks/{corp_id}/{user_id}', 'json', req, runtime)
)
async def get_corp_accomplishment_tasks_with_options_async(
self,
corp_id: str,
user_id: str,
request: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksRequest,
headers: dingtalkyida__1__0_models.GetCorpAccomplishmentTasksHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.token):
query['token'] = request.token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetCorpAccomplishmentTasksResponse(),
await self.do_roarequest_async('GetCorpAccomplishmentTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/completedTasks/{corp_id}/{user_id}', 'json', req, runtime)
)
def get_instances(
self,
request: dingtalkyida__1__0_models.GetInstancesRequest,
) -> dingtalkyida__1__0_models.GetInstancesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstancesHeaders()
return self.get_instances_with_options(request, headers, runtime)
async def get_instances_async(
self,
request: dingtalkyida__1__0_models.GetInstancesRequest,
) -> dingtalkyida__1__0_models.GetInstancesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstancesHeaders()
return await self.get_instances_with_options_async(request, headers, runtime)
def get_instances_with_options(
self,
request: dingtalkyida__1__0_models.GetInstancesRequest,
headers: dingtalkyida__1__0_models.GetInstancesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstancesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
if not UtilClient.is_unset(request.instance_status):
body['instanceStatus'] = request.instance_status
if not UtilClient.is_unset(request.approved_result):
body['approvedResult'] = request.approved_result
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstancesResponse(),
self.do_roarequest('GetInstances', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instances', 'json', req, runtime)
)
async def get_instances_with_options_async(
self,
request: dingtalkyida__1__0_models.GetInstancesRequest,
headers: dingtalkyida__1__0_models.GetInstancesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstancesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
if not UtilClient.is_unset(request.instance_status):
body['instanceStatus'] = request.instance_status
if not UtilClient.is_unset(request.approved_result):
body['approvedResult'] = request.approved_result
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstancesResponse(),
await self.do_roarequest_async('GetInstances', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instances', 'json', req, runtime)
)
def list_application_authorization_service_connector_information(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationHeaders()
return self.list_application_authorization_service_connector_information_with_options(instance_id, request, headers, runtime)
async def list_application_authorization_service_connector_information_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationHeaders()
return await self.list_application_authorization_service_connector_information_with_options_async(instance_id, request, headers, runtime)
def list_application_authorization_service_connector_information_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse(),
self.do_roarequest('ListApplicationAuthorizationServiceConnectorInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applicationAuthorizations/plugs/{instance_id}', 'json', req, runtime)
)
async def list_application_authorization_service_connector_information_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationAuthorizationServiceConnectorInformationResponse(),
await self.do_roarequest_async('ListApplicationAuthorizationServiceConnectorInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applicationAuthorizations/plugs/{instance_id}', 'json', req, runtime)
)
def validate_order_buy(
self,
request: dingtalkyida__1__0_models.ValidateOrderBuyRequest,
) -> dingtalkyida__1__0_models.ValidateOrderBuyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderBuyHeaders()
return self.validate_order_buy_with_options(request, headers, runtime)
async def validate_order_buy_async(
self,
request: dingtalkyida__1__0_models.ValidateOrderBuyRequest,
) -> dingtalkyida__1__0_models.ValidateOrderBuyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderBuyHeaders()
return await self.validate_order_buy_with_options_async(request, headers, runtime)
def validate_order_buy_with_options(
self,
request: dingtalkyida__1__0_models.ValidateOrderBuyRequest,
headers: dingtalkyida__1__0_models.ValidateOrderBuyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderBuyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderBuyResponse(),
self.do_roarequest('ValidateOrderBuy', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/orderBuy/validate', 'json', req, runtime)
)
async def validate_order_buy_with_options_async(
self,
request: dingtalkyida__1__0_models.ValidateOrderBuyRequest,
headers: dingtalkyida__1__0_models.ValidateOrderBuyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderBuyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderBuyResponse(),
await self.do_roarequest_async('ValidateOrderBuy', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/orderBuy/validate', 'json', req, runtime)
)
def renew_tenant_order(
self,
request: dingtalkyida__1__0_models.RenewTenantOrderRequest,
) -> dingtalkyida__1__0_models.RenewTenantOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenewTenantOrderHeaders()
return self.renew_tenant_order_with_options(request, headers, runtime)
async def renew_tenant_order_async(
self,
request: dingtalkyida__1__0_models.RenewTenantOrderRequest,
) -> dingtalkyida__1__0_models.RenewTenantOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenewTenantOrderHeaders()
return await self.renew_tenant_order_with_options_async(request, headers, runtime)
def renew_tenant_order_with_options(
self,
request: dingtalkyida__1__0_models.RenewTenantOrderRequest,
headers: dingtalkyida__1__0_models.RenewTenantOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenewTenantOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenewTenantOrderResponse(),
self.do_roarequest('RenewTenantOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/tenants/reorder', 'json', req, runtime)
)
async def renew_tenant_order_with_options_async(
self,
request: dingtalkyida__1__0_models.RenewTenantOrderRequest,
headers: dingtalkyida__1__0_models.RenewTenantOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenewTenantOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenewTenantOrderResponse(),
await self.do_roarequest_async('RenewTenantOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/tenants/reorder', 'json', req, runtime)
)
def get_print_dictionary(
self,
request: dingtalkyida__1__0_models.GetPrintDictionaryRequest,
) -> dingtalkyida__1__0_models.GetPrintDictionaryResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPrintDictionaryHeaders()
return self.get_print_dictionary_with_options(request, headers, runtime)
async def get_print_dictionary_async(
self,
request: dingtalkyida__1__0_models.GetPrintDictionaryRequest,
) -> dingtalkyida__1__0_models.GetPrintDictionaryResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPrintDictionaryHeaders()
return await self.get_print_dictionary_with_options_async(request, headers, runtime)
def get_print_dictionary_with_options(
self,
request: dingtalkyida__1__0_models.GetPrintDictionaryRequest,
headers: dingtalkyida__1__0_models.GetPrintDictionaryHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPrintDictionaryResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.form_uuid):
query['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.version):
query['version'] = request.version
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPrintDictionaryResponse(),
self.do_roarequest('GetPrintDictionary', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/printTemplates/printDictionaries', 'json', req, runtime)
)
async def get_print_dictionary_with_options_async(
self,
request: dingtalkyida__1__0_models.GetPrintDictionaryRequest,
headers: dingtalkyida__1__0_models.GetPrintDictionaryHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPrintDictionaryResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.form_uuid):
query['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.version):
query['version'] = request.version
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPrintDictionaryResponse(),
await self.do_roarequest_async('GetPrintDictionary', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/printTemplates/printDictionaries', 'json', req, runtime)
)
def update_instance(
self,
request: dingtalkyida__1__0_models.UpdateInstanceRequest,
) -> dingtalkyida__1__0_models.UpdateInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateInstanceHeaders()
return self.update_instance_with_options(request, headers, runtime)
async def update_instance_async(
self,
request: dingtalkyida__1__0_models.UpdateInstanceRequest,
) -> dingtalkyida__1__0_models.UpdateInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateInstanceHeaders()
return await self.update_instance_with_options_async(request, headers, runtime)
def update_instance_with_options(
self,
request: dingtalkyida__1__0_models.UpdateInstanceRequest,
headers: dingtalkyida__1__0_models.UpdateInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateInstanceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.update_form_data_json):
body['updateFormDataJson'] = request.update_form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateInstanceResponse(),
self.do_roarequest('UpdateInstance', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/processes/instances', 'none', req, runtime)
)
async def update_instance_with_options_async(
self,
request: dingtalkyida__1__0_models.UpdateInstanceRequest,
headers: dingtalkyida__1__0_models.UpdateInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateInstanceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.update_form_data_json):
body['updateFormDataJson'] = request.update_form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateInstanceResponse(),
await self.do_roarequest_async('UpdateInstance', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/processes/instances', 'none', req, runtime)
)
def buy_authorization_order(
self,
request: dingtalkyida__1__0_models.BuyAuthorizationOrderRequest,
) -> dingtalkyida__1__0_models.BuyAuthorizationOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.BuyAuthorizationOrderHeaders()
return self.buy_authorization_order_with_options(request, headers, runtime)
async def buy_authorization_order_async(
self,
request: dingtalkyida__1__0_models.BuyAuthorizationOrderRequest,
) -> dingtalkyida__1__0_models.BuyAuthorizationOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.BuyAuthorizationOrderHeaders()
return await self.buy_authorization_order_with_options_async(request, headers, runtime)
def buy_authorization_order_with_options(
self,
request: dingtalkyida__1__0_models.BuyAuthorizationOrderRequest,
headers: dingtalkyida__1__0_models.BuyAuthorizationOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.BuyAuthorizationOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.BuyAuthorizationOrderResponse(),
self.do_roarequest('BuyAuthorizationOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/appAuthorizations/order', 'json', req, runtime)
)
async def buy_authorization_order_with_options_async(
self,
request: dingtalkyida__1__0_models.BuyAuthorizationOrderRequest,
headers: dingtalkyida__1__0_models.BuyAuthorizationOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.BuyAuthorizationOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.BuyAuthorizationOrderResponse(),
await self.do_roarequest_async('BuyAuthorizationOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/appAuthorizations/order', 'json', req, runtime)
)
def validate_application_service_order_upgrade(
self,
caller_unionid: str,
request: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeHeaders()
return self.validate_application_service_order_upgrade_with_options(caller_unionid, request, headers, runtime)
async def validate_application_service_order_upgrade_async(
self,
caller_unionid: str,
request: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeHeaders()
return await self.validate_application_service_order_upgrade_with_options_async(caller_unionid, request, headers, runtime)
def validate_application_service_order_upgrade_with_options(
self,
caller_unionid: str,
request: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse(),
self.do_roarequest('ValidateApplicationServiceOrderUpgrade', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applications/orderValidations/{caller_unionid}', 'json', req, runtime)
)
async def validate_application_service_order_upgrade_with_options_async(
self,
caller_unionid: str,
request: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationServiceOrderUpgradeResponse(),
await self.do_roarequest_async('ValidateApplicationServiceOrderUpgrade', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applications/orderValidations/{caller_unionid}', 'json', req, runtime)
)
def get_corp_tasks(
self,
request: dingtalkyida__1__0_models.GetCorpTasksRequest,
) -> dingtalkyida__1__0_models.GetCorpTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpTasksHeaders()
return self.get_corp_tasks_with_options(request, headers, runtime)
async def get_corp_tasks_async(
self,
request: dingtalkyida__1__0_models.GetCorpTasksRequest,
) -> dingtalkyida__1__0_models.GetCorpTasksResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpTasksHeaders()
return await self.get_corp_tasks_with_options_async(request, headers, runtime)
def get_corp_tasks_with_options(
self,
request: dingtalkyida__1__0_models.GetCorpTasksRequest,
headers: dingtalkyida__1__0_models.GetCorpTasksHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetCorpTasksResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.token):
query['token'] = request.token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetCorpTasksResponse(),
self.do_roarequest('GetCorpTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/corpTasks', 'json', req, runtime)
)
async def get_corp_tasks_with_options_async(
self,
request: dingtalkyida__1__0_models.GetCorpTasksRequest,
headers: dingtalkyida__1__0_models.GetCorpTasksHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetCorpTasksResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.token):
query['token'] = request.token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetCorpTasksResponse(),
await self.do_roarequest_async('GetCorpTasks', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/corpTasks', 'json', req, runtime)
)
def list_commodity(
self,
request: dingtalkyida__1__0_models.ListCommodityRequest,
) -> dingtalkyida__1__0_models.ListCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListCommodityHeaders()
return self.list_commodity_with_options(request, headers, runtime)
async def list_commodity_async(
self,
request: dingtalkyida__1__0_models.ListCommodityRequest,
) -> dingtalkyida__1__0_models.ListCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListCommodityHeaders()
return await self.list_commodity_with_options_async(request, headers, runtime)
def list_commodity_with_options(
self,
request: dingtalkyida__1__0_models.ListCommodityRequest,
headers: dingtalkyida__1__0_models.ListCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListCommodityResponse(),
self.do_roarequest('ListCommodity', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/appAuth/commodities', 'json', req, runtime)
)
async def list_commodity_with_options_async(
self,
request: dingtalkyida__1__0_models.ListCommodityRequest,
headers: dingtalkyida__1__0_models.ListCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListCommodityResponse(),
await self.do_roarequest_async('ListCommodity', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/appAuth/commodities', 'json', req, runtime)
)
def notify_authorization_result(
self,
request: dingtalkyida__1__0_models.NotifyAuthorizationResultRequest,
) -> dingtalkyida__1__0_models.NotifyAuthorizationResultResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.NotifyAuthorizationResultHeaders()
return self.notify_authorization_result_with_options(request, headers, runtime)
async def notify_authorization_result_async(
self,
request: dingtalkyida__1__0_models.NotifyAuthorizationResultRequest,
) -> dingtalkyida__1__0_models.NotifyAuthorizationResultResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.NotifyAuthorizationResultHeaders()
return await self.notify_authorization_result_with_options_async(request, headers, runtime)
def notify_authorization_result_with_options(
self,
request: dingtalkyida__1__0_models.NotifyAuthorizationResultRequest,
headers: dingtalkyida__1__0_models.NotifyAuthorizationResultHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.NotifyAuthorizationResultResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.caller_uid):
body['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.NotifyAuthorizationResultResponse(),
self.do_roarequest('NotifyAuthorizationResult', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/authorizationResults/notify', 'json', req, runtime)
)
async def notify_authorization_result_with_options_async(
self,
request: dingtalkyida__1__0_models.NotifyAuthorizationResultRequest,
headers: dingtalkyida__1__0_models.NotifyAuthorizationResultHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.NotifyAuthorizationResultResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.caller_uid):
body['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.NotifyAuthorizationResultResponse(),
await self.do_roarequest_async('NotifyAuthorizationResult', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/authorizationResults/notify', 'json', req, runtime)
)
def buy_fresh_order(
self,
request: dingtalkyida__1__0_models.BuyFreshOrderRequest,
) -> dingtalkyida__1__0_models.BuyFreshOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.BuyFreshOrderHeaders()
return self.buy_fresh_order_with_options(request, headers, runtime)
async def buy_fresh_order_async(
self,
request: dingtalkyida__1__0_models.BuyFreshOrderRequest,
) -> dingtalkyida__1__0_models.BuyFreshOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.BuyFreshOrderHeaders()
return await self.buy_fresh_order_with_options_async(request, headers, runtime)
def buy_fresh_order_with_options(
self,
request: dingtalkyida__1__0_models.BuyFreshOrderRequest,
headers: dingtalkyida__1__0_models.BuyFreshOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.BuyFreshOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.BuyFreshOrderResponse(),
self.do_roarequest('BuyFreshOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/freshOrders', 'json', req, runtime)
)
async def buy_fresh_order_with_options_async(
self,
request: dingtalkyida__1__0_models.BuyFreshOrderRequest,
headers: dingtalkyida__1__0_models.BuyFreshOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.BuyFreshOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.produce_code):
body['produceCode'] = request.produce_code
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.instance_name):
body['instanceName'] = request.instance_name
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.charge_type):
body['chargeType'] = request.charge_type
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
if not UtilClient.is_unset(request.begin_time_gmt):
body['beginTimeGMT'] = request.begin_time_gmt
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commerce_type):
body['commerceType'] = request.commerce_type
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.BuyFreshOrderResponse(),
await self.do_roarequest_async('BuyFreshOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/freshOrders', 'json', req, runtime)
)
def remove_tenant_resource(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.RemoveTenantResourceRequest,
) -> dingtalkyida__1__0_models.RemoveTenantResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RemoveTenantResourceHeaders()
return self.remove_tenant_resource_with_options(caller_uid, request, headers, runtime)
async def remove_tenant_resource_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.RemoveTenantResourceRequest,
) -> dingtalkyida__1__0_models.RemoveTenantResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RemoveTenantResourceHeaders()
return await self.remove_tenant_resource_with_options_async(caller_uid, request, headers, runtime)
def remove_tenant_resource_with_options(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.RemoveTenantResourceRequest,
headers: dingtalkyida__1__0_models.RemoveTenantResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RemoveTenantResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RemoveTenantResourceResponse(),
self.do_roarequest('RemoveTenantResource', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/applications/tenantRelatedResources/{caller_uid}', 'json', req, runtime)
)
async def remove_tenant_resource_with_options_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.RemoveTenantResourceRequest,
headers: dingtalkyida__1__0_models.RemoveTenantResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RemoveTenantResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RemoveTenantResourceResponse(),
await self.do_roarequest_async('RemoveTenantResource', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/applications/tenantRelatedResources/{caller_uid}', 'json', req, runtime)
)
def renew_application_authorization_service_order(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders()
return self.renew_application_authorization_service_order_with_options(request, headers, runtime)
async def renew_application_authorization_service_order_async(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders()
return await self.renew_application_authorization_service_order_with_options_async(request, headers, runtime)
def renew_application_authorization_service_order_with_options(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse(),
self.do_roarequest('RenewApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/orders/renew', 'json', req, runtime)
)
async def renew_application_authorization_service_order_with_options_async(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse(),
await self.do_roarequest_async('RenewApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/orders/renew', 'json', req, runtime)
)
def get_process_definition(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetProcessDefinitionHeaders()
return self.get_process_definition_with_options(process_instance_id, request, headers, runtime)
async def get_process_definition_async(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetProcessDefinitionHeaders()
return await self.get_process_definition_with_options_async(process_instance_id, request, headers, runtime)
def get_process_definition_with_options(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
headers: dingtalkyida__1__0_models.GetProcessDefinitionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.group_id):
query['groupId'] = request.group_id
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.order_number):
query['orderNumber'] = request.order_number
if not UtilClient.is_unset(request.system_type):
query['systemType'] = request.system_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.name_space):
query['nameSpace'] = request.name_space
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetProcessDefinitionResponse(),
self.do_roarequest('GetProcessDefinition', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/definitions/{process_instance_id}', 'json', req, runtime)
)
async def get_process_definition_with_options_async(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
headers: dingtalkyida__1__0_models.GetProcessDefinitionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.group_id):
query['groupId'] = request.group_id
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.order_number):
query['orderNumber'] = request.order_number
if not UtilClient.is_unset(request.system_type):
query['systemType'] = request.system_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.name_space):
query['nameSpace'] = request.name_space
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetProcessDefinitionResponse(),
await self.do_roarequest_async('GetProcessDefinition', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/definitions/{process_instance_id}', 'json', req, runtime)
)
def upgrade_tenant_information(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpgradeTenantInformationHeaders()
return self.upgrade_tenant_information_with_options(request, headers, runtime)
async def upgrade_tenant_information_async(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpgradeTenantInformationHeaders()
return await self.upgrade_tenant_information_with_options_async(request, headers, runtime)
def upgrade_tenant_information_with_options(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
headers: dingtalkyida__1__0_models.UpgradeTenantInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpgradeTenantInformationResponse(),
self.do_roarequest('UpgradeTenantInformation', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/apps/tenantInfos', 'json', req, runtime)
)
async def upgrade_tenant_information_with_options_async(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
headers: dingtalkyida__1__0_models.UpgradeTenantInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpgradeTenantInformationResponse(),
await self.do_roarequest_async('UpgradeTenantInformation', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/apps/tenantInfos', 'json', req, runtime)
)
def get_application_authorization_service_platform_resource(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders()
return self.get_application_authorization_service_platform_resource_with_options(request, headers, runtime)
async def get_application_authorization_service_platform_resource_async(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders()
return await self.get_application_authorization_service_platform_resource_with_options_async(request, headers, runtime)
def get_application_authorization_service_platform_resource_with_options(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
headers: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse(),
self.do_roarequest('GetApplicationAuthorizationServicePlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorization/platformResources', 'json', req, runtime)
)
async def get_application_authorization_service_platform_resource_with_options_async(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
headers: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse(),
await self.do_roarequest_async('GetApplicationAuthorizationServicePlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorization/platformResources', 'json', req, runtime)
)
def list_application_authorization_service_application_information(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders()
return self.list_application_authorization_service_application_information_with_options(instance_id, request, headers, runtime)
async def list_application_authorization_service_application_information_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders()
return await self.list_application_authorization_service_application_information_with_options_async(instance_id, request, headers, runtime)
def list_application_authorization_service_application_information_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse(),
self.do_roarequest('ListApplicationAuthorizationServiceApplicationInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorizations/applicationInfos/{instance_id}', 'json', req, runtime)
)
async def list_application_authorization_service_application_information_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse(),
await self.do_roarequest_async('ListApplicationAuthorizationServiceApplicationInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorizations/applicationInfos/{instance_id}', 'json', req, runtime)
)
def validate_application_authorization_service_order(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders()
return self.validate_application_authorization_service_order_with_options(caller_uid, request, headers, runtime)
async def validate_application_authorization_service_order_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders()
return await self.validate_application_authorization_service_order_with_options_async(caller_uid, request, headers, runtime)
def validate_application_authorization_service_order_with_options(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse(),
self.do_roarequest('ValidateApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/appsAuthorizations/freshOrderInfoReviews/{caller_uid}', 'json', req, runtime)
)
async def validate_application_authorization_service_order_with_options_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse(),
await self.do_roarequest_async('ValidateApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/appsAuthorizations/freshOrderInfoReviews/{caller_uid}', 'json', req, runtime)
)
def get_activity_list(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivityListHeaders()
return self.get_activity_list_with_options(request, headers, runtime)
async def get_activity_list_async(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivityListHeaders()
return await self.get_activity_list_with_options_async(request, headers, runtime)
def get_activity_list_with_options(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
headers: dingtalkyida__1__0_models.GetActivityListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.process_code):
query['processCode'] = request.process_code
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivityListResponse(),
self.do_roarequest('GetActivityList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/activities', 'json', req, runtime)
)
async def get_activity_list_with_options_async(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
headers: dingtalkyida__1__0_models.GetActivityListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.process_code):
query['processCode'] = request.process_code
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivityListResponse(),
await self.do_roarequest_async('GetActivityList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/activities', 'json', req, runtime)
)
def execute_custom_api(
self,
request: dingtalkyida__1__0_models.ExecuteCustomApiRequest,
) -> dingtalkyida__1__0_models.ExecuteCustomApiResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecuteCustomApiHeaders()
return self.execute_custom_api_with_options(request, headers, runtime)
async def execute_custom_api_async(
self,
request: dingtalkyida__1__0_models.ExecuteCustomApiRequest,
) -> dingtalkyida__1__0_models.ExecuteCustomApiResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecuteCustomApiHeaders()
return await self.execute_custom_api_with_options_async(request, headers, runtime)
def execute_custom_api_with_options(
self,
request: dingtalkyida__1__0_models.ExecuteCustomApiRequest,
headers: dingtalkyida__1__0_models.ExecuteCustomApiHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecuteCustomApiResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.data):
query['data'] = request.data
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.service_id):
query['serviceId'] = request.service_id
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecuteCustomApiResponse(),
self.do_roarequest('ExecuteCustomApi', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/customApi/execute', 'json', req, runtime)
)
async def execute_custom_api_with_options_async(
self,
request: dingtalkyida__1__0_models.ExecuteCustomApiRequest,
headers: dingtalkyida__1__0_models.ExecuteCustomApiHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecuteCustomApiResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.data):
query['data'] = request.data
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.service_id):
query['serviceId'] = request.service_id
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecuteCustomApiResponse(),
await self.do_roarequest_async('ExecuteCustomApi', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/customApi/execute', 'json', req, runtime)
)
def login_code_gen(
self,
request: dingtalkyida__1__0_models.LoginCodeGenRequest,
) -> dingtalkyida__1__0_models.LoginCodeGenResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.LoginCodeGenHeaders()
return self.login_code_gen_with_options(request, headers, runtime)
async def login_code_gen_async(
self,
request: dingtalkyida__1__0_models.LoginCodeGenRequest,
) -> dingtalkyida__1__0_models.LoginCodeGenResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.LoginCodeGenHeaders()
return await self.login_code_gen_with_options_async(request, headers, runtime)
def login_code_gen_with_options(
self,
request: dingtalkyida__1__0_models.LoginCodeGenRequest,
headers: dingtalkyida__1__0_models.LoginCodeGenHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.LoginCodeGenResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.LoginCodeGenResponse(),
self.do_roarequest('LoginCodeGen', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/authorizations/loginCodes', 'json', req, runtime)
)
async def login_code_gen_with_options_async(
self,
request: dingtalkyida__1__0_models.LoginCodeGenRequest,
headers: dingtalkyida__1__0_models.LoginCodeGenHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.LoginCodeGenResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.LoginCodeGenResponse(),
await self.do_roarequest_async('LoginCodeGen', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/authorizations/loginCodes', 'json', req, runtime)
)
def terminate_cloud_authorization(
self,
request: dingtalkyida__1__0_models.TerminateCloudAuthorizationRequest,
) -> dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.TerminateCloudAuthorizationHeaders()
return self.terminate_cloud_authorization_with_options(request, headers, runtime)
async def terminate_cloud_authorization_async(
self,
request: dingtalkyida__1__0_models.TerminateCloudAuthorizationRequest,
) -> dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.TerminateCloudAuthorizationHeaders()
return await self.terminate_cloud_authorization_with_options_async(request, headers, runtime)
def terminate_cloud_authorization_with_options(
self,
request: dingtalkyida__1__0_models.TerminateCloudAuthorizationRequest,
headers: dingtalkyida__1__0_models.TerminateCloudAuthorizationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse(),
self.do_roarequest('TerminateCloudAuthorization', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/cloudAuthorizations/terminate', 'json', req, runtime)
)
async def terminate_cloud_authorization_with_options_async(
self,
request: dingtalkyida__1__0_models.TerminateCloudAuthorizationRequest,
headers: dingtalkyida__1__0_models.TerminateCloudAuthorizationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.TerminateCloudAuthorizationResponse(),
await self.do_roarequest_async('TerminateCloudAuthorization', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/apps/cloudAuthorizations/terminate', 'json', req, runtime)
)
def get_activity_button_list(
self,
app_type: str,
process_code: str,
activity_id: str,
request: dingtalkyida__1__0_models.GetActivityButtonListRequest,
) -> dingtalkyida__1__0_models.GetActivityButtonListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivityButtonListHeaders()
return self.get_activity_button_list_with_options(app_type, process_code, activity_id, request, headers, runtime)
async def get_activity_button_list_async(
self,
app_type: str,
process_code: str,
activity_id: str,
request: dingtalkyida__1__0_models.GetActivityButtonListRequest,
) -> dingtalkyida__1__0_models.GetActivityButtonListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivityButtonListHeaders()
return await self.get_activity_button_list_with_options_async(app_type, process_code, activity_id, request, headers, runtime)
def get_activity_button_list_with_options(
self,
app_type: str,
process_code: str,
activity_id: str,
request: dingtalkyida__1__0_models.GetActivityButtonListRequest,
headers: dingtalkyida__1__0_models.GetActivityButtonListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivityButtonListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivityButtonListResponse(),
self.do_roarequest('GetActivityButtonList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processDefinitions/buttons/{app_type}/{process_code}/{activity_id}', 'json', req, runtime)
)
async def get_activity_button_list_with_options_async(
self,
app_type: str,
process_code: str,
activity_id: str,
request: dingtalkyida__1__0_models.GetActivityButtonListRequest,
headers: dingtalkyida__1__0_models.GetActivityButtonListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivityButtonListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivityButtonListResponse(),
await self.do_roarequest_async('GetActivityButtonList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processDefinitions/buttons/{app_type}/{process_code}/{activity_id}', 'json', req, runtime)
)
def start_instance(
self,
request: dingtalkyida__1__0_models.StartInstanceRequest,
) -> dingtalkyida__1__0_models.StartInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.StartInstanceHeaders()
return self.start_instance_with_options(request, headers, runtime)
async def start_instance_async(
self,
request: dingtalkyida__1__0_models.StartInstanceRequest,
) -> dingtalkyida__1__0_models.StartInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.StartInstanceHeaders()
return await self.start_instance_with_options_async(request, headers, runtime)
def start_instance_with_options(
self,
request: dingtalkyida__1__0_models.StartInstanceRequest,
headers: dingtalkyida__1__0_models.StartInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.StartInstanceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.process_code):
body['processCode'] = request.process_code
if not UtilClient.is_unset(request.department_id):
body['departmentId'] = request.department_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.StartInstanceResponse(),
self.do_roarequest('StartInstance', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instances/start', 'json', req, runtime)
)
async def start_instance_with_options_async(
self,
request: dingtalkyida__1__0_models.StartInstanceRequest,
headers: dingtalkyida__1__0_models.StartInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.StartInstanceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.process_code):
body['processCode'] = request.process_code
if not UtilClient.is_unset(request.department_id):
body['departmentId'] = request.department_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.StartInstanceResponse(),
await self.do_roarequest_async('StartInstance', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instances/start', 'json', req, runtime)
)
def list_application_information(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationInformationHeaders()
return self.list_application_information_with_options(instance_id, request, headers, runtime)
async def list_application_information_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationInformationHeaders()
return await self.list_application_information_with_options_async(instance_id, request, headers, runtime)
def list_application_information_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationInformationResponse(),
self.do_roarequest('ListApplicationInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/infos/{instance_id}', 'json', req, runtime)
)
async def list_application_information_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationInformationResponse(),
await self.do_roarequest_async('ListApplicationInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/infos/{instance_id}', 'json', req, runtime)
)
def validate_order_upgrade(
self,
request: dingtalkyida__1__0_models.ValidateOrderUpgradeRequest,
) -> dingtalkyida__1__0_models.ValidateOrderUpgradeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderUpgradeHeaders()
return self.validate_order_upgrade_with_options(request, headers, runtime)
async def validate_order_upgrade_async(
self,
request: dingtalkyida__1__0_models.ValidateOrderUpgradeRequest,
) -> dingtalkyida__1__0_models.ValidateOrderUpgradeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderUpgradeHeaders()
return await self.validate_order_upgrade_with_options_async(request, headers, runtime)
def validate_order_upgrade_with_options(
self,
request: dingtalkyida__1__0_models.ValidateOrderUpgradeRequest,
headers: dingtalkyida__1__0_models.ValidateOrderUpgradeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderUpgradeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderUpgradeResponse(),
self.do_roarequest('ValidateOrderUpgrade', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/orderUpgrade/validate', 'json', req, runtime)
)
async def validate_order_upgrade_with_options_async(
self,
request: dingtalkyida__1__0_models.ValidateOrderUpgradeRequest,
headers: dingtalkyida__1__0_models.ValidateOrderUpgradeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderUpgradeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderUpgradeResponse(),
await self.do_roarequest_async('ValidateOrderUpgrade', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/orderUpgrade/validate', 'json', req, runtime)
)
def update_cloud_account_information(
self,
request: dingtalkyida__1__0_models.UpdateCloudAccountInformationRequest,
) -> dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateCloudAccountInformationHeaders()
return self.update_cloud_account_information_with_options(request, headers, runtime)
async def update_cloud_account_information_async(
self,
request: dingtalkyida__1__0_models.UpdateCloudAccountInformationRequest,
) -> dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateCloudAccountInformationHeaders()
return await self.update_cloud_account_information_with_options_async(request, headers, runtime)
def update_cloud_account_information_with_options(
self,
request: dingtalkyida__1__0_models.UpdateCloudAccountInformationRequest,
headers: dingtalkyida__1__0_models.UpdateCloudAccountInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse(),
self.do_roarequest('UpdateCloudAccountInformation', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/apps/cloudAccountInfos', 'json', req, runtime)
)
async def update_cloud_account_information_with_options_async(
self,
request: dingtalkyida__1__0_models.UpdateCloudAccountInformationRequest,
headers: dingtalkyida__1__0_models.UpdateCloudAccountInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateCloudAccountInformationResponse(),
await self.do_roarequest_async('UpdateCloudAccountInformation', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/apps/cloudAccountInfos', 'json', req, runtime)
)
def get_corp_level_by_account_id(
self,
request: dingtalkyida__1__0_models.GetCorpLevelByAccountIdRequest,
) -> dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpLevelByAccountIdHeaders()
return self.get_corp_level_by_account_id_with_options(request, headers, runtime)
async def get_corp_level_by_account_id_async(
self,
request: dingtalkyida__1__0_models.GetCorpLevelByAccountIdRequest,
) -> dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetCorpLevelByAccountIdHeaders()
return await self.get_corp_level_by_account_id_with_options_async(request, headers, runtime)
def get_corp_level_by_account_id_with_options(
self,
request: dingtalkyida__1__0_models.GetCorpLevelByAccountIdRequest,
headers: dingtalkyida__1__0_models.GetCorpLevelByAccountIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.account_id):
query['accountId'] = request.account_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse(),
self.do_roarequest('GetCorpLevelByAccountId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/corpLevel', 'json', req, runtime)
)
async def get_corp_level_by_account_id_with_options_async(
self,
request: dingtalkyida__1__0_models.GetCorpLevelByAccountIdRequest,
headers: dingtalkyida__1__0_models.GetCorpLevelByAccountIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.account_id):
query['accountId'] = request.account_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetCorpLevelByAccountIdResponse(),
await self.do_roarequest_async('GetCorpLevelByAccountId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/corpLevel', 'json', req, runtime)
)
def execute_platform_task(
self,
request: dingtalkyida__1__0_models.ExecutePlatformTaskRequest,
) -> dingtalkyida__1__0_models.ExecutePlatformTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecutePlatformTaskHeaders()
return self.execute_platform_task_with_options(request, headers, runtime)
async def execute_platform_task_async(
self,
request: dingtalkyida__1__0_models.ExecutePlatformTaskRequest,
) -> dingtalkyida__1__0_models.ExecutePlatformTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecutePlatformTaskHeaders()
return await self.execute_platform_task_with_options_async(request, headers, runtime)
def execute_platform_task_with_options(
self,
request: dingtalkyida__1__0_models.ExecutePlatformTaskRequest,
headers: dingtalkyida__1__0_models.ExecutePlatformTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecutePlatformTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.out_result):
body['outResult'] = request.out_result
if not UtilClient.is_unset(request.no_execute_expressions):
body['noExecuteExpressions'] = request.no_execute_expressions
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecutePlatformTaskResponse(),
self.do_roarequest('ExecutePlatformTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/platformTasks/execute', 'none', req, runtime)
)
async def execute_platform_task_with_options_async(
self,
request: dingtalkyida__1__0_models.ExecutePlatformTaskRequest,
headers: dingtalkyida__1__0_models.ExecutePlatformTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecutePlatformTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.out_result):
body['outResult'] = request.out_result
if not UtilClient.is_unset(request.no_execute_expressions):
body['noExecuteExpressions'] = request.no_execute_expressions
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecutePlatformTaskResponse(),
await self.do_roarequest_async('ExecutePlatformTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/platformTasks/execute', 'none', req, runtime)
)
def search_form_datas(
self,
request: dingtalkyida__1__0_models.SearchFormDatasRequest,
) -> dingtalkyida__1__0_models.SearchFormDatasResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchFormDatasHeaders()
return self.search_form_datas_with_options(request, headers, runtime)
async def search_form_datas_async(
self,
request: dingtalkyida__1__0_models.SearchFormDatasRequest,
) -> dingtalkyida__1__0_models.SearchFormDatasResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchFormDatasHeaders()
return await self.search_form_datas_with_options_async(request, headers, runtime)
def search_form_datas_with_options(
self,
request: dingtalkyida__1__0_models.SearchFormDatasRequest,
headers: dingtalkyida__1__0_models.SearchFormDatasHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchFormDatasResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.current_page):
body['currentPage'] = request.current_page
if not UtilClient.is_unset(request.page_size):
body['pageSize'] = request.page_size
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.dynamic_order):
body['dynamicOrder'] = request.dynamic_order
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchFormDatasResponse(),
self.do_roarequest('SearchFormDatas', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances/search', 'json', req, runtime)
)
async def search_form_datas_with_options_async(
self,
request: dingtalkyida__1__0_models.SearchFormDatasRequest,
headers: dingtalkyida__1__0_models.SearchFormDatasHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchFormDatasResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.current_page):
body['currentPage'] = request.current_page
if not UtilClient.is_unset(request.page_size):
body['pageSize'] = request.page_size
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.dynamic_order):
body['dynamicOrder'] = request.dynamic_order
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchFormDatasResponse(),
await self.do_roarequest_async('SearchFormDatas', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances/search', 'json', req, runtime)
)
def search_activation_code(
self,
request: dingtalkyida__1__0_models.SearchActivationCodeRequest,
) -> dingtalkyida__1__0_models.SearchActivationCodeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchActivationCodeHeaders()
return self.search_activation_code_with_options(request, headers, runtime)
async def search_activation_code_async(
self,
request: dingtalkyida__1__0_models.SearchActivationCodeRequest,
) -> dingtalkyida__1__0_models.SearchActivationCodeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchActivationCodeHeaders()
return await self.search_activation_code_with_options_async(request, headers, runtime)
def search_activation_code_with_options(
self,
request: dingtalkyida__1__0_models.SearchActivationCodeRequest,
headers: dingtalkyida__1__0_models.SearchActivationCodeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchActivationCodeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchActivationCodeResponse(),
self.do_roarequest('SearchActivationCode', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/activationCode/information', 'json', req, runtime)
)
async def search_activation_code_with_options_async(
self,
request: dingtalkyida__1__0_models.SearchActivationCodeRequest,
headers: dingtalkyida__1__0_models.SearchActivationCodeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchActivationCodeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchActivationCodeResponse(),
await self.do_roarequest_async('SearchActivationCode', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/activationCode/information', 'json', req, runtime)
)
def save_print_tpl_detail_info(
self,
request: dingtalkyida__1__0_models.SavePrintTplDetailInfoRequest,
) -> dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SavePrintTplDetailInfoHeaders()
return self.save_print_tpl_detail_info_with_options(request, headers, runtime)
async def save_print_tpl_detail_info_async(
self,
request: dingtalkyida__1__0_models.SavePrintTplDetailInfoRequest,
) -> dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SavePrintTplDetailInfoHeaders()
return await self.save_print_tpl_detail_info_with_options_async(request, headers, runtime)
def save_print_tpl_detail_info_with_options(
self,
request: dingtalkyida__1__0_models.SavePrintTplDetailInfoRequest,
headers: dingtalkyida__1__0_models.SavePrintTplDetailInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.vm):
body['vm'] = request.vm
if not UtilClient.is_unset(request.form_version):
body['formVersion'] = request.form_version
if not UtilClient.is_unset(request.template_id):
body['templateId'] = request.template_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.setting):
body['setting'] = request.setting
if not UtilClient.is_unset(request.title):
body['title'] = request.title
if not UtilClient.is_unset(request.description):
body['description'] = request.description
if not UtilClient.is_unset(request.file_name_config):
body['fileNameConfig'] = request.file_name_config
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse(),
self.do_roarequest('SavePrintTplDetailInfo', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/printTemplates/printTplDetailInfos', 'json', req, runtime)
)
async def save_print_tpl_detail_info_with_options_async(
self,
request: dingtalkyida__1__0_models.SavePrintTplDetailInfoRequest,
headers: dingtalkyida__1__0_models.SavePrintTplDetailInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.vm):
body['vm'] = request.vm
if not UtilClient.is_unset(request.form_version):
body['formVersion'] = request.form_version
if not UtilClient.is_unset(request.template_id):
body['templateId'] = request.template_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.setting):
body['setting'] = request.setting
if not UtilClient.is_unset(request.title):
body['title'] = request.title
if not UtilClient.is_unset(request.description):
body['description'] = request.description
if not UtilClient.is_unset(request.file_name_config):
body['fileNameConfig'] = request.file_name_config
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SavePrintTplDetailInfoResponse(),
await self.do_roarequest_async('SavePrintTplDetailInfo', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/printTemplates/printTplDetailInfos', 'json', req, runtime)
)
def search_employee_field_values(
self,
request: dingtalkyida__1__0_models.SearchEmployeeFieldValuesRequest,
) -> dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchEmployeeFieldValuesHeaders()
return self.search_employee_field_values_with_options(request, headers, runtime)
async def search_employee_field_values_async(
self,
request: dingtalkyida__1__0_models.SearchEmployeeFieldValuesRequest,
) -> dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchEmployeeFieldValuesHeaders()
return await self.search_employee_field_values_with_options_async(request, headers, runtime)
def search_employee_field_values_with_options(
self,
request: dingtalkyida__1__0_models.SearchEmployeeFieldValuesRequest,
headers: dingtalkyida__1__0_models.SearchEmployeeFieldValuesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.target_field_json):
body['targetFieldJson'] = request.target_field_json
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse(),
self.do_roarequest('SearchEmployeeFieldValues', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/employeeFields', 'json', req, runtime)
)
async def search_employee_field_values_with_options_async(
self,
request: dingtalkyida__1__0_models.SearchEmployeeFieldValuesRequest,
headers: dingtalkyida__1__0_models.SearchEmployeeFieldValuesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.target_field_json):
body['targetFieldJson'] = request.target_field_json
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchEmployeeFieldValuesResponse(),
await self.do_roarequest_async('SearchEmployeeFieldValues', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/employeeFields', 'json', req, runtime)
)
def update_form_data(
self,
request: dingtalkyida__1__0_models.UpdateFormDataRequest,
) -> dingtalkyida__1__0_models.UpdateFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateFormDataHeaders()
return self.update_form_data_with_options(request, headers, runtime)
async def update_form_data_async(
self,
request: dingtalkyida__1__0_models.UpdateFormDataRequest,
) -> dingtalkyida__1__0_models.UpdateFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpdateFormDataHeaders()
return await self.update_form_data_with_options_async(request, headers, runtime)
def update_form_data_with_options(
self,
request: dingtalkyida__1__0_models.UpdateFormDataRequest,
headers: dingtalkyida__1__0_models.UpdateFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateFormDataResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
body['formInstanceId'] = request.form_instance_id
if not UtilClient.is_unset(request.use_latest_version):
body['useLatestVersion'] = request.use_latest_version
if not UtilClient.is_unset(request.update_form_data_json):
body['updateFormDataJson'] = request.update_form_data_json
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateFormDataResponse(),
self.do_roarequest('UpdateFormData', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/forms/instances', 'none', req, runtime)
)
async def update_form_data_with_options_async(
self,
request: dingtalkyida__1__0_models.UpdateFormDataRequest,
headers: dingtalkyida__1__0_models.UpdateFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpdateFormDataResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
body['formInstanceId'] = request.form_instance_id
if not UtilClient.is_unset(request.use_latest_version):
body['useLatestVersion'] = request.use_latest_version
if not UtilClient.is_unset(request.update_form_data_json):
body['updateFormDataJson'] = request.update_form_data_json
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpdateFormDataResponse(),
await self.do_roarequest_async('UpdateFormData', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/forms/instances', 'none', req, runtime)
)
def get_instance_id_list(
self,
request: dingtalkyida__1__0_models.GetInstanceIdListRequest,
) -> dingtalkyida__1__0_models.GetInstanceIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstanceIdListHeaders()
return self.get_instance_id_list_with_options(request, headers, runtime)
async def get_instance_id_list_async(
self,
request: dingtalkyida__1__0_models.GetInstanceIdListRequest,
) -> dingtalkyida__1__0_models.GetInstanceIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstanceIdListHeaders()
return await self.get_instance_id_list_with_options_async(request, headers, runtime)
def get_instance_id_list_with_options(
self,
request: dingtalkyida__1__0_models.GetInstanceIdListRequest,
headers: dingtalkyida__1__0_models.GetInstanceIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstanceIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
body = {}
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.instance_status):
body['instanceStatus'] = request.instance_status
if not UtilClient.is_unset(request.approved_result):
body['approvedResult'] = request.approved_result
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstanceIdListResponse(),
self.do_roarequest('GetInstanceIdList', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instanceIds', 'json', req, runtime)
)
async def get_instance_id_list_with_options_async(
self,
request: dingtalkyida__1__0_models.GetInstanceIdListRequest,
headers: dingtalkyida__1__0_models.GetInstanceIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstanceIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
body = {}
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.instance_status):
body['instanceStatus'] = request.instance_status
if not UtilClient.is_unset(request.approved_result):
body['approvedResult'] = request.approved_result
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstanceIdListResponse(),
await self.do_roarequest_async('GetInstanceIdList', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/processes/instanceIds', 'json', req, runtime)
)
def get_operation_records(
self,
request: dingtalkyida__1__0_models.GetOperationRecordsRequest,
) -> dingtalkyida__1__0_models.GetOperationRecordsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetOperationRecordsHeaders()
return self.get_operation_records_with_options(request, headers, runtime)
async def get_operation_records_async(
self,
request: dingtalkyida__1__0_models.GetOperationRecordsRequest,
) -> dingtalkyida__1__0_models.GetOperationRecordsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetOperationRecordsHeaders()
return await self.get_operation_records_with_options_async(request, headers, runtime)
def get_operation_records_with_options(
self,
request: dingtalkyida__1__0_models.GetOperationRecordsRequest,
headers: dingtalkyida__1__0_models.GetOperationRecordsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetOperationRecordsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetOperationRecordsResponse(),
self.do_roarequest('GetOperationRecords', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/operationRecords', 'json', req, runtime)
)
async def get_operation_records_with_options_async(
self,
request: dingtalkyida__1__0_models.GetOperationRecordsRequest,
headers: dingtalkyida__1__0_models.GetOperationRecordsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetOperationRecordsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetOperationRecordsResponse(),
await self.do_roarequest_async('GetOperationRecords', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/operationRecords', 'json', req, runtime)
)
def get_platform_resource(
self,
request: dingtalkyida__1__0_models.GetPlatformResourceRequest,
) -> dingtalkyida__1__0_models.GetPlatformResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPlatformResourceHeaders()
return self.get_platform_resource_with_options(request, headers, runtime)
async def get_platform_resource_async(
self,
request: dingtalkyida__1__0_models.GetPlatformResourceRequest,
) -> dingtalkyida__1__0_models.GetPlatformResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPlatformResourceHeaders()
return await self.get_platform_resource_with_options_async(request, headers, runtime)
def get_platform_resource_with_options(
self,
request: dingtalkyida__1__0_models.GetPlatformResourceRequest,
headers: dingtalkyida__1__0_models.GetPlatformResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPlatformResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPlatformResourceResponse(),
self.do_roarequest('GetPlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/platformResources', 'json', req, runtime)
)
async def get_platform_resource_with_options_async(
self,
request: dingtalkyida__1__0_models.GetPlatformResourceRequest,
headers: dingtalkyida__1__0_models.GetPlatformResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPlatformResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPlatformResourceResponse(),
await self.do_roarequest_async('GetPlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/platformResources', 'json', req, runtime)
)
def list_connector_information(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListConnectorInformationHeaders()
return self.list_connector_information_with_options(instance_id, request, headers, runtime)
async def list_connector_information_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListConnectorInformationHeaders()
return await self.list_connector_information_with_options_async(instance_id, request, headers, runtime)
def list_connector_information_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
headers: dingtalkyida__1__0_models.ListConnectorInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListConnectorInformationResponse(),
self.do_roarequest('ListConnectorInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/plugins/infos/{instance_id}', 'json', req, runtime)
)
async def list_connector_information_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
headers: dingtalkyida__1__0_models.ListConnectorInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListConnectorInformationResponse(),
await self.do_roarequest_async('ListConnectorInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/plugins/infos/{instance_id}', 'json', req, runtime)
)
def register_accounts(
self,
request: dingtalkyida__1__0_models.RegisterAccountsRequest,
) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RegisterAccountsHeaders()
return self.register_accounts_with_options(request, headers, runtime)
async def register_accounts_async(
self,
request: dingtalkyida__1__0_models.RegisterAccountsRequest,
) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RegisterAccountsHeaders()
return await self.register_accounts_with_options_async(request, headers, runtime)
def register_accounts_with_options(
self,
request: dingtalkyida__1__0_models.RegisterAccountsRequest,
headers: dingtalkyida__1__0_models.RegisterAccountsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.active_code):
body['activeCode'] = request.active_code
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RegisterAccountsResponse(),
self.do_roarequest('RegisterAccounts', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/accounts/register', 'json', req, runtime)
)
async def register_accounts_with_options_async(
self,
request: dingtalkyida__1__0_models.RegisterAccountsRequest,
headers: dingtalkyida__1__0_models.RegisterAccountsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.active_code):
body['activeCode'] = request.active_code
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RegisterAccountsResponse(),
await self.do_roarequest_async('RegisterAccounts', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/accounts/register', 'json', req, runtime)
)
def get_notify_me(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetNotifyMeHeaders()
return self.get_notify_me_with_options(user_id, request, headers, runtime)
async def get_notify_me_async(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetNotifyMeHeaders()
return await self.get_notify_me_with_options_async(user_id, request, headers, runtime)
def get_notify_me_with_options(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
headers: dingtalkyida__1__0_models.GetNotifyMeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.token):
query['token'] = request.token
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.instance_create_from_time_gmt):
query['instanceCreateFromTimeGMT'] = request.instance_create_from_time_gmt
if not UtilClient.is_unset(request.instance_create_to_time_gmt):
query['instanceCreateToTimeGMT'] = request.instance_create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetNotifyMeResponse(),
self.do_roarequest('GetNotifyMe', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/corpNotifications/{user_id}', 'json', req, runtime)
)
async def get_notify_me_with_options_async(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
headers: dingtalkyida__1__0_models.GetNotifyMeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.token):
query['token'] = request.token
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.instance_create_from_time_gmt):
query['instanceCreateFromTimeGMT'] = request.instance_create_from_time_gmt
if not UtilClient.is_unset(request.instance_create_to_time_gmt):
query['instanceCreateToTimeGMT'] = request.instance_create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetNotifyMeResponse(),
await self.do_roarequest_async('GetNotifyMe', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/corpNotifications/{user_id}', 'json', req, runtime)
)
def expire_commodity(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExpireCommodityHeaders()
return self.expire_commodity_with_options(request, headers, runtime)
async def expire_commodity_async(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExpireCommodityHeaders()
return await self.expire_commodity_with_options_async(request, headers, runtime)
def expire_commodity_with_options(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
headers: dingtalkyida__1__0_models.ExpireCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExpireCommodityResponse(),
self.do_roarequest('ExpireCommodity', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/appAuth/commodities/expire', 'json', req, runtime)
)
async def expire_commodity_with_options_async(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
headers: dingtalkyida__1__0_models.ExpireCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExpireCommodityResponse(),
await self.do_roarequest_async('ExpireCommodity', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/appAuth/commodities/expire', 'json', req, runtime)
)
def get_instance_by_id(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstanceByIdHeaders()
return self.get_instance_by_id_with_options(id, request, headers, runtime)
async def get_instance_by_id_async(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstanceByIdHeaders()
return await self.get_instance_by_id_with_options_async(id, request, headers, runtime)
def get_instance_by_id_with_options(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
headers: dingtalkyida__1__0_models.GetInstanceByIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstanceByIdResponse(),
self.do_roarequest('GetInstanceById', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/instancesInfos/{id}', 'json', req, runtime)
)
async def get_instance_by_id_with_options_async(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
headers: dingtalkyida__1__0_models.GetInstanceByIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstanceByIdResponse(),
await self.do_roarequest_async('GetInstanceById', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/instancesInfos/{id}', 'json', req, runtime)
)
def redirect_task(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RedirectTaskHeaders()
return self.redirect_task_with_options(request, headers, runtime)
async def redirect_task_async(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RedirectTaskHeaders()
return await self.redirect_task_with_options_async(request, headers, runtime)
def redirect_task_with_options(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
headers: dingtalkyida__1__0_models.RedirectTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.by_manager):
body['byManager'] = request.by_manager
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.now_action_executor_id):
body['nowActionExecutorId'] = request.now_action_executor_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RedirectTaskResponse(),
self.do_roarequest('RedirectTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/redirect', 'none', req, runtime)
)
async def redirect_task_with_options_async(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
headers: dingtalkyida__1__0_models.RedirectTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.by_manager):
body['byManager'] = request.by_manager
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.now_action_executor_id):
body['nowActionExecutorId'] = request.now_action_executor_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RedirectTaskResponse(),
await self.do_roarequest_async('RedirectTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/redirect', 'none', req, runtime)
)
def validate_order_update(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateOrderUpdateRequest,
) -> dingtalkyida__1__0_models.ValidateOrderUpdateResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderUpdateHeaders()
return self.validate_order_update_with_options(instance_id, request, headers, runtime)
async def validate_order_update_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateOrderUpdateRequest,
) -> dingtalkyida__1__0_models.ValidateOrderUpdateResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateOrderUpdateHeaders()
return await self.validate_order_update_with_options_async(instance_id, request, headers, runtime)
def validate_order_update_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateOrderUpdateRequest,
headers: dingtalkyida__1__0_models.ValidateOrderUpdateHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderUpdateResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderUpdateResponse(),
self.do_roarequest('ValidateOrderUpdate', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/orders/renewalReviews/{instance_id}', 'json', req, runtime)
)
async def validate_order_update_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateOrderUpdateRequest,
headers: dingtalkyida__1__0_models.ValidateOrderUpdateHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateOrderUpdateResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateOrderUpdateResponse(),
await self.do_roarequest_async('ValidateOrderUpdate', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/orders/renewalReviews/{instance_id}', 'json', req, runtime)
)
def get_form_component_definition_list(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.GetFormComponentDefinitionListRequest,
) -> dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetFormComponentDefinitionListHeaders()
return self.get_form_component_definition_list_with_options(app_type, form_uuid, request, headers, runtime)
async def get_form_component_definition_list_async(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.GetFormComponentDefinitionListRequest,
) -> dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetFormComponentDefinitionListHeaders()
return await self.get_form_component_definition_list_with_options_async(app_type, form_uuid, request, headers, runtime)
def get_form_component_definition_list_with_options(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.GetFormComponentDefinitionListRequest,
headers: dingtalkyida__1__0_models.GetFormComponentDefinitionListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.version):
query['version'] = request.version
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse(),
self.do_roarequest('GetFormComponentDefinitionList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/definitions/{app_type}/{form_uuid}', 'json', req, runtime)
)
async def get_form_component_definition_list_with_options_async(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.GetFormComponentDefinitionListRequest,
headers: dingtalkyida__1__0_models.GetFormComponentDefinitionListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.version):
query['version'] = request.version
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetFormComponentDefinitionListResponse(),
await self.do_roarequest_async('GetFormComponentDefinitionList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/definitions/{app_type}/{form_uuid}', 'json', req, runtime)
)
def get_print_app_info(
self,
request: dingtalkyida__1__0_models.GetPrintAppInfoRequest,
) -> dingtalkyida__1__0_models.GetPrintAppInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPrintAppInfoHeaders()
return self.get_print_app_info_with_options(request, headers, runtime)
async def get_print_app_info_async(
self,
request: dingtalkyida__1__0_models.GetPrintAppInfoRequest,
) -> dingtalkyida__1__0_models.GetPrintAppInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetPrintAppInfoHeaders()
return await self.get_print_app_info_with_options_async(request, headers, runtime)
def get_print_app_info_with_options(
self,
request: dingtalkyida__1__0_models.GetPrintAppInfoRequest,
headers: dingtalkyida__1__0_models.GetPrintAppInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPrintAppInfoResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.name_like):
query['nameLike'] = request.name_like
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPrintAppInfoResponse(),
self.do_roarequest('GetPrintAppInfo', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/printTemplates/printAppInfos', 'json', req, runtime)
)
async def get_print_app_info_with_options_async(
self,
request: dingtalkyida__1__0_models.GetPrintAppInfoRequest,
headers: dingtalkyida__1__0_models.GetPrintAppInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetPrintAppInfoResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.name_like):
query['nameLike'] = request.name_like
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetPrintAppInfoResponse(),
await self.do_roarequest_async('GetPrintAppInfo', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/printTemplates/printAppInfos', 'json', req, runtime)
)
def save_form_data(
self,
request: dingtalkyida__1__0_models.SaveFormDataRequest,
) -> dingtalkyida__1__0_models.SaveFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SaveFormDataHeaders()
return self.save_form_data_with_options(request, headers, runtime)
async def save_form_data_async(
self,
request: dingtalkyida__1__0_models.SaveFormDataRequest,
) -> dingtalkyida__1__0_models.SaveFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SaveFormDataHeaders()
return await self.save_form_data_with_options_async(request, headers, runtime)
def save_form_data_with_options(
self,
request: dingtalkyida__1__0_models.SaveFormDataRequest,
headers: dingtalkyida__1__0_models.SaveFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SaveFormDataResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SaveFormDataResponse(),
self.do_roarequest('SaveFormData', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances', 'json', req, runtime)
)
async def save_form_data_with_options_async(
self,
request: dingtalkyida__1__0_models.SaveFormDataRequest,
headers: dingtalkyida__1__0_models.SaveFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SaveFormDataResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.form_uuid):
body['formUuid'] = request.form_uuid
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SaveFormDataResponse(),
await self.do_roarequest_async('SaveFormData', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances', 'json', req, runtime)
)
def get_me_corp_submission(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetMeCorpSubmissionRequest,
) -> dingtalkyida__1__0_models.GetMeCorpSubmissionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetMeCorpSubmissionHeaders()
return self.get_me_corp_submission_with_options(user_id, request, headers, runtime)
async def get_me_corp_submission_async(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetMeCorpSubmissionRequest,
) -> dingtalkyida__1__0_models.GetMeCorpSubmissionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetMeCorpSubmissionHeaders()
return await self.get_me_corp_submission_with_options_async(user_id, request, headers, runtime)
def get_me_corp_submission_with_options(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetMeCorpSubmissionRequest,
headers: dingtalkyida__1__0_models.GetMeCorpSubmissionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetMeCorpSubmissionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.token):
query['token'] = request.token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetMeCorpSubmissionResponse(),
self.do_roarequest('GetMeCorpSubmission', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/myCorpSubmission/{user_id}', 'json', req, runtime)
)
async def get_me_corp_submission_with_options_async(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetMeCorpSubmissionRequest,
headers: dingtalkyida__1__0_models.GetMeCorpSubmissionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetMeCorpSubmissionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.token):
query['token'] = request.token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetMeCorpSubmissionResponse(),
await self.do_roarequest_async('GetMeCorpSubmission', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/tasks/myCorpSubmission/{user_id}', 'json', req, runtime)
)
def delete_form_data(
self,
request: dingtalkyida__1__0_models.DeleteFormDataRequest,
) -> dingtalkyida__1__0_models.DeleteFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteFormDataHeaders()
return self.delete_form_data_with_options(request, headers, runtime)
async def delete_form_data_async(
self,
request: dingtalkyida__1__0_models.DeleteFormDataRequest,
) -> dingtalkyida__1__0_models.DeleteFormDataResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteFormDataHeaders()
return await self.delete_form_data_with_options_async(request, headers, runtime)
def delete_form_data_with_options(
self,
request: dingtalkyida__1__0_models.DeleteFormDataRequest,
headers: dingtalkyida__1__0_models.DeleteFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteFormDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
query['formInstanceId'] = request.form_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteFormDataResponse(),
self.do_roarequest('DeleteFormData', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/forms/instances', 'none', req, runtime)
)
async def delete_form_data_with_options_async(
self,
request: dingtalkyida__1__0_models.DeleteFormDataRequest,
headers: dingtalkyida__1__0_models.DeleteFormDataHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteFormDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.form_instance_id):
query['formInstanceId'] = request.form_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteFormDataResponse(),
await self.do_roarequest_async('DeleteFormData', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/forms/instances', 'none', req, runtime)
)
def search_form_data_id_list(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.SearchFormDataIdListRequest,
) -> dingtalkyida__1__0_models.SearchFormDataIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchFormDataIdListHeaders()
return self.search_form_data_id_list_with_options(app_type, form_uuid, request, headers, runtime)
async def search_form_data_id_list_async(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.SearchFormDataIdListRequest,
) -> dingtalkyida__1__0_models.SearchFormDataIdListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.SearchFormDataIdListHeaders()
return await self.search_form_data_id_list_with_options_async(app_type, form_uuid, request, headers, runtime)
def search_form_data_id_list_with_options(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.SearchFormDataIdListRequest,
headers: dingtalkyida__1__0_models.SearchFormDataIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchFormDataIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
body = {}
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchFormDataIdListResponse(),
self.do_roarequest('SearchFormDataIdList', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances/ids/{app_type}/{form_uuid}', 'json', req, runtime)
)
async def search_form_data_id_list_with_options_async(
self,
app_type: str,
form_uuid: str,
request: dingtalkyida__1__0_models.SearchFormDataIdListRequest,
headers: dingtalkyida__1__0_models.SearchFormDataIdListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.SearchFormDataIdListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
body = {}
if not UtilClient.is_unset(request.modified_to_time_gmt):
body['modifiedToTimeGMT'] = request.modified_to_time_gmt
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.modified_from_time_gmt):
body['modifiedFromTimeGMT'] = request.modified_from_time_gmt
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.search_field_json):
body['searchFieldJson'] = request.search_field_json
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.originator_id):
body['originatorId'] = request.originator_id
if not UtilClient.is_unset(request.create_to_time_gmt):
body['createToTimeGMT'] = request.create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
body['createFromTimeGMT'] = request.create_from_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.SearchFormDataIdListResponse(),
await self.do_roarequest_async('SearchFormDataIdList', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/forms/instances/ids/{app_type}/{form_uuid}', 'json', req, runtime)
)
def get_activation_code_by_caller_union_id(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdRequest,
) -> dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdHeaders()
return self.get_activation_code_by_caller_union_id_with_options(caller_uid, request, headers, runtime)
async def get_activation_code_by_caller_union_id_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdRequest,
) -> dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdHeaders()
return await self.get_activation_code_by_caller_union_id_with_options_async(caller_uid, request, headers, runtime)
def get_activation_code_by_caller_union_id_with_options(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdRequest,
headers: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse(),
self.do_roarequest('GetActivationCodeByCallerUnionId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applications/activationCodes/{caller_uid}', 'json', req, runtime)
)
async def get_activation_code_by_caller_union_id_with_options_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdRequest,
headers: dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivationCodeByCallerUnionIdResponse(),
await self.do_roarequest_async('GetActivationCodeByCallerUnionId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applications/activationCodes/{caller_uid}', 'json', req, runtime)
)
def get_form_data_by_id(
self,
id: str,
request: dingtalkyida__1__0_models.GetFormDataByIDRequest,
) -> dingtalkyida__1__0_models.GetFormDataByIDResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetFormDataByIDHeaders()
return self.get_form_data_by_idwith_options(id, request, headers, runtime)
async def get_form_data_by_id_async(
self,
id: str,
request: dingtalkyida__1__0_models.GetFormDataByIDRequest,
) -> dingtalkyida__1__0_models.GetFormDataByIDResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetFormDataByIDHeaders()
return await self.get_form_data_by_idwith_options_async(id, request, headers, runtime)
def get_form_data_by_idwith_options(
self,
id: str,
request: dingtalkyida__1__0_models.GetFormDataByIDRequest,
headers: dingtalkyida__1__0_models.GetFormDataByIDHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetFormDataByIDResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetFormDataByIDResponse(),
self.do_roarequest('GetFormDataByID', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/instances/{id}', 'json', req, runtime)
)
async def get_form_data_by_idwith_options_async(
self,
id: str,
request: dingtalkyida__1__0_models.GetFormDataByIDRequest,
headers: dingtalkyida__1__0_models.GetFormDataByIDHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetFormDataByIDResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetFormDataByIDResponse(),
await self.do_roarequest_async('GetFormDataByID', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/forms/instances/{id}', 'json', req, runtime)
)
def refund_commodity(
self,
request: dingtalkyida__1__0_models.RefundCommodityRequest,
) -> dingtalkyida__1__0_models.RefundCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RefundCommodityHeaders()
return self.refund_commodity_with_options(request, headers, runtime)
async def refund_commodity_async(
self,
request: dingtalkyida__1__0_models.RefundCommodityRequest,
) -> dingtalkyida__1__0_models.RefundCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RefundCommodityHeaders()
return await self.refund_commodity_with_options_async(request, headers, runtime)
def refund_commodity_with_options(
self,
request: dingtalkyida__1__0_models.RefundCommodityRequest,
headers: dingtalkyida__1__0_models.RefundCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RefundCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RefundCommodityResponse(),
self.do_roarequest('RefundCommodity', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/appAuth/commodities/refund', 'json', req, runtime)
)
async def refund_commodity_with_options_async(
self,
request: dingtalkyida__1__0_models.RefundCommodityRequest,
headers: dingtalkyida__1__0_models.RefundCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RefundCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RefundCommodityResponse(),
await self.do_roarequest_async('RefundCommodity', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/appAuth/commodities/refund', 'json', req, runtime)
)
def delete_sequence(
self,
request: dingtalkyida__1__0_models.DeleteSequenceRequest,
) -> dingtalkyida__1__0_models.DeleteSequenceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteSequenceHeaders()
return self.delete_sequence_with_options(request, headers, runtime)
async def delete_sequence_async(
self,
request: dingtalkyida__1__0_models.DeleteSequenceRequest,
) -> dingtalkyida__1__0_models.DeleteSequenceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteSequenceHeaders()
return await self.delete_sequence_with_options_async(request, headers, runtime)
def delete_sequence_with_options(
self,
request: dingtalkyida__1__0_models.DeleteSequenceRequest,
headers: dingtalkyida__1__0_models.DeleteSequenceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteSequenceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.sequence):
query['sequence'] = request.sequence
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteSequenceResponse(),
self.do_roarequest('DeleteSequence', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/forms/deleteSequence', 'none', req, runtime)
)
async def delete_sequence_with_options_async(
self,
request: dingtalkyida__1__0_models.DeleteSequenceRequest,
headers: dingtalkyida__1__0_models.DeleteSequenceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteSequenceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.sequence):
query['sequence'] = request.sequence
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteSequenceResponse(),
await self.do_roarequest_async('DeleteSequence', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/forms/deleteSequence', 'none', req, runtime)
)
def release_commodity(
self,
request: dingtalkyida__1__0_models.ReleaseCommodityRequest,
) -> dingtalkyida__1__0_models.ReleaseCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ReleaseCommodityHeaders()
return self.release_commodity_with_options(request, headers, runtime)
async def release_commodity_async(
self,
request: dingtalkyida__1__0_models.ReleaseCommodityRequest,
) -> dingtalkyida__1__0_models.ReleaseCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ReleaseCommodityHeaders()
return await self.release_commodity_with_options_async(request, headers, runtime)
def release_commodity_with_options(
self,
request: dingtalkyida__1__0_models.ReleaseCommodityRequest,
headers: dingtalkyida__1__0_models.ReleaseCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ReleaseCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ReleaseCommodityResponse(),
self.do_roarequest('ReleaseCommodity', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/appAuth/commodities/release', 'json', req, runtime)
)
async def release_commodity_with_options_async(
self,
request: dingtalkyida__1__0_models.ReleaseCommodityRequest,
headers: dingtalkyida__1__0_models.ReleaseCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ReleaseCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ReleaseCommodityResponse(),
await self.do_roarequest_async('ReleaseCommodity', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/appAuth/commodities/release', 'json', req, runtime)
)
def render_batch_callback(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenderBatchCallbackHeaders()
return self.render_batch_callback_with_options(request, headers, runtime)
async def render_batch_callback_async(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenderBatchCallbackHeaders()
return await self.render_batch_callback_with_options_async(request, headers, runtime)
def render_batch_callback_with_options(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
headers: dingtalkyida__1__0_models.RenderBatchCallbackHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.oss_url):
body['ossUrl'] = request.oss_url
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.file_size):
body['fileSize'] = request.file_size
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.namespace):
body['namespace'] = request.namespace
if not UtilClient.is_unset(request.time_zone):
body['timeZone'] = request.time_zone
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.source):
body['source'] = request.source
if not UtilClient.is_unset(request.sequence_id):
body['sequenceId'] = request.sequence_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenderBatchCallbackResponse(),
self.do_roarequest('RenderBatchCallback', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/printings/callbacks/batch', 'none', req, runtime)
)
async def render_batch_callback_with_options_async(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
headers: dingtalkyida__1__0_models.RenderBatchCallbackHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.oss_url):
body['ossUrl'] = request.oss_url
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.file_size):
body['fileSize'] = request.file_size
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.namespace):
body['namespace'] = request.namespace
if not UtilClient.is_unset(request.time_zone):
body['timeZone'] = request.time_zone
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.source):
body['source'] = request.source
if not UtilClient.is_unset(request.sequence_id):
body['sequenceId'] = request.sequence_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenderBatchCallbackResponse(),
await self.do_roarequest_async('RenderBatchCallback', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/printings/callbacks/batch', 'none', req, runtime)
)
def get_open_url(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetOpenUrlHeaders()
return self.get_open_url_with_options(app_type, request, headers, runtime)
async def get_open_url_async(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetOpenUrlHeaders()
return await self.get_open_url_with_options_async(app_type, request, headers, runtime)
def get_open_url_with_options(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
headers: dingtalkyida__1__0_models.GetOpenUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.file_url):
query['fileUrl'] = request.file_url
if not UtilClient.is_unset(request.timeout):
query['timeout'] = request.timeout
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetOpenUrlResponse(),
self.do_roarequest('GetOpenUrl', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/temporaryUrls/{app_type}', 'json', req, runtime)
)
async def get_open_url_with_options_async(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
headers: dingtalkyida__1__0_models.GetOpenUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.file_url):
query['fileUrl'] = request.file_url
if not UtilClient.is_unset(request.timeout):
query['timeout'] = request.timeout
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetOpenUrlResponse(),
await self.do_roarequest_async('GetOpenUrl', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/temporaryUrls/{app_type}', 'json', req, runtime)
)
def get_sale_user_info_by_user_id(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders()
return self.get_sale_user_info_by_user_id_with_options(request, headers, runtime)
async def get_sale_user_info_by_user_id_async(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders()
return await self.get_sale_user_info_by_user_id_with_options_async(request, headers, runtime)
def get_sale_user_info_by_user_id_with_options(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
headers: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.namespace):
query['namespace'] = request.namespace
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse(),
self.do_roarequest('GetSaleUserInfoByUserId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/saleUserInfo', 'json', req, runtime)
)
async def get_sale_user_info_by_user_id_with_options_async(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
headers: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.namespace):
query['namespace'] = request.namespace
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse(),
await self.do_roarequest_async('GetSaleUserInfoByUserId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/saleUserInfo', 'json', req, runtime)
)
def validate_application_authorization_order(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders()
return self.validate_application_authorization_order_with_options(instance_id, request, headers, runtime)
async def validate_application_authorization_order_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders()
return await self.validate_application_authorization_order_with_options_async(instance_id, request, headers, runtime)
def validate_application_authorization_order_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse(),
self.do_roarequest('ValidateApplicationAuthorizationOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applicationOrderUpdateAuthorizations/{instance_id}', 'json', req, runtime)
)
async def validate_application_authorization_order_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse(),
await self.do_roarequest_async('ValidateApplicationAuthorizationOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applicationOrderUpdateAuthorizations/{instance_id}', 'json', req, runtime)
)
def execute_task(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecuteTaskHeaders()
return self.execute_task_with_options(request, headers, runtime)
async def execute_task_async(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecuteTaskHeaders()
return await self.execute_task_with_options_async(request, headers, runtime)
def execute_task_with_options(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
headers: dingtalkyida__1__0_models.ExecuteTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.out_result):
body['outResult'] = request.out_result
if not UtilClient.is_unset(request.no_execute_expressions):
body['noExecuteExpressions'] = request.no_execute_expressions
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecuteTaskResponse(),
self.do_roarequest('ExecuteTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/execute', 'none', req, runtime)
)
async def execute_task_with_options_async(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
headers: dingtalkyida__1__0_models.ExecuteTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.out_result):
body['outResult'] = request.out_result
if not UtilClient.is_unset(request.no_execute_expressions):
body['noExecuteExpressions'] = request.no_execute_expressions
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecuteTaskResponse(),
await self.do_roarequest_async('ExecuteTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/execute', 'none', req, runtime)
)
def delete_instance(
self,
request: dingtalkyida__1__0_models.DeleteInstanceRequest,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteInstanceHeaders()
return self.delete_instance_with_options(request, headers, runtime)
async def delete_instance_async(
    self,
    request: dingtalkyida__1__0_models.DeleteInstanceRequest,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
    """Async variant: delete a Yida process instance with default headers/runtime."""
    run_opts = util_models.RuntimeOptions()
    default_headers = dingtalkyida__1__0_models.DeleteInstanceHeaders()
    return await self.delete_instance_with_options_async(request, default_headers, run_opts)
def delete_instance_with_options(
    self,
    request: dingtalkyida__1__0_models.DeleteInstanceRequest,
    headers: dingtalkyida__1__0_models.DeleteInstanceHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
    """Delete a Yida process instance via DELETE /v1.0/yida/processes/instances."""
    UtilClient.validate_model(request)
    # Copy each set request field into the query map, preserving insertion order.
    query = {}
    for key, field in (
        ('appType', request.app_type),
        ('systemToken', request.system_token),
        ('userId', request.user_id),
        ('language', request.language),
        ('processInstanceId', request.process_instance_id),
    ):
        if not UtilClient.is_unset(field):
            query[key] = field
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        # NOTE(review): aliases headers.common_headers (original behavior preserved);
        # adding the token key below mutates the caller's dict.
        real_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        query=OpenApiUtilClient.query(query),
    )
    return TeaCore.from_map(
        dingtalkyida__1__0_models.DeleteInstanceResponse(),
        self.do_roarequest('DeleteInstance', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/processes/instances', 'none', req, runtime)
    )
async def delete_instance_with_options_async(
    self,
    request: dingtalkyida__1__0_models.DeleteInstanceRequest,
    headers: dingtalkyida__1__0_models.DeleteInstanceHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
    """Async variant: delete a Yida process instance via DELETE /v1.0/yida/processes/instances."""
    UtilClient.validate_model(request)
    # Copy each set request field into the query map, preserving insertion order.
    query = {}
    for key, field in (
        ('appType', request.app_type),
        ('systemToken', request.system_token),
        ('userId', request.user_id),
        ('language', request.language),
        ('processInstanceId', request.process_instance_id),
    ):
        if not UtilClient.is_unset(field):
            query[key] = field
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        # NOTE(review): aliases headers.common_headers (original behavior preserved);
        # adding the token key below mutates the caller's dict.
        real_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        query=OpenApiUtilClient.query(query),
    )
    return TeaCore.from_map(
        dingtalkyida__1__0_models.DeleteInstanceResponse(),
        await self.do_roarequest_async('DeleteInstance', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/processes/instances', 'none', req, runtime)
    )
from __future__ import annotations
import inspect
import os
import re
import shlex
from enum import Enum
from typing import Iterable, Pattern, Sequence
from pants.option.errors import ParseError
from pants.util.eval import parse_expression
from pants.util.memo import memoized_method
class UnsetBool:
    """Sentinel type used as the default of a bool-typed option to mean "not set".

    `bool`-typed options declared with `default=UnsetBool` and never explicitly set
    evaluate to `None`, giving a tri-state (True / False / unset).

    :API: public
    """

    def __init__(self) -> None:
        # The class object itself is the sentinel; instances are never created.
        raise NotImplementedError(
            "UnsetBool cannot be instantiated. It should only be used as a sentinel type."
        )

    @classmethod
    def coerce_bool(cls, value: type[UnsetBool] | bool | None, default: bool) -> bool:
        """Collapse the tri-state `value` onto a concrete bool, using `default` for unset."""
        if value is None or value is cls:
            return default
        assert isinstance(value, bool)
        return value
def target_option(s: str) -> str:
    """Identity passthrough that marks an option value as a single target spec.

    :API: public
    TODO(stuhood): Eagerly convert these to Addresses: see https://rbcommons.com/s/twitter/r/2937/
    """
    spec = s
    return spec
def _normalize_directory_separators(s: str) -> str:
"""Coalesce runs of consecutive instances of `os.sep` in `s`, e.g. '//' -> '/' on POSIX.
The engine will use paths or target addresses either to form globs or to string-match against, and
including the directory separator '/' multiple times in a row e.g. '//' produces an equivalent
glob as with a single '/', but produces a different actual string, which will cause the engine to
fail to glob file paths or target specs correctly.
TODO: give the engine more control over matching paths so we don't have to sanitize the input!
"""
return os.path.normpath(s)
def dir_option(s: str) -> str:
    """Same type as 'str', but marks the string as a directory path.

    :API: public
    """
    normalized = _normalize_directory_separators(s)
    return normalized
def file_option(s: str) -> str:
    """Same type as 'str', but marks the string as a file path.

    :API: public
    """
    normalized = _normalize_directory_separators(s)
    return normalized
def dict_with_files_option(s):
    """Same as 'dict', but fingerprints file contents for values that are file paths.

    For a value matching an on-disk file path, the contents (not the path itself) are
    fingerprinted.

    :API: public
    """
    component = DictValueComponent.create(s)
    return component
def shell_str(s: str) -> str:
    """A member_type marking strings to be split with `shlex.split()` at parse time.

    E.g. the option value `--foo --bar=val` becomes `['--foo', '--bar=val']`, which the
    parser then merges safely with any other values defined for the option.

    :API: public
    """
    raw = s
    return raw
def memory_size(s: str | int | float) -> int:
    """Parse a memory size with an optional {GiB, MiB, KiB, B} suffix into bytes.

    Bare numbers (and int/float inputs) are taken as a byte count directly.

    :API: public
    :raises ParseError: if the value is empty or not a recognized size.
    """
    if isinstance(s, (int, float)):
        return int(s)
    if not s:
        raise ParseError("Missing value.")
    original = s
    s = s.lower().strip()
    try:
        # Bare number: interpret directly as bytes.
        return int(float(s))
    except ValueError:
        pass
    invalid = ParseError(
        f"Invalid value: `{original}`. Expected either a bare number or a number with one of "
        f"`GiB`, `MiB`, `KiB`, or `B`."
    )

    def convert_to_bytes(power_of_2) -> int:
        # BUGFIX: float() raises ValueError (not TypeError) on a malformed number,
        # so inputs like "xGiB" previously escaped as a raw ValueError instead of
        # the intended ParseError. Catch both.
        try:
            return int(float(s[:-3]) * (2 ** power_of_2))  # type: ignore[index]
        except (TypeError, ValueError):
            raise invalid

    if s.endswith("gib"):
        return convert_to_bytes(30)
    elif s.endswith("mib"):
        return convert_to_bytes(20)
    elif s.endswith("kib"):
        return convert_to_bytes(10)
    elif s.endswith("b"):
        try:
            return int(float(s[:-1]))
        except (TypeError, ValueError):  # BUGFIX: was TypeError only (see above).
            raise invalid
    raise invalid
def _convert(val, acceptable_types):
"""Ensure that val is one of the acceptable types, converting it if needed.
:param val: The value we're parsing (either a string or one of the acceptable types).
:param acceptable_types: A tuple of expected types for val.
:returns: The parsed value.
:raises :class:`pants.options.errors.ParseError`: if there was a problem parsing the val as an
acceptable type.
"""
if isinstance(val, acceptable_types):
return val
return parse_expression(val, acceptable_types, raise_type=ParseError)
def _convert_list(val, member_type, is_enum):
    """Parse `val` into a list/tuple, coercing members into `member_type` enums if needed."""
    items = _convert(val, (list, tuple))
    if is_enum:
        # Wrap each raw member in the enum type unless it already is one.
        return [member_type(item) if not isinstance(item, member_type) else item for item in items]
    return items
def _flatten_shlexed_list(shlexed_args: Sequence[str]) -> list[str]:
"""Convert a list of shlexed args into a flattened list of individual args.
For example, ['arg1 arg2=foo', '--arg3'] would be converted to ['arg1', 'arg2=foo', '--arg3'].
"""
return [arg for shlexed_arg in shlexed_args for arg in shlex.split(shlexed_arg)]
class ListValueComponent:
    """A component of the value of a list-typed option.

    One or more instances of this class can be merged to form a list value.
    A component consists of values to append and values to filter while constructing the
    final list. Each component may either replace or modify the preceding component, so
    that e.g. a config file can append to and/or filter the default value list instead of
    repeating most of it.
    """

    REPLACE = "REPLACE"
    MODIFY = "MODIFY"

    # We use a regex to parse the comma-separated lists of modifier expressions (each of which
    # is a list or tuple literal preceded by a + or a -). These expressions are technically a
    # context-free grammar, but in practice this regex heuristic works fine; inputs that defeat
    # it are extremely unlikely. If ever encountered, replace this with a real parser.
    @classmethod
    @memoized_method
    def _get_modifier_expr_re(cls) -> Pattern[str]:
        # Positive lookbehind for ] or ), then a comma (possibly surrounded by whitespace),
        # then a positive lookahead for +[ / +( / -[ / -(. The assertions mean the
        # bracket/paren characters are not consumed by the split.
        return re.compile(r"(?<=\]|\))\s*,\s*(?=[+-](?:\[|\())")

    @classmethod
    def _split_modifier_expr(cls, s: str) -> list[str]:
        # Only a string that starts with a modifier can be a comma-separated sequence of
        # modifier expressions.
        if s.startswith("+") or s.startswith("-"):
            return cls._get_modifier_expr_re().split(s)
        return [s]

    @classmethod
    def merge(cls, components: Iterable[ListValueComponent]) -> ListValueComponent:
        """Merges components into a single component, applying their actions appropriately.

        This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c).
        """
        # The merged component's action is MODIFY until the first REPLACE is encountered.
        # This guarantees associativity.
        action = cls.MODIFY
        appends = []
        filters = []
        for component in components:
            if component._action is cls.REPLACE:
                # BUGFIX: copy instead of aliasing the component's internal lists; the
                # extend() calls below would otherwise mutate the input component in place.
                appends = list(component._appends)
                filters = list(component._filters)
                action = cls.REPLACE
            elif component._action is cls.MODIFY:
                appends.extend(component._appends)
                filters.extend(component._filters)
            else:
                raise ParseError(f"Unknown action for list value: {component._action}")
        return cls(action, appends, filters)

    def __init__(self, action: str, appends: list, filters: list) -> None:
        self._action = action
        self._appends = appends
        self._filters = filters

    @property
    def val(self) -> list:
        """The final list: the appends, minus every occurrence of each filtered value."""
        ret = list(self._appends)
        for x in self._filters:
            # Can't use ret.remove(x): that only removes the first instance of x.
            ret = [y for y in ret if y != x]
        return ret

    @property
    def action(self):
        """Whether this component REPLACEs or MODIFYs the preceding component."""
        return self._action

    @classmethod
    def create(cls, value, member_type=str) -> ListValueComponent:
        """Interpret value as either a list or something to extend another list with.

        Note that we accept tuple literals, but the internal value is always a list.

        :param value: The value to convert. Can be an instance of ListValueComponent, a list,
                      a tuple, a string representation of a list or tuple (possibly prefixed
                      by + or - indicating modification instead of replacement), or any allowed
                      member_type. May also be a comma-separated sequence of modifications.
        """
        if isinstance(value, cls):  # Ensure idempotency.
            return value
        if isinstance(value, bytes):
            value = value.decode()
        if isinstance(value, str):
            comma_separated_exprs = cls._split_modifier_expr(value)
            if len(comma_separated_exprs) > 1:
                return cls.merge([cls.create(x) for x in comma_separated_exprs])
        action = cls.MODIFY
        appends: Sequence[str] = []
        filters: Sequence[str] = []
        is_enum = inspect.isclass(member_type) and issubclass(member_type, Enum)
        if isinstance(value, (list, tuple)):  # Handle list-typed default values.
            action = cls.REPLACE
            appends = value
        elif value.startswith("[") or value.startswith("("):
            action = cls.REPLACE
            appends = _convert_list(value, member_type, is_enum)
        elif value.startswith("+[") or value.startswith("+("):
            appends = _convert_list(value[1:], member_type, is_enum)
        elif value.startswith("-[") or value.startswith("-("):
            filters = _convert_list(value[1:], member_type, is_enum)
        elif is_enum and isinstance(value, str):
            appends = _convert_list([value], member_type, True)
        elif isinstance(value, str):
            appends = [value]
        else:
            # A bare scalar of member_type: wrap it in a singleton list literal.
            appends = _convert(f"[{value}]", list)
        if member_type == shell_str:
            appends = _flatten_shlexed_list(appends)
            filters = _flatten_shlexed_list(filters)
        return cls(action, list(appends), list(filters))

    def __repr__(self) -> str:
        return f"{self._action} +{self._appends} -{self._filters}"
class DictValueComponent:
    """A component of the value of a dict-typed option.

    One or more instances of this class can be merged to form a dict value.
    Each component may either replace or extend the preceding component, so that e.g. a
    config file can extend the default value of a dict instead of repeating it.
    """

    REPLACE = "REPLACE"
    EXTEND = "EXTEND"

    @classmethod
    def merge(cls, components: Iterable[DictValueComponent]) -> DictValueComponent:
        """Merges components into a single component, applying their actions appropriately.

        This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c).
        """
        # The merged component's action is EXTEND until the first REPLACE is encountered.
        # This guarantees associativity.
        action = cls.EXTEND
        val = {}
        for component in components:
            if component.action is cls.REPLACE:
                # BUGFIX: copy instead of aliasing the component's dict; the update() call
                # below would otherwise mutate the input component in place.
                val = dict(component.val)
                action = cls.REPLACE
            elif component.action is cls.EXTEND:
                val.update(component.val)
            else:
                raise ParseError(f"Unknown action for dict value: {component.action}")
        return cls(action, val)

    def __init__(self, action: str, val: dict) -> None:
        self.action = action
        self.val = val

    @classmethod
    def create(cls, value) -> DictValueComponent:
        """Interpret value as either a dict or something to extend another dict with.

        :param value: The value to convert. Can be an instance of DictValueComponent, a dict,
                      or a string representation (possibly prefixed by +) of a dict.
        """
        if isinstance(value, bytes):
            value = value.decode()
        if isinstance(value, cls):  # Ensure idempotency.
            action = value.action
            val = value.val
        elif isinstance(value, dict):  # Handle dict-typed default values.
            action = cls.REPLACE
            val = value
        elif value.startswith("{"):
            action = cls.REPLACE
            val = _convert(value, dict)
        elif value.startswith("+{"):
            action = cls.EXTEND
            val = _convert(value[1:], dict)
        else:
            raise ParseError(f"Invalid dict value: {value}")
        return cls(action, dict(val))

    def __repr__(self) -> str:
        return f"{self.action} {self.val}"
from __future__ import annotations
import inspect
import os
import re
import shlex
from enum import Enum
from typing import Iterable, Pattern, Sequence
from pants.option.errors import ParseError
from pants.util.eval import parse_expression
from pants.util.memo import memoized_method
class UnsetBool:
    """Sentinel type used as the default of a bool-typed option to mean "not set".

    `bool`-typed options declared with `default=UnsetBool` and never explicitly set
    evaluate to `None`, giving a tri-state (True / False / unset).

    :API: public
    """

    def __init__(self) -> None:
        # The class object itself is the sentinel; instances are never created.
        raise NotImplementedError(
            "UnsetBool cannot be instantiated. It should only be used as a sentinel type."
        )

    @classmethod
    def coerce_bool(cls, value: type[UnsetBool] | bool | None, default: bool) -> bool:
        """Collapse the tri-state `value` onto a concrete bool, using `default` for unset."""
        if value is None or value is cls:
            return default
        assert isinstance(value, bool)
        return value
def target_option(s: str) -> str:
    """Identity passthrough that marks an option value as a single target spec.

    :API: public
    TODO(stuhood): Eagerly convert these to Addresses: see https://rbcommons.com/s/twitter/r/2937/
    """
    spec = s
    return spec
def _normalize_directory_separators(s: str) -> str:
"""Coalesce runs of consecutive instances of `os.sep` in `s`, e.g. '//' -> '/' on POSIX.
The engine will use paths or target addresses either to form globs or to string-match against, and
including the directory separator '/' multiple times in a row e.g. '//' produces an equivalent
glob as with a single '/', but produces a different actual string, which will cause the engine to
fail to glob file paths or target specs correctly.
TODO: give the engine more control over matching paths so we don't have to sanitize the input!
"""
return os.path.normpath(s)
def dir_option(s: str) -> str:
    """Same type as 'str', but marks the string as a directory path.

    :API: public
    """
    normalized = _normalize_directory_separators(s)
    return normalized
def file_option(s: str) -> str:
    """Same type as 'str', but marks the string as a file path.

    :API: public
    """
    normalized = _normalize_directory_separators(s)
    return normalized
def dict_with_files_option(s):
    """Same as 'dict', but fingerprints file contents for values that are file paths.

    For a value matching an on-disk file path, the contents (not the path itself) are
    fingerprinted.

    :API: public
    """
    component = DictValueComponent.create(s)
    return component
def shell_str(s: str) -> str:
    """A member_type marking strings to be split with `shlex.split()` at parse time.

    E.g. the option value `--foo --bar=val` becomes `['--foo', '--bar=val']`, which the
    parser then merges safely with any other values defined for the option.

    :API: public
    """
    raw = s
    return raw
def memory_size(s: str | int | float) -> int:
    """Parse a memory size with an optional {GiB, MiB, KiB, B} suffix into bytes.

    Bare numbers (and int/float inputs) are taken as a byte count directly.

    :API: public
    :raises ParseError: if the value is empty or not a recognized size.
    """
    if isinstance(s, (int, float)):
        return int(s)
    if not s:
        raise ParseError("Missing value.")
    original = s
    s = s.lower().strip()
    try:
        # Bare number: interpret directly as bytes.
        return int(float(s))
    except ValueError:
        pass
    invalid = ParseError(
        f"Invalid value: `{original}`. Expected either a bare number or a number with one of "
        f"`GiB`, `MiB`, `KiB`, or `B`."
    )

    def convert_to_bytes(power_of_2) -> int:
        # BUGFIX: float() raises ValueError (not TypeError) on a malformed number,
        # so inputs like "xGiB" previously escaped as a raw ValueError instead of
        # the intended ParseError. Catch both.
        try:
            return int(float(s[:-3]) * (2 ** power_of_2))  # type: ignore[index]
        except (TypeError, ValueError):
            raise invalid

    if s.endswith("gib"):
        return convert_to_bytes(30)
    elif s.endswith("mib"):
        return convert_to_bytes(20)
    elif s.endswith("kib"):
        return convert_to_bytes(10)
    elif s.endswith("b"):
        try:
            return int(float(s[:-1]))
        except (TypeError, ValueError):  # BUGFIX: was TypeError only (see above).
            raise invalid
    raise invalid
def _convert(val, acceptable_types):
"""Ensure that val is one of the acceptable types, converting it if needed.
:param val: The value we're parsing (either a string or one of the acceptable types).
:param acceptable_types: A tuple of expected types for val.
:returns: The parsed value.
:raises :class:`pants.options.errors.ParseError`: if there was a problem parsing the val as an
acceptable type.
"""
if isinstance(val, acceptable_types):
return val
return parse_expression(val, acceptable_types, raise_type=ParseError)
def _convert_list(val, member_type, is_enum):
    """Parse `val` into a list/tuple, coercing members into `member_type` enums if needed."""
    items = _convert(val, (list, tuple))
    if is_enum:
        # Wrap each raw member in the enum type unless it already is one.
        return [member_type(item) if not isinstance(item, member_type) else item for item in items]
    return items
def _flatten_shlexed_list(shlexed_args: Sequence[str]) -> list[str]:
"""Convert a list of shlexed args into a flattened list of individual args.
For example, ['arg1 arg2=foo', '--arg3'] would be converted to ['arg1', 'arg2=foo', '--arg3'].
"""
return [arg for shlexed_arg in shlexed_args for arg in shlex.split(shlexed_arg)]
class ListValueComponent:
    """A component of the value of a list-typed option.

    One or more instances of this class can be merged to form a list value.
    A component consists of values to append and values to filter while constructing the
    final list. Each component may either replace or modify the preceding component, so
    that e.g. a config file can append to and/or filter the default value list instead of
    repeating most of it.
    """

    REPLACE = "REPLACE"
    MODIFY = "MODIFY"

    # We use a regex to parse the comma-separated lists of modifier expressions (each of which
    # is a list or tuple literal preceded by a + or a -). These expressions are technically a
    # context-free grammar, but in practice this regex heuristic works fine; inputs that defeat
    # it are extremely unlikely. If ever encountered, replace this with a real parser.
    @classmethod
    @memoized_method
    def _get_modifier_expr_re(cls) -> Pattern[str]:
        # Positive lookbehind for ] or ), then a comma (possibly surrounded by whitespace),
        # then a positive lookahead for +[ / +( / -[ / -(. The assertions mean the
        # bracket/paren characters are not consumed by the split.
        return re.compile(r"(?<=\]|\))\s*,\s*(?=[+-](?:\[|\())")

    @classmethod
    def _split_modifier_expr(cls, s: str) -> list[str]:
        # Only a string that starts with a modifier can be a comma-separated sequence of
        # modifier expressions.
        if s.startswith("+") or s.startswith("-"):
            return cls._get_modifier_expr_re().split(s)
        return [s]

    @classmethod
    def merge(cls, components: Iterable[ListValueComponent]) -> ListValueComponent:
        """Merges components into a single component, applying their actions appropriately.

        This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c).
        """
        # The merged component's action is MODIFY until the first REPLACE is encountered.
        # This guarantees associativity.
        action = cls.MODIFY
        appends = []
        filters = []
        for component in components:
            if component._action is cls.REPLACE:
                # BUGFIX: copy instead of aliasing the component's internal lists; the
                # extend() calls below would otherwise mutate the input component in place.
                appends = list(component._appends)
                filters = list(component._filters)
                action = cls.REPLACE
            elif component._action is cls.MODIFY:
                appends.extend(component._appends)
                filters.extend(component._filters)
            else:
                raise ParseError(f"Unknown action for list value: {component._action}")
        return cls(action, appends, filters)

    def __init__(self, action: str, appends: list, filters: list) -> None:
        self._action = action
        self._appends = appends
        self._filters = filters

    @property
    def val(self) -> list:
        """The final list: the appends, minus every occurrence of each filtered value."""
        ret = list(self._appends)
        for x in self._filters:
            # Can't use ret.remove(x): that only removes the first instance of x.
            ret = [y for y in ret if y != x]
        return ret

    @property
    def action(self):
        """Whether this component REPLACEs or MODIFYs the preceding component."""
        return self._action

    @classmethod
    def create(cls, value, member_type=str) -> ListValueComponent:
        """Interpret value as either a list or something to extend another list with.

        Note that we accept tuple literals, but the internal value is always a list.

        :param value: The value to convert. Can be an instance of ListValueComponent, a list,
                      a tuple, a string representation of a list or tuple (possibly prefixed
                      by + or - indicating modification instead of replacement), or any allowed
                      member_type. May also be a comma-separated sequence of modifications.
        """
        if isinstance(value, cls):  # Ensure idempotency.
            return value
        if isinstance(value, bytes):
            value = value.decode()
        if isinstance(value, str):
            comma_separated_exprs = cls._split_modifier_expr(value)
            if len(comma_separated_exprs) > 1:
                return cls.merge([cls.create(x) for x in comma_separated_exprs])
        action = cls.MODIFY
        appends: Sequence[str] = []
        filters: Sequence[str] = []
        is_enum = inspect.isclass(member_type) and issubclass(member_type, Enum)
        if isinstance(value, (list, tuple)):  # Handle list-typed default values.
            action = cls.REPLACE
            appends = value
        elif value.startswith("[") or value.startswith("("):
            action = cls.REPLACE
            appends = _convert_list(value, member_type, is_enum)
        elif value.startswith("+[") or value.startswith("+("):
            appends = _convert_list(value[1:], member_type, is_enum)
        elif value.startswith("-[") or value.startswith("-("):
            filters = _convert_list(value[1:], member_type, is_enum)
        elif is_enum and isinstance(value, str):
            appends = _convert_list([value], member_type, True)
        elif isinstance(value, str):
            appends = [value]
        else:
            # A bare scalar of member_type: wrap it in a singleton list literal.
            appends = _convert(f"[{value}]", list)
        if member_type == shell_str:
            appends = _flatten_shlexed_list(appends)
            filters = _flatten_shlexed_list(filters)
        return cls(action, list(appends), list(filters))

    def __repr__(self) -> str:
        return f"{self._action} +{self._appends} -{self._filters}"
class DictValueComponent:
    """A component of the value of a dict-typed option.

    One or more instances of this class can be merged to form a dict value.
    Each component may either replace or extend the preceding component, so that e.g. a
    config file can extend the default value of a dict instead of repeating it.
    """

    REPLACE = "REPLACE"
    EXTEND = "EXTEND"

    @classmethod
    def merge(cls, components: Iterable[DictValueComponent]) -> DictValueComponent:
        """Merges components into a single component, applying their actions appropriately.

        This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c).
        """
        # The merged component's action is EXTEND until the first REPLACE is encountered.
        # This guarantees associativity.
        action = cls.EXTEND
        val = {}
        for component in components:
            if component.action is cls.REPLACE:
                # BUGFIX: copy instead of aliasing the component's dict; the update() call
                # below would otherwise mutate the input component in place.
                val = dict(component.val)
                action = cls.REPLACE
            elif component.action is cls.EXTEND:
                val.update(component.val)
            else:
                raise ParseError(f"Unknown action for dict value: {component.action}")
        return cls(action, val)

    def __init__(self, action: str, val: dict) -> None:
        self.action = action
        self.val = val

    @classmethod
    def create(cls, value) -> DictValueComponent:
        """Interpret value as either a dict or something to extend another dict with.

        :param value: The value to convert. Can be an instance of DictValueComponent, a dict,
                      or a string representation (possibly prefixed by +) of a dict.
        """
        if isinstance(value, bytes):
            value = value.decode()
        if isinstance(value, cls):  # Ensure idempotency.
            action = value.action
            val = value.val
        elif isinstance(value, dict):  # Handle dict-typed default values.
            action = cls.REPLACE
            val = value
        elif value.startswith("{"):
            action = cls.REPLACE
            val = _convert(value, dict)
        elif value.startswith("+{"):
            action = cls.EXTEND
            val = _convert(value[1:], dict)
        else:
            raise ParseError(f"Invalid dict value: {value}")
        return cls(action, dict(val))

    def __repr__(self) -> str:
        return f"{self.action} {self.val}"
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from math import e
def plotData(
        fileTypePlots,
        transformedData,
        verbose):
    """Plot the distribution of query latency and save the figure to results/.

    Args (interface unchanged):
        fileTypePlots: file extension for the saved figure, e.g. "png".
        transformedData: DataFrame with a numeric "total_duration" column (ms)
            — assumed; TODO confirm against caller.
        verbose: 0 = silent, 1 = progress messages, 2 = detailed progress.
    """
    if verbose >= 1:
        print(
            "Plotting Data"
        )

    mpl.use("Agg")  # Force matplotlib to not use any windows backend

    # Sort descending so the first value is the worst (longest) latency.
    data = transformedData.sort_values(
        by=["total_duration"],
        ascending=False,
    )
    # Renamed from `plotData` — the original local shadowed this function's name.
    durations = data["total_duration"].dropna().astype(float)

    plt.subplots()  # create the figure; axes handles themselves are unused
    plt.rc("xtick", labelsize=5)
    plt.rc("ytick", labelsize=5)
    plt.rc("figure", titlesize=8.)

    if verbose >= 2:
        print(
            "Mean, Std, and Normpdf are being calculated"
        )

    dataMean = np.mean(durations)
    dataStandardOfDeviation = np.std(durations)
    pdf = stats.norm.pdf(
        durations,
        dataMean,
        dataStandardOfDeviation,
    )
    # First (largest) value after the descending sort.
    longest = durations[0:1].values

    if verbose >= 2:
        print(
            "Mean, Std, and Normpdf have been calculated"
        )

    plt.suptitle(
        "Distribution of Latency Across Different Queries\n" \
        + "The 99.99% of the distribution is " + str(longest) + "ms",\
        size=8,
    )

    # Plot 1 -> line graph w/ default x-scale
    if verbose >= 2:
        print(
            "Plot 1 is being created"
        )
    plt.subplot(221).set_title("time(ms)/pdf")
    plt.plot(pdf, durations)
    if verbose >= 2:
        print(
            "Plot 1 has been created"
        )

    # Plot 2 -> hist w/ default x-scale (quadrant I)
    if verbose >= 2:
        print(
            "Plot 2 is being created"
        )
    plt.subplot(222).set_title("pdf/time(ms)")
    plt.hist(durations, 20, density=True)
    plt.plot(durations, pdf)
    if verbose >= 2:
        print(
            "Plot 2 has been created"
        )

    # Plot 3 -> line graph w/ log base 2 x-scale (quadrant III)
    if verbose >= 2:
        print(
            "Plot 3 is being created"
        )
    plt.subplot(223).set_title("time(ms)/pdf x=log2")
    # BUGFIX: `basex` was renamed `base` in Matplotlib 3.3 and removed in 3.5.
    plt.semilogx(pdf, durations, base=2)
    if verbose >= 2:
        print(
            "Plot 3 has been created"
        )

    # Plot 4 -> line graph w/ log base 10 x-scale (quadrant IV)
    if verbose >= 2:
        print(
            "Plot 4 is being created"
        )
    plt.subplot(224).set_title("time(ms)/pdf x=log10")
    plt.semilogx(pdf, durations, base=10)  # BUGFIX: base, not basex (see above)
    if verbose >= 2:
        print(
            "Plot 4 has been created"
        )

    plt.subplots_adjust(hspace=0.3, wspace=0.3)
    plt_name = "results/queries." + fileTypePlots
    plt.savefig(plt_name)

    if verbose >= 1:
        print(
            "Data has been plotted"
        )
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from math import e
def plotData(
        fileTypePlots,
        transformedData,
        verbose):
    """Plot the distribution of query latency and save the figure to results/.

    Args (interface unchanged):
        fileTypePlots: file extension for the saved figure, e.g. "png".
        transformedData: DataFrame with a numeric "total_duration" column (ms)
            — assumed; TODO confirm against caller.
        verbose: 0 = silent, 1 = progress messages, 2 = detailed progress.
    """
    if verbose >= 1:
        print(
            "Plotting Data"
        )

    mpl.use("Agg")  # Force matplotlib to not use any windows backend

    # Sort descending so the first value is the worst (longest) latency.
    data = transformedData.sort_values(
        by=["total_duration"],
        ascending=False,
    )
    # Renamed from `plotData` — the original local shadowed this function's name.
    durations = data["total_duration"].dropna().astype(float)

    plt.subplots()  # create the figure; axes handles themselves are unused
    plt.rc("xtick", labelsize=5)
    plt.rc("ytick", labelsize=5)
    plt.rc("figure", titlesize=8.)

    if verbose >= 2:
        print(
            "Mean, Std, and Normpdf are being calculated"
        )

    dataMean = np.mean(durations)
    dataStandardOfDeviation = np.std(durations)
    pdf = stats.norm.pdf(
        durations,
        dataMean,
        dataStandardOfDeviation,
    )
    # First (largest) value after the descending sort.
    longest = durations[0:1].values

    if verbose >= 2:
        print(
            "Mean, Std, and Normpdf have been calculated"
        )

    plt.suptitle(
        "Distribution of Latency Across Different Queries\n" \
        + "The 99.99% of the distribution is " + str(longest) + "ms",\
        size=8,
    )

    # Plot 1 -> line graph w/ default x-scale
    if verbose >= 2:
        print(
            "Plot 1 is being created"
        )
    plt.subplot(221).set_title("time(ms)/pdf")
    plt.plot(pdf, durations)
    if verbose >= 2:
        print(
            "Plot 1 has been created"
        )

    # Plot 2 -> hist w/ default x-scale (quadrant I)
    if verbose >= 2:
        print(
            "Plot 2 is being created"
        )
    plt.subplot(222).set_title("pdf/time(ms)")
    plt.hist(durations, 20, density=True)
    plt.plot(durations, pdf)
    if verbose >= 2:
        print(
            "Plot 2 has been created"
        )

    # Plot 3 -> line graph w/ log base 2 x-scale (quadrant III)
    if verbose >= 2:
        print(
            "Plot 3 is being created"
        )
    plt.subplot(223).set_title("time(ms)/pdf x=log2")
    # BUGFIX: `basex` was renamed `base` in Matplotlib 3.3 and removed in 3.5.
    plt.semilogx(pdf, durations, base=2)
    if verbose >= 2:
        print(
            "Plot 3 has been created"
        )

    # Plot 4 -> line graph w/ log base 10 x-scale (quadrant IV)
    if verbose >= 2:
        print(
            "Plot 4 is being created"
        )
    plt.subplot(224).set_title("time(ms)/pdf x=log10")
    plt.semilogx(pdf, durations, base=10)  # BUGFIX: base, not basex (see above)
    if verbose >= 2:
        print(
            "Plot 4 has been created"
        )

    plt.subplots_adjust(hspace=0.3, wspace=0.3)
    plt_name = "results/queries." + fileTypePlots
    plt.savefig(plt_name)

    if verbose >= 1:
        print(
            "Data has been plotted"
        )
"""find_similar_issues tests."""
import mock
import unittest
import webapp2
import webtest
from datastore import data_types
from handlers.testcase_detail import find_similar_issues
from issue_management.monorail import issue
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
@test_utils.with_cloud_emulators('datastore')
class HandlerTest(unittest.TestCase):
  """Test FindSimilarIssuesHandler."""

  def setUp(self):
    # Stub every issue-tracker helper and the access check so the
    # handler can run without a real tracker or auth backend.
    test_helpers.patch(self, [
        'issue_management.issue_tracker_utils.get_issue_tracker_manager',
        'issue_management.issue_tracker_utils.get_similar_issues',
        'issue_management.issue_tracker_utils.get_similar_issues_query',
        'issue_management.issue_tracker_utils.get_similar_issues_url',
        'issue_management.issue_tracker_utils.get_issue_url',
        'libs.access.check_access_and_get_testcase',
    ])
    self.app = webtest.TestApp(
        webapp2.WSGIApplication([('/', find_similar_issues.Handler)]))
    # A persisted testcase for the handler to look up in the emulated
    # datastore.
    self.testcase = data_types.Testcase()
    self.testcase.put()
    self.mock.check_access_and_get_testcase.return_value = self.testcase
    # An id guaranteed not to correspond to a stored testcase.
    self.invalid_testcase_id = self.testcase.key.id() + 1

  def test_itm_not_found(self):
    """Ensure it errors when issue_tracker_manager doesn't exist."""
    self.mock.get_issue_tracker_manager.return_value = None
    response = self.app.get(
        '/?testcaseId=%d&filterType=open' % self.testcase.key.id(),
        expect_errors=True)
    self.assertEqual(response.status_int, 404)
    self.mock.get_issue_tracker_manager.assert_has_calls([mock.call(mock.ANY)])
    # The manager must have been requested for this exact testcase.
    self.assertEqual(
        self.testcase.key.id(),
        self.mock.get_issue_tracker_manager.call_args[0][0].key.id())

  def test_find(self):
    """Ensure it returns correct JSON when everything is ok."""
    itm = mock.Mock()
    issue_item = issue.Issue()
    issue_item.id = 100
    self.mock.get_issue_tracker_manager.return_value = itm
    self.mock.get_similar_issues_url.return_value = 'similarurl'
    self.mock.get_similar_issues_query.return_value = 'query'
    self.mock.get_similar_issues.return_value = [issue_item]
    self.mock.get_issue_url.return_value = 'issueurl'
    response = self.app.get(
        '/?testcaseId=%d&filterType=open' % self.testcase.key.id())
    self.assertEqual(response.status_int, 200)
    # The handler's JSON payload must echo the stubbed tracker values.
    self.assertEqual(response.json['queryString'], 'query')
    self.assertEqual(response.json['queryUrl'], 'similarurl')
    self.assertEqual(response.json['issueUrlPrefix'], 'issueurl')
    self.assertEqual(len(response.json['items']), 1)
    self.assertEqual(response.json['items'][0]['id'], issue_item.id)
    self.mock.get_issue_tracker_manager.assert_has_calls([mock.call(mock.ANY)])
    self.assertEqual(
        self.testcase.key.id(),
        self.mock.get_issue_tracker_manager.call_args[0][0].key.id())
    # get_similar_issues must be called with the testcase, the requested
    # filter type and the tracker manager.
    self.mock.get_similar_issues.assert_has_calls(
        [mock.call(mock.ANY, 'open', itm)])
    self.assertEqual(self.testcase.key.id(),
                     self.mock.get_similar_issues.call_args[0][0].key.id())
import mock
import unittest
import webapp2
import webtest
from datastore import data_types
from handlers.testcase_detail import find_similar_issues
from issue_management.monorail import issue
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
@test_utils.with_cloud_emulators('datastore')
class HandlerTest(unittest.TestCase):
  """Test FindSimilarIssuesHandler."""

  def setUp(self):
    # Stub every issue-tracker helper and the access check so the
    # handler can run without a real tracker or auth backend.
    test_helpers.patch(self, [
        'issue_management.issue_tracker_utils.get_issue_tracker_manager',
        'issue_management.issue_tracker_utils.get_similar_issues',
        'issue_management.issue_tracker_utils.get_similar_issues_query',
        'issue_management.issue_tracker_utils.get_similar_issues_url',
        'issue_management.issue_tracker_utils.get_issue_url',
        'libs.access.check_access_and_get_testcase',
    ])
    self.app = webtest.TestApp(
        webapp2.WSGIApplication([('/', find_similar_issues.Handler)]))
    # A persisted testcase for the handler to look up in the emulated
    # datastore.
    self.testcase = data_types.Testcase()
    self.testcase.put()
    self.mock.check_access_and_get_testcase.return_value = self.testcase
    # An id guaranteed not to correspond to a stored testcase.
    self.invalid_testcase_id = self.testcase.key.id() + 1

  def test_itm_not_found(self):
    """Ensure it errors when issue_tracker_manager doesn't exist."""
    self.mock.get_issue_tracker_manager.return_value = None
    response = self.app.get(
        '/?testcaseId=%d&filterType=open' % self.testcase.key.id(),
        expect_errors=True)
    self.assertEqual(response.status_int, 404)
    self.mock.get_issue_tracker_manager.assert_has_calls([mock.call(mock.ANY)])
    # The manager must have been requested for this exact testcase.
    self.assertEqual(
        self.testcase.key.id(),
        self.mock.get_issue_tracker_manager.call_args[0][0].key.id())

  def test_find(self):
    """Ensure it returns correct JSON when everything is ok."""
    itm = mock.Mock()
    issue_item = issue.Issue()
    issue_item.id = 100
    self.mock.get_issue_tracker_manager.return_value = itm
    self.mock.get_similar_issues_url.return_value = 'similarurl'
    self.mock.get_similar_issues_query.return_value = 'query'
    self.mock.get_similar_issues.return_value = [issue_item]
    self.mock.get_issue_url.return_value = 'issueurl'
    response = self.app.get(
        '/?testcaseId=%d&filterType=open' % self.testcase.key.id())
    self.assertEqual(response.status_int, 200)
    # The handler's JSON payload must echo the stubbed tracker values.
    self.assertEqual(response.json['queryString'], 'query')
    self.assertEqual(response.json['queryUrl'], 'similarurl')
    self.assertEqual(response.json['issueUrlPrefix'], 'issueurl')
    self.assertEqual(len(response.json['items']), 1)
    self.assertEqual(response.json['items'][0]['id'], issue_item.id)
    self.mock.get_issue_tracker_manager.assert_has_calls([mock.call(mock.ANY)])
    self.assertEqual(
        self.testcase.key.id(),
        self.mock.get_issue_tracker_manager.call_args[0][0].key.id())
    # get_similar_issues must be called with the testcase, the requested
    # filter type and the tracker manager.
    self.mock.get_similar_issues.assert_has_calls(
        [mock.call(mock.ANY, 'open', itm)])
    self.assertEqual(self.testcase.key.id(),
                     self.mock.get_similar_issues.call_args[0][0].key.id())
import hashlib
import urllib
import typing
import re
from mitmproxy import ctx
from mitmproxy import flow
from mitmproxy import exceptions
from mitmproxy import io
from mitmproxy import command
import mitmproxy.types
class ServerPlayback:
    """Addon that replays recorded server responses.

    Recorded flows are indexed in ``flowmap`` by a loose hash of their
    request (see ``_hash``); each incoming request is answered with the
    next recorded response whose hash matches. The verbose
    ``ctx.log.warn`` tracing is deliberate: it exists to help debug hash
    mismatches between recorded and live requests.
    """

    def __init__(self):
        # request hash -> list of recorded flows with that hash
        self.flowmap = {}
        # set once the final recorded flow has been served (see request())
        self.stop = False
        # the flow that consumed the last flowmap entry (checked in tick())
        self.final_flow = None
        # one-shot guard for option-driven loading in configure()
        self.configured = False

    def load(self, loader):
        """Register all server-replay options with mitmproxy."""
        loader.add_option(
            "server_replay_kill_extra", bool, False,
            "Kill extra requests during replay."
        )
        loader.add_option(
            "server_replay_nopop", bool, False,
            """
            Don't remove flows from server replay state after use. This makes it
            possible to replay same response multiple times.
            """
        )
        loader.add_option(
            "server_replay_refresh", bool, True,
            """
            Refresh server replay responses by adjusting date, expires and
            last-modified headers, as well as adjusting cookie expiration.
            """
        )
        loader.add_option(
            "server_replay_use_headers", typing.Sequence[str], [],
            "Request headers to be considered during replay."
        )
        loader.add_option(
            "server_replay", typing.Sequence[str], [],
            "Replay server responses from a saved file."
        )
        loader.add_option(
            "server_replay_ignore_content", bool, False,
            "Ignore request's content while searching for a saved flow to replay."
        )
        loader.add_option(
            "server_replay_ignore_params", typing.Sequence[str], [],
            """
            Request's parameters to be ignored while searching for a saved flow
            to replay.
            """
        )
        # Raw string: "\d" in a plain literal is an invalid escape
        # sequence (DeprecationWarning since Python 3.6). Same value.
        loader.add_option(
            "server_replay_ignore_param_regex", str, r"\d{13,13}",
            """
            Regex to ignore a request's parameter while searching for a saved flow to replay
            """
        )
        loader.add_option(
            "server_replay_ignore_payload_params", typing.Sequence[str], [],
            """
            Request's payload parameters (application/x-www-form-urlencoded or
            multipart/form-data) to be ignored while searching for a saved flow
            to replay.
            """
        )
        loader.add_option(
            "server_replay_ignore_host", bool, False,
            """
            Ignore request's destination host while searching for a saved flow
            to replay.
            """
        )

    @command.command("replay.server")
    def load_flows(self, flows: typing.Sequence[flow.Flow]) -> None:
        """
        Replay server responses from flows.
        """
        self.flowmap = {}
        for i in flows:
            if i.response:  # type: ignore
                ctx.log.warn("=========================Add flow to flowmap=====================\n")
                sh = self._hash(i)
                bucket = self.flowmap.setdefault(sh, [])
                bucket.append(i)
                ctx.log.warn("Flows: {}\n".format(bucket))
                ctx.log.warn("Flows hash: {}\n".format(sh))
                ctx.log.warn("===================Finished adding flow to flowmap===============\n")
        ctx.master.addons.trigger("update", [])
        ctx.log.warn("=========================Finished loading all flows=======================")

    @command.command("replay.server.file")
    def load_file(self, path: mitmproxy.types.Path) -> None:
        """Load replay flows from the capture file at `path`."""
        try:
            flows = io.read_flows_from_paths([path])
        except exceptions.FlowReadException as e:
            raise exceptions.CommandError(str(e))
        self.load_flows(flows)

    @command.command("replay.server.stop")
    def clear(self) -> None:
        """
        Stop server replay.
        """
        self.flowmap = {}
        ctx.master.addons.trigger("update", [])

    def count(self):
        """Return the number of replay flows still queued."""
        return sum([len(i) for i in self.flowmap.values()])

    def _hash(self, flow):
        """
        Calculates a loose hash of the flow request.

        The hash covers port, scheme, method and path, plus — depending
        on options — body content, host, filtered query parameters and
        selected request headers.
        """
        r = flow.request
        ctx.log.warn("Flow request url is: {}\n".format(r.url))
        _, _, path, _, query, _ = urllib.parse.urlparse(r.url)
        queriesArray = urllib.parse.parse_qsl(query, keep_blank_values=True)
        key: typing.List[typing.Any] = [str(r.port), str(r.scheme), str(r.method), str(path)]
        ctx.log.warn("Initial key: {}".format(key))
        if not ctx.options.server_replay_ignore_content:
            if ctx.options.server_replay_ignore_payload_params and r.multipart_form:
                key.extend(
                    (k, v)
                    for k, v in r.multipart_form.items(multi=True)
                    if k.decode(errors="replace") not in ctx.options.server_replay_ignore_payload_params
                )
            elif ctx.options.server_replay_ignore_payload_params and r.urlencoded_form:
                key.extend(
                    (k, v)
                    for k, v in r.urlencoded_form.items(multi=True)
                    if k not in ctx.options.server_replay_ignore_payload_params
                )
            else:
                # Strip volatile "dateTime" fields from the stringified
                # body so equal requests recorded at different times match.
                r.raw_content = re.sub(',"dateTime":"(.+?)"', '', '{}'.format(r.raw_content))
                key.append(str(r.raw_content))
        if not ctx.options.server_replay_ignore_host:
            key.append(r.host)
        filtered = []
        ignore_params = ctx.options.server_replay_ignore_params or []
        if ignore_params:
            # The option value arrives as one space-separated string.
            ignore_params = ignore_params[0].split(' ')
        ctx.log.warn("ignore_params: {}".format(ignore_params))
        ignore_param_regex = ctx.options.server_replay_ignore_param_regex
        # Compile once instead of once per query parameter.
        param_pattern = re.compile(ignore_param_regex) if ignore_param_regex else None
        ctx.log.warn("Unfiltered queriesArray: {}\n".format(queriesArray))
        for p in queriesArray:
            if p[0] not in ignore_params:
                filtered.append(p)
        ctx.log.warn("Filtered by params, queriesArray: {}\n".format(filtered))
        for p in filtered:
            if param_pattern and not param_pattern.match(p[0]):
                key.append(p[0])
                key.append(p[1])
            else:
                ctx.log.warn("Filtered param {} by regex".format(p))
        use_headers = ctx.options.server_replay_use_headers or []
        if use_headers:
            headers = []
            # Also a single space-separated string.
            use_headers = use_headers[0].split(' ')
            for i in use_headers:
                v = r.headers.get(i)
                headers.append((i, v))
            key.append(headers)
        ctx.log.warn("Final key before hashing: {}\n".format(key))
        result = hashlib.sha256(repr(key).encode("utf8", "surrogateescape")).digest()
        ctx.log.warn("Key hash: {}\n".format(result))
        return result

    def next_flow(self, request):
        """
        Returns the next flow object, or None if no matching flow was
        found.
        """
        ctx.log.warn("----------------------------Start Hashing Request-------------------------\n")
        hsh = self._hash(request)
        ctx.log.warn("--------------------------Finished Hashing Request--------------------------")
        if hsh in self.flowmap:
            if ctx.options.server_replay_nopop:
                # Leave the flow queued so it can be replayed again.
                return self.flowmap[hsh][0]
            else:
                ret = self.flowmap[hsh].pop(0)
                if not self.flowmap[hsh]:
                    del self.flowmap[hsh]
                return ret

    def configure(self, updated):
        """Load flows from the `server_replay` option, once."""
        if not self.configured and ctx.options.server_replay:
            self.configured = True
            try:
                flows = io.read_flows_from_paths(ctx.options.server_replay)
            except exceptions.FlowReadException as e:
                raise exceptions.OptionsError(str(e))
            self.load_flows(flows)

    def tick(self):
        # Once the flow that consumed the last entry has finished,
        # signal completion to the master.
        if self.stop and not self.final_flow.live:
            ctx.master.addons.trigger("processing_complete")

    def request(self, f):
        """Answer `f` from the recorded flows, if a match exists."""
        ctx.log.warn("===========================Start Matching Request=========================\n")
        if self.flowmap:
            rflow = self.next_flow(f)
            ctx.log.warn("Matched response: {}".format(rflow))
            if rflow:
                response = rflow.response.copy()
                response.is_replay = True
                if ctx.options.server_replay_refresh:
                    response.refresh()
                f.response = response
                if not self.flowmap:
                    # That was the last recorded flow; arrange shutdown
                    # signalling via tick().
                    self.final_flow = f
                    self.stop = True
            elif ctx.options.server_replay_kill_extra:
                ctx.log.warn(
                    "Killing non-replay request with url {}".format(
                        f.request.url
                    )
                )
                f.reply.kill()
        ctx.log.warn("\n=======================-Finished Matching Request=========================")
import urllib
import typing
import re
from mitmproxy import ctx
from mitmproxy import flow
from mitmproxy import exceptions
from mitmproxy import io
from mitmproxy import command
import mitmproxy.types
class ServerPlayback:
def __init__(self):
self.flowmap = {}
self.stop = False
self.final_flow = None
self.configured = False
def load(self, loader):
loader.add_option(
"server_replay_kill_extra", bool, False,
"Kill extra requests during replay."
)
loader.add_option(
"server_replay_nopop", bool, False,
"""
Don't remove flows from server replay state after use. This makes it
possible to replay same response multiple times.
"""
)
loader.add_option(
"server_replay_refresh", bool, True,
"""
Refresh server replay responses by adjusting date, expires and
last-modified headers, as well as adjusting cookie expiration.
"""
)
loader.add_option(
"server_replay_use_headers", typing.Sequence[str], [],
"Request headers to be considered during replay."
)
loader.add_option(
"server_replay", typing.Sequence[str], [],
"Replay server responses from a saved file."
)
loader.add_option(
"server_replay_ignore_content", bool, False,
"Ignore request's content while searching for a saved flow to replay."
)
loader.add_option(
"server_replay_ignore_params", typing.Sequence[str], [],
"""
Request's parameters to be ignored while searching for a saved flow
to replay.
"""
)
loader.add_option(
"server_replay_ignore_param_regex", str, "\d{13,13}",
"""
Regex to ignore a request's parameter while searching for a saved flow to replay
"""
)
loader.add_option(
"server_replay_ignore_payload_params", typing.Sequence[str], [],
"""
Request's payload parameters (application/x-www-form-urlencoded or
multipart/form-data) to be ignored while searching for a saved flow
to replay.
"""
)
loader.add_option(
"server_replay_ignore_host", bool, False,
"""
Ignore request's destination host while searching for a saved flow
to replay.
"""
)
@command.command("replay.server")
def load_flows(self, flows: typing.Sequence[flow.Flow]) -> None:
"""
Replay server responses from flows.
"""
self.flowmap = {}
for i in flows:
if i.response: # type: ignore
ctx.log.warn("=========================Add flow to flowmap=====================\n");
sh = self._hash(i)
l = self.flowmap.setdefault(sh, [])
l.append(i)
ctx.log.warn("Flows: {}\n".format(l))
ctx.log.warn("Flows hash: {}\n".format(sh))
ctx.log.warn("===================Finished adding flow to flowmap===============\n");
ctx.master.addons.trigger("update", [])
ctx.log.warn("=========================Finished loading all flows=======================")
@command.command("replay.server.file")
def load_file(self, path: mitmproxy.types.Path) -> None:
try:
flows = io.read_flows_from_paths([path])
except exceptions.FlowReadException as e:
raise exceptions.CommandError(str(e))
self.load_flows(flows)
@command.command("replay.server.stop")
def clear(self) -> None:
"""
Stop server replay.
"""
self.flowmap = {}
ctx.master.addons.trigger("update", [])
def count(self):
return sum([len(i) for i in self.flowmap.values()])
def _hash(self, flow):
"""
Calculates a loose hash of the flow request.
"""
r = flow.request
ctx.log.warn("Flow request url is: {}\n".format(r.url))
_, _, path, _, query, _ = urllib.parse.urlparse(r.url)
queriesArray = urllib.parse.parse_qsl(query, keep_blank_values=True)
# key: typing.List[typing.Any] = [str(r.port), str(r.scheme), str(r.method), str(path)]
key = [str(r.port), str(r.scheme), str(r.method), str(path)] # type: List[Any]
ctx.log.warn("Initial key: {}".format(key))
if not ctx.options.server_replay_ignore_content:
if ctx.options.server_replay_ignore_payload_params and r.multipart_form:
key.extend(
(k, v)
for k, v in r.multipart_form.items(multi=True)
if k.decode(errors="replace") not in ctx.options.server_replay_ignore_payload_params
)
elif ctx.options.server_replay_ignore_payload_params and r.urlencoded_form:
key.extend(
(k, v)
for k, v in r.urlencoded_form.items(multi=True)
if k not in ctx.options.server_replay_ignore_payload_params
)
else:
r.raw_content = re.sub(',"dateTime":"(.+?)"', '', '{}'.format(r.raw_content))
key.append(str(r.raw_content))
if not ctx.options.server_replay_ignore_host:
key.append(r.host)
filtered = []
ignore_params = ctx.options.server_replay_ignore_params or []
if ignore_params:
ignore_params = ignore_params[0].split(' ');
ctx.log.warn("ignore_params: {}".format(ignore_params))
ignore_param_regex = ctx.options.server_replay_ignore_param_regex
ctx.log.warn("Unfiltered queriesArray: {}\n".format(queriesArray))
for p in queriesArray:
if p[0] not in ignore_params:
filtered.append(p)
ctx.log.warn("Filtered by params, queriesArray: {}\n".format(filtered))
for p in filtered:
if ignore_param_regex and not re.compile(ignore_param_regex).match(p[0]):
key.append(p[0])
key.append(p[1])
else:
ctx.log.warn("Filtered param {} by regex".format(p))
ignore_headers = ctx.options.server_replay_use_headers or []
if ignore_headers:
headers = []
ignore_headers = ignore_headers[0].split(' ');
for i in ignore_headers:
v = r.headers.get(i)
headers.append((i, v))
key.append(headers)
ctx.log.warn("Final key before hashing: {}\n".format(key))
result = hashlib.sha256(repr(key).encode("utf8", "surrogateescape")).digest()
ctx.log.warn("Key hash: {}\n".format(result))
return result
def next_flow(self, request):
"""
Returns the next flow object, or None if no matching flow was
found.
"""
ctx.log.warn("----------------------------Start Hashing Request-------------------------\n")
hsh = self._hash(request)
ctx.log.warn("--------------------------Finished Hashing Request--------------------------")
if hsh in self.flowmap:
if ctx.options.server_replay_nopop:
return self.flowmap[hsh][0]
else:
ret = self.flowmap[hsh].pop(0)
if not self.flowmap[hsh]:
del self.flowmap[hsh]
return ret
def configure(self, updated):
if not self.configured and ctx.options.server_replay:
self.configured = True
try:
flows = io.read_flows_from_paths(ctx.options.server_replay)
except exceptions.FlowReadException as e:
raise exceptions.OptionsError(str(e))
self.load_flows(flows)
def tick(self):
if self.stop and not self.final_flow.live:
ctx.master.addons.trigger("processing_complete")
def request(self, f):
ctx.log.warn("===========================Start Matching Request=========================\n")
if self.flowmap:
rflow = self.next_flow(f)
ctx.log.warn("Matched response: {}".format(rflow))
if rflow:
response = rflow.response.copy()
response.is_replay = True
if ctx.options.server_replay_refresh:
response.refresh()
f.response = response
if not self.flowmap:
self.final_flow = f
self.stop = True
elif ctx.options.server_replay_kill_extra:
ctx.log.warn(
"Killing non-replay request with url {}".format(
f.request.url
)
)
f.reply.kill()
ctx.log.warn("\n=======================-Finished Matching Request=========================") | 0.476092 | 0.160135 |
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import threading
import subprocess
import tempfile
import shlex
from string import Template
from beets import ui, util, plugins, config
from beets.plugins import BeetsPlugin
from beets.util.confit import ConfigTypeError
from beets import art
# Serializes directory creation across converter threads (the existence
# check plus mkdir in convert_item is not atomic).
_fs_lock = threading.Lock()
_temp_files = []  # Keep track of temporary transcoded files for deletion.
# Some convenient alternate names for formats.
ALIASES = {
    u'wma': u'windows media',
    u'vorbis': u'ogg',
}
# Formats treated as lossless sources (lowercase extensions).
LOSSLESS_FORMATS = ['ape', 'flac', 'alac', 'wav']
def replace_ext(path, ext):
    """Return the path with its extension replaced by `ext`.

    The new extension must not contain a leading dot.
    """
    root, _old_ext = os.path.splitext(path)
    return root + b'.' + ext
def get_format(fmt=None):
    """Return the command template and the extension from the config.

    `fmt` defaults to the configured `convert.format`; aliases (see
    ALIASES) are resolved first. Raises `ui.UserError` if the format's
    config entry lacks "command"/"extension" fields. Returns a
    (command, extension) tuple of UTF-8-encoded bytestrings.

    NOTE(review): uses the Python 2 `unicode` builtin — this module is
    Python-2 only as written.
    """
    if not fmt:
        fmt = config['convert']['format'].get(unicode).lower()
    fmt = ALIASES.get(fmt, fmt)
    try:
        format_info = config['convert']['formats'][fmt].get(dict)
        command = format_info['command']
        extension = format_info['extension']
    except KeyError:
        raise ui.UserError(
            u'convert: format {0} needs "command" and "extension" fields'
            .format(fmt)
        )
    except ConfigTypeError:
        # The format entry is a plain command string; the extension
        # defaults to the format name itself.
        command = config['convert']['formats'][fmt].get(bytes)
        extension = fmt
    # Convenience and backwards-compatibility shortcuts.
    keys = config['convert'].keys()
    if 'command' in keys:
        command = config['convert']['command'].get(unicode)
    elif 'opts' in keys:
        # Undocumented option for backwards compatibility with < 1.3.1.
        command = u'ffmpeg -i $source -y {0} $dest'.format(
            config['convert']['opts'].get(unicode)
        )
    if 'extension' in keys:
        extension = config['convert']['extension'].get(unicode)
    return (command.encode('utf8'), extension.encode('utf8'))
def should_transcode(item, fmt):
    """Determine whether the item should be transcoded as part of
    conversion (i.e., its bitrate is high or it has the wrong format).
    """
    never_lossy = config['convert']['never_convert_lossy_files']
    if never_lossy and item.format.lower() not in LOSSLESS_FORMATS:
        # Lossy source and lossy-to-lossy transcoding is disabled.
        return False
    maxbr = config['convert']['max_bitrate'].get(int)
    if fmt.lower() != item.format.lower():
        return True
    return item.bitrate >= 1000 * maxbr
class ConvertPlugin(BeetsPlugin):
    """``beet convert``: transcode/copy music to an external location.

    Also supports transcoding automatically on import (the ``auto``
    option) and embedding or copying album art for converted files.
    """

    def __init__(self):
        super(ConvertPlugin, self).__init__()
        # Default configuration. Command templates use $source and $dest
        # placeholders, substituted in encode().
        self.config.add({
            u'dest': None,
            u'pretend': False,
            u'threads': util.cpu_count(),
            u'format': u'mp3',
            u'formats': {
                u'aac': {
                    u'command': u'ffmpeg -i $source -y -vn -acodec libfaac '
                                u'-aq 100 $dest',
                    u'extension': u'm4a',
                },
                u'alac': {
                    u'command': u'ffmpeg -i $source -y -vn -acodec alac $dest',
                    u'extension': u'm4a',
                },
                u'flac': u'ffmpeg -i $source -y -vn -acodec flac $dest',
                u'mp3': u'ffmpeg -i $source -y -vn -aq 2 $dest',
                u'opus':
                    u'ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest',
                u'ogg':
                    u'ffmpeg -i $source -y -vn -acodec libvorbis -aq 2 $dest',
                u'wma':
                    u'ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest',
            },
            u'max_bitrate': 500,
            u'auto': False,
            u'tmpdir': None,
            u'quiet': False,
            u'embed': True,
            u'paths': {},
            u'never_convert_lossy_files': False,
            u'copy_album_art': False,
        })
        self.import_stages = [self.auto_convert]
        self.register_listener('import_task_files', self._cleanup)

    def commands(self):
        """Define the ``convert`` CLI subcommand and its options."""
        cmd = ui.Subcommand('convert', help='convert to external location')
        cmd.parser.add_option('-p', '--pretend', action='store_true',
                              help='show actions but do nothing')
        cmd.parser.add_option('-t', '--threads', action='store', type='int',
                              help='change the number of threads, \
defaults to maximum available processors')
        cmd.parser.add_option('-k', '--keep-new', action='store_true',
                              dest='keep_new', help='keep only the converted \
and move the old files')
        cmd.parser.add_option('-d', '--dest', action='store',
                              help='set the destination directory')
        cmd.parser.add_option('-f', '--format', action='store', dest='format',
                              help='set the target format of the tracks')
        cmd.parser.add_option('-y', '--yes', action='store_true', dest='yes',
                              help='do not ask for confirmation')
        cmd.parser.add_album_option()
        cmd.func = self.convert_func
        return [cmd]

    def auto_convert(self, config, task):
        """Import-stage hook: transcode imported items when ``auto`` is on."""
        if self.config['auto']:
            for item in task.imported_items():
                self.convert_on_import(config.lib, item)

    # Utilities converted from functions to methods on logging overhaul
    def encode(self, command, source, dest, pretend=False):
        """Encode `source` to `dest` using command template `command`.

        `command`, `source` and `dest` are bytestrings. With `pretend`,
        only logs the command that would run. Raises
        `subprocess.CalledProcessError` if the command exited with a
        non-zero status code; `ui.UserError` if it could not be invoked.
        """
        # The paths and arguments must be bytes.
        assert isinstance(command, bytes)
        assert isinstance(source, bytes)
        assert isinstance(dest, bytes)
        quiet = self.config['quiet'].get(bool)
        if not quiet and not pretend:
            self._log.info(u'Encoding {0}', util.displayable_path(source))
        # Substitute $source and $dest in the argument list.
        args = shlex.split(command)
        for i, arg in enumerate(args):
            args[i] = Template(arg).safe_substitute({
                b'source': source,
                b'dest': dest,
            })
        if pretend:
            self._log.info(' '.join(args))
            return
        try:
            util.command_output(args)
        except subprocess.CalledProcessError as exc:
            # Something went wrong (probably Ctrl+C), remove temporary files
            self._log.info(u'Encoding {0} failed. Cleaning up...',
                           util.displayable_path(source))
            self._log.debug(u'Command {0} exited with status {1}',
                            exc.cmd.decode('utf8', 'ignore'),
                            exc.returncode)
            util.remove(dest)
            util.prune_dirs(os.path.dirname(dest))
            raise
        except OSError as exc:
            # NOTE(review): message reads "could invoke"; likely meant
            # "could not invoke" — fixing it would change a user-facing
            # string, so it is only flagged here.
            raise ui.UserError(
                u"convert: could invoke '{0}': {1}".format(
                    ' '.join(args), exc
                )
            )
        if not quiet and not pretend:
            self._log.info(u'Finished encoding {0}',
                           util.displayable_path(source))

    def convert_item(self, dest_dir, keep_new, path_formats, fmt,
                     pretend=False):
        """A coroutine that converts or copies the items sent into it.

        Used as a parallel pipeline stage by `convert_func`: each item
        received via ``yield`` is transcoded (when `should_transcode`
        says so) or copied to its destination under `dest_dir`.
        """
        command, ext = get_format(fmt)
        item, original, converted = None, None, None
        while True:
            item = yield (item, original, converted)
            dest = item.destination(basedir=dest_dir,
                                    path_formats=path_formats)

            # When keeping the new file in the library, we first move the
            # current (pristine) file to the destination. We'll then copy it
            # back to its old path or transcode it to a new path.
            if keep_new:
                original = dest
                converted = item.path
                if should_transcode(item, fmt):
                    converted = replace_ext(converted, ext)
            else:
                original = item.path
                if should_transcode(item, fmt):
                    dest = replace_ext(dest, ext)
                converted = dest

            # Ensure that only one thread tries to create directories at a
            # time. (The existence check is not atomic with the directory
            # creation inside this function.)
            if not pretend:
                with _fs_lock:
                    util.mkdirall(dest)

            if os.path.exists(util.syspath(dest)):
                self._log.info(u'Skipping {0} (target file exists)',
                               util.displayable_path(item.path))
                continue

            if keep_new:
                if pretend:
                    self._log.info(u'mv {0} {1}',
                                   util.displayable_path(item.path),
                                   util.displayable_path(original))
                else:
                    self._log.info(u'Moving to {0}',
                                   util.displayable_path(original))
                    util.move(item.path, original)

            if should_transcode(item, fmt):
                try:
                    self.encode(command, original, converted, pretend)
                except subprocess.CalledProcessError:
                    # encode() already cleaned up; skip this item.
                    continue
            else:
                if pretend:
                    self._log.info(u'cp {0} {1}',
                                   util.displayable_path(original),
                                   util.displayable_path(converted))
                else:
                    # No transcoding necessary.
                    self._log.info(u'Copying {0}',
                                   util.displayable_path(item.path))
                    util.copy(original, converted)

            if pretend:
                continue

            # Write tags from the database to the converted file.
            item.try_write(path=converted)

            if keep_new:
                # If we're keeping the transcoded file, read it again (after
                # writing) to get new bitrate, duration, etc.
                item.path = converted
                item.read()
                item.store()  # Store new path and audio data.

            if self.config['embed']:
                album = item.get_album()
                if album and album.artpath:
                    art.embed_item(self._log, item, album.artpath,
                                   itempath=converted)

            if keep_new:
                plugins.send('after_convert', item=item,
                             dest=dest, keepnew=True)
            else:
                plugins.send('after_convert', item=item,
                             dest=converted, keepnew=False)

    def copy_album_art(self, album, dest_dir, path_formats, pretend=False):
        """Copies the associated cover art of the album. Album must have at
        least one track.
        """
        if not album or not album.artpath:
            return

        album_item = album.items().get()
        # Album shouldn't be empty.
        if not album_item:
            return

        # Get the destination of the first item (track) of the album, we use
        # this function to format the path accordingly to path_formats.
        dest = album_item.destination(basedir=dest_dir,
                                      path_formats=path_formats)

        # Remove item from the path.
        dest = os.path.join(*util.components(dest)[:-1])

        dest = album.art_destination(album.artpath, item_dir=dest)
        if album.artpath == dest:
            return

        if not pretend:
            util.mkdirall(dest)

        if os.path.exists(util.syspath(dest)):
            self._log.info(u'Skipping {0} (target file exists)',
                           util.displayable_path(album.artpath))
            return

        if pretend:
            self._log.info(u'cp {0} {1}',
                           util.displayable_path(album.artpath),
                           util.displayable_path(dest))
        else:
            self._log.info(u'Copying cover art to {0}',
                           util.displayable_path(dest))
            util.copy(album.artpath, dest)

    def convert_func(self, lib, opts, args):
        """Handle the ``convert`` command: resolve options, confirm with
        the user, then run one converter coroutine per thread over the
        query results as a parallel pipeline.
        """
        if not opts.dest:
            opts.dest = self.config['dest'].get()
        if not opts.dest:
            raise ui.UserError('no convert destination set')
        opts.dest = util.bytestring_path(opts.dest)

        if not opts.threads:
            opts.threads = self.config['threads'].get(int)

        if self.config['paths']:
            path_formats = ui.get_path_formats(self.config['paths'])
        else:
            path_formats = ui.get_path_formats()

        if not opts.format:
            opts.format = self.config['format'].get(unicode).lower()

        pretend = opts.pretend if opts.pretend is not None else \
            self.config['pretend'].get(bool)

        if not pretend:
            # Show the matched items and ask for confirmation.
            ui.commands.list_items(lib, ui.decargs(args), opts.album)
            if not (opts.yes or ui.input_yn("Convert? (Y/n)")):
                return

        if opts.album:
            albums = lib.albums(ui.decargs(args))
            items = (i for a in albums for i in a.items())
            if self.config['copy_album_art']:
                for album in albums:
                    self.copy_album_art(album, opts.dest, path_formats,
                                        pretend)
        else:
            items = iter(lib.items(ui.decargs(args)))
        # One converter coroutine per thread (see convert_item).
        convert = [self.convert_item(opts.dest,
                                     opts.keep_new,
                                     path_formats,
                                     opts.format,
                                     pretend)
                   for _ in range(opts.threads)]
        pipe = util.pipeline.Pipeline([items, convert])
        pipe.run_parallel()

    def convert_on_import(self, lib, item):
        """Transcode a file automatically after it is imported into the
        library.
        """
        fmt = self.config['format'].get(unicode).lower()
        if should_transcode(item, fmt):
            command, ext = get_format()
            # Transcode into a temp file, then point the item at it; the
            # temp file is removed later by _cleanup().
            # NOTE(review): `'.' + ext` concatenates str and the bytes
            # extension — relies on Python 2 string semantics.
            tmpdir = self.config['tmpdir'].get()
            fd, dest = tempfile.mkstemp('.' + ext, dir=tmpdir)
            dest = util.bytestring_path(dest)
            os.close(fd)
            _temp_files.append(dest)  # Delete the transcode later.
            try:
                self.encode(command, item.path, dest)
            except subprocess.CalledProcessError:
                return
            item.path = dest
            item.write()
            item.read()  # Load new audio information data.
            item.store()

    def _cleanup(self, task, session):
        """Remove temporary transcodes once the import task no longer
        needs the original files.
        """
        for path in task.old_paths:
            if path in _temp_files:
                if os.path.isfile(path):
                    util.remove(path)
                _temp_files.remove(path)
unicode_literals)
import os
import threading
import subprocess
import tempfile
import shlex
from string import Template
from beets import ui, util, plugins, config
from beets.plugins import BeetsPlugin
from beets.util.confit import ConfigTypeError
from beets import art
_fs_lock = threading.Lock()
_temp_files = [] # Keep track of temporary transcoded files for deletion.
# Some convenient alternate names for formats.
ALIASES = {
u'wma': u'windows media',
u'vorbis': u'ogg',
}
LOSSLESS_FORMATS = ['ape', 'flac', 'alac', 'wav']
def replace_ext(path, ext):
    """Return the path with its extension replaced by `ext`.

    The new extension must not contain a leading dot.
    """
    root, _old_ext = os.path.splitext(path)
    return root + b'.' + ext
def get_format(fmt=None):
    """Return the command template and the extension from the config.

    `fmt` defaults to the configured `convert.format`; aliases (see
    ALIASES) are resolved first. Raises `ui.UserError` if the format's
    config entry lacks "command"/"extension" fields. Returns a
    (command, extension) tuple of UTF-8-encoded bytestrings.

    NOTE(review): uses the Python 2 `unicode` builtin — this module is
    Python-2 only as written.
    """
    if not fmt:
        fmt = config['convert']['format'].get(unicode).lower()
    fmt = ALIASES.get(fmt, fmt)
    try:
        format_info = config['convert']['formats'][fmt].get(dict)
        command = format_info['command']
        extension = format_info['extension']
    except KeyError:
        raise ui.UserError(
            u'convert: format {0} needs "command" and "extension" fields'
            .format(fmt)
        )
    except ConfigTypeError:
        # The format entry is a plain command string; the extension
        # defaults to the format name itself.
        command = config['convert']['formats'][fmt].get(bytes)
        extension = fmt
    # Convenience and backwards-compatibility shortcuts.
    keys = config['convert'].keys()
    if 'command' in keys:
        command = config['convert']['command'].get(unicode)
    elif 'opts' in keys:
        # Undocumented option for backwards compatibility with < 1.3.1.
        command = u'ffmpeg -i $source -y {0} $dest'.format(
            config['convert']['opts'].get(unicode)
        )
    if 'extension' in keys:
        extension = config['convert']['extension'].get(unicode)
    return (command.encode('utf8'), extension.encode('utf8'))
def should_transcode(item, fmt):
    """Determine whether the item should be transcoded as part of
    conversion (i.e., its bitrate is high or it has the wrong format).
    """
    never_lossy = config['convert']['never_convert_lossy_files']
    if never_lossy and item.format.lower() not in LOSSLESS_FORMATS:
        # Lossy source and lossy-to-lossy transcoding is disabled.
        return False
    maxbr = config['convert']['max_bitrate'].get(int)
    if fmt.lower() != item.format.lower():
        return True
    return item.bitrate >= 1000 * maxbr
class ConvertPlugin(BeetsPlugin):
    """The `convert` plugin: transcode music to an external location.

    Provides the `beet convert` command, optional automatic transcoding
    on import (the `auto` option), and embedding or copying of album art
    for converted files.
    """

    def __init__(self):
        super(ConvertPlugin, self).__init__()
        self.config.add({
            u'dest': None,
            u'pretend': False,
            u'threads': util.cpu_count(),
            u'format': u'mp3',
            u'formats': {
                u'aac': {
                    u'command': u'ffmpeg -i $source -y -vn -acodec libfaac '
                                u'-aq 100 $dest',
                    u'extension': u'm4a',
                },
                u'alac': {
                    u'command': u'ffmpeg -i $source -y -vn -acodec alac $dest',
                    u'extension': u'm4a',
                },
                u'flac': u'ffmpeg -i $source -y -vn -acodec flac $dest',
                u'mp3': u'ffmpeg -i $source -y -vn -aq 2 $dest',
                u'opus':
                    u'ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest',
                u'ogg':
                    u'ffmpeg -i $source -y -vn -acodec libvorbis -aq 2 $dest',
                u'wma':
                    u'ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest',
            },
            u'max_bitrate': 500,
            u'auto': False,
            u'tmpdir': None,
            u'quiet': False,
            u'embed': True,
            u'paths': {},
            u'never_convert_lossy_files': False,
            u'copy_album_art': False,
        })
        self.import_stages = [self.auto_convert]
        self.register_listener('import_task_files', self._cleanup)

    def commands(self):
        """Define the `convert` CLI subcommand and its options."""
        cmd = ui.Subcommand('convert', help='convert to external location')
        cmd.parser.add_option('-p', '--pretend', action='store_true',
                              help='show actions but do nothing')
        cmd.parser.add_option('-t', '--threads', action='store', type='int',
                              help='change the number of threads, \
defaults to maximum available processors')
        cmd.parser.add_option('-k', '--keep-new', action='store_true',
                              dest='keep_new', help='keep only the converted \
and move the old files')
        cmd.parser.add_option('-d', '--dest', action='store',
                              help='set the destination directory')
        cmd.parser.add_option('-f', '--format', action='store', dest='format',
                              help='set the target format of the tracks')
        cmd.parser.add_option('-y', '--yes', action='store_true', dest='yes',
                              help='do not ask for confirmation')
        cmd.parser.add_album_option()
        cmd.func = self.convert_func
        return [cmd]

    def auto_convert(self, config, task):
        """Import-stage hook: transcode newly imported items when the
        `auto` option is enabled.
        """
        if self.config['auto']:
            for item in task.imported_items():
                self.convert_on_import(config.lib, item)

    # Utilities converted from functions to methods on logging overhaul
    def encode(self, command, source, dest, pretend=False):
        """Encode `source` to `dest` using command template `command`.

        Raises `subprocess.CalledProcessError` if the command exited with
        a non-zero status code, and `ui.UserError` if the command could
        not be started at all.
        """
        # The paths and arguments must be bytes.
        assert isinstance(command, bytes)
        assert isinstance(source, bytes)
        assert isinstance(dest, bytes)

        quiet = self.config['quiet'].get(bool)
        if not quiet and not pretend:
            self._log.info(u'Encoding {0}', util.displayable_path(source))

        # Substitute $source and $dest in the argument list.
        args = shlex.split(command)
        for i, arg in enumerate(args):
            args[i] = Template(arg).safe_substitute({
                b'source': source,
                b'dest': dest,
            })

        if pretend:
            self._log.info(' '.join(args))
            return

        try:
            util.command_output(args)
        except subprocess.CalledProcessError as exc:
            # Something went wrong (probably Ctrl+C), remove temporary files
            self._log.info(u'Encoding {0} failed. Cleaning up...',
                           util.displayable_path(source))
            self._log.debug(u'Command {0} exited with status {1}',
                            exc.cmd.decode('utf8', 'ignore'),
                            exc.returncode)
            util.remove(dest)
            util.prune_dirs(os.path.dirname(dest))
            raise
        except OSError as exc:
            # Bug fix: the original message read "could invoke", dropping
            # the negation from the error text.
            raise ui.UserError(
                u"convert: could not invoke '{0}': {1}".format(
                    ' '.join(args), exc
                )
            )

        if not quiet and not pretend:
            self._log.info(u'Finished encoding {0}',
                           util.displayable_path(source))

    def convert_item(self, dest_dir, keep_new, path_formats, fmt,
                     pretend=False):
        """Pipeline coroutine that converts items sent into it.

        Yields `(item, original, converted)` tuples back to the pipeline
        and receives the next item via `send`.
        """
        command, ext = get_format(fmt)
        item, original, converted = None, None, None
        while True:
            item = yield (item, original, converted)
            dest = item.destination(basedir=dest_dir,
                                    path_formats=path_formats)

            # When keeping the new file in the library, we first move the
            # current (pristine) file to the destination. We'll then copy it
            # back to its old path or transcode it to a new path.
            if keep_new:
                original = dest
                converted = item.path
                if should_transcode(item, fmt):
                    converted = replace_ext(converted, ext)
            else:
                original = item.path
                if should_transcode(item, fmt):
                    dest = replace_ext(dest, ext)
                converted = dest

            # Ensure that only one thread tries to create directories at a
            # time. (The existence check is not atomic with the directory
            # creation inside this function.)
            if not pretend:
                with _fs_lock:
                    util.mkdirall(dest)

            if os.path.exists(util.syspath(dest)):
                self._log.info(u'Skipping {0} (target file exists)',
                               util.displayable_path(item.path))
                continue

            if keep_new:
                if pretend:
                    self._log.info(u'mv {0} {1}',
                                   util.displayable_path(item.path),
                                   util.displayable_path(original))
                else:
                    self._log.info(u'Moving to {0}',
                                   util.displayable_path(original))
                    util.move(item.path, original)

            if should_transcode(item, fmt):
                try:
                    self.encode(command, original, converted, pretend)
                except subprocess.CalledProcessError:
                    continue
            else:
                if pretend:
                    self._log.info(u'cp {0} {1}',
                                   util.displayable_path(original),
                                   util.displayable_path(converted))
                else:
                    # No transcoding necessary.
                    self._log.info(u'Copying {0}',
                                   util.displayable_path(item.path))
                    util.copy(original, converted)

            if pretend:
                continue

            # Write tags from the database to the converted file.
            item.try_write(path=converted)

            if keep_new:
                # If we're keeping the transcoded file, read it again (after
                # writing) to get new bitrate, duration, etc.
                item.path = converted
                item.read()
                item.store()  # Store new path and audio data.

            if self.config['embed']:
                album = item.get_album()
                if album and album.artpath:
                    art.embed_item(self._log, item, album.artpath,
                                   itempath=converted)

            if keep_new:
                plugins.send('after_convert', item=item,
                             dest=dest, keepnew=True)
            else:
                plugins.send('after_convert', item=item,
                             dest=converted, keepnew=False)

    def copy_album_art(self, album, dest_dir, path_formats, pretend=False):
        """Copies the associated cover art of the album. Album must have at
        least one track.
        """
        if not album or not album.artpath:
            return

        album_item = album.items().get()
        # Album shouldn't be empty.
        if not album_item:
            return

        # Get the destination of the first item (track) of the album, we use
        # this function to format the path accordingly to path_formats.
        dest = album_item.destination(basedir=dest_dir,
                                      path_formats=path_formats)

        # Remove item from the path.
        dest = os.path.join(*util.components(dest)[:-1])
        dest = album.art_destination(album.artpath, item_dir=dest)
        if album.artpath == dest:
            return

        if not pretend:
            util.mkdirall(dest)

        if os.path.exists(util.syspath(dest)):
            self._log.info(u'Skipping {0} (target file exists)',
                           util.displayable_path(album.artpath))
            return
        if pretend:
            self._log.info(u'cp {0} {1}',
                           util.displayable_path(album.artpath),
                           util.displayable_path(dest))
        else:
            self._log.info(u'Copying cover art to {0}',
                           util.displayable_path(dest))
            util.copy(album.artpath, dest)

    def convert_func(self, lib, opts, args):
        """Handler for the `convert` subcommand: resolve options, confirm
        with the user, and run the conversion pipeline in parallel.
        """
        if not opts.dest:
            opts.dest = self.config['dest'].get()
        if not opts.dest:
            raise ui.UserError('no convert destination set')
        opts.dest = util.bytestring_path(opts.dest)

        if not opts.threads:
            opts.threads = self.config['threads'].get(int)

        if self.config['paths']:
            path_formats = ui.get_path_formats(self.config['paths'])
        else:
            path_formats = ui.get_path_formats()

        if not opts.format:
            opts.format = self.config['format'].get(unicode).lower()

        # CLI flag overrides the configured `pretend` setting.
        pretend = opts.pretend if opts.pretend is not None else \
            self.config['pretend'].get(bool)

        if not pretend:
            ui.commands.list_items(lib, ui.decargs(args), opts.album)
            if not (opts.yes or ui.input_yn("Convert? (Y/n)")):
                return

        if opts.album:
            albums = lib.albums(ui.decargs(args))
            items = (i for a in albums for i in a.items())
            if self.config['copy_album_art']:
                for album in albums:
                    self.copy_album_art(album, opts.dest, path_formats,
                                        pretend)
        else:
            items = iter(lib.items(ui.decargs(args)))

        # One converter coroutine per worker thread.
        convert = [self.convert_item(opts.dest,
                                     opts.keep_new,
                                     path_formats,
                                     opts.format,
                                     pretend)
                   for _ in range(opts.threads)]
        pipe = util.pipeline.Pipeline([items, convert])
        pipe.run_parallel()

    def convert_on_import(self, lib, item):
        """Transcode a file automatically after it is imported into the
        library.
        """
        fmt = self.config['format'].get(unicode).lower()
        if should_transcode(item, fmt):
            command, ext = get_format()
            # Create a uniquely named temporary destination for the
            # transcode; it is cleaned up later by _cleanup().
            tmpdir = self.config['tmpdir'].get()
            fd, dest = tempfile.mkstemp('.' + ext, dir=tmpdir)
            dest = util.bytestring_path(dest)
            os.close(fd)
            _temp_files.append(dest)  # Delete the transcode later.
            try:
                self.encode(command, item.path, dest)
            except subprocess.CalledProcessError:
                return
            item.path = dest
            item.write()
            item.read()  # Load new audio information data.
            item.store()

    def _cleanup(self, task, session):
        """Delete leftover temporary transcodes once an import task has
        finished with their paths.
        """
        for path in task.old_paths:
            if path in _temp_files:
                if os.path.isfile(path):
                    util.remove(path)
                _temp_files.remove(path)
import sys
import unittest
from conduit import Node
import numpy as np
class Test_Conduit_Node(unittest.TestCase):
    """Unit tests for the conduit.Node Python bindings: set/fetch,
    external (zero-copy) arrays, serialization, diffing, and summaries.
    """

    def test_simple(self):
        # Scalar round-trip through __setitem__/__getitem__.
        a_val = np.uint32(10)
        b_val = np.uint32(20)
        c_val = np.float64(30.0)
        n = Node()
        n['a'] = a_val
        n['b'] = b_val
        n['c'] = c_val
        self.assertTrue(n['a'] == a_val)
        self.assertTrue(n['b'] == b_val)
        self.assertTrue(n['c'] == c_val)

    def test_nested(self):
        # Chained indexing creates intermediate nodes on assignment.
        val = np.uint32(10)
        n = Node()
        n['a']['b'] = val
        print(n['a']['b'])
        self.assertEqual(n['a']['b'],val)

    def test_vector(self):
        # Numpy array assignment and element access.
        vec = np.array(range(100), np.uint32)
        n = Node()
        n['a'] = vec
        self.assertEqual(n['a'][99], 99)

    def test_fetch(self):
        # fetch() returns a child Node; value() exposes its array.
        vec = np.array(range(100), np.uint32)
        n = Node()
        n['a'] = vec
        na = n.fetch('a')
        na_val = na.value()
        self.assertEqual(na_val[99], 99)

    def test_child(self):
        # child(i) gives positional access; children count tracks adds.
        vec = np.array(range(100), np.uint32)
        n = Node()
        n['a'] = vec
        na = n.child(0)
        na_val = na.value()
        self.assertEqual(na_val[99], 99)
        n['b'] = vec
        self.assertEqual(n.number_of_children(),2)

    def test_save_load(self):
        # Round-trip through the binary, json, base64-json and yaml
        # protocols.
        # on windows, this breaks at 27 !?
        alen = 26
        vec = np.array(range(alen), np.uint32)
        n = Node()
        n['a'] = vec
        print(n)
        n.save("test_pyconduit_node_save_load.conduit_bin")
        nl = Node()
        nl.load("test_pyconduit_node_save_load.conduit_bin")
        print(nl)
        self.assertEqual(nl['a'][alen-1], alen-1)
        n.save("test_pyconduit_node_json_save_load.json",protocol="json")
        nl = Node()
        nl.load("test_pyconduit_node_json_save_load.json", protocol="json")
        print(nl)
        self.assertEqual(nl['a'][alen-1], alen-1)
        n.save("test_pyconduit_node_base64_json_save_load.conduit_base64_json", protocol="conduit_base64_json")
        nl = Node()
        nl.load("test_pyconduit_node_base64_json_save_load.conduit_base64_json", protocol="conduit_base64_json")
        print(nl)
        self.assertEqual(nl['a'][alen-1], alen-1)
        n.save("test_pyconduit_node_json_save_load.yaml",protocol="yaml")
        nl = Node()
        nl.load("test_pyconduit_node_json_save_load.yaml", protocol="yaml")
        print(nl)
        self.assertEqual(nl['a'][alen-1], alen-1)

    def test_parse(self):
        # In-memory parsing from json and yaml text.
        n = Node()
        n.parse('{"a": 42.0}',"json")
        self.assertTrue(n['a'] == np.float64(42.0))
        n.parse('a: 52.0',"yaml")
        self.assertTrue(n['a'] == np.float64(52.0))

    def test_parent(self):
        # A fetched child is not the tree root.
        vec = np.array(range(100), np.uint32)
        n = Node()
        n['a'] = vec
        na = n.fetch('a')
        self.assertFalse(na.is_root())
        # todo: test parent()

    def test_total_bytes(self):
        # 100 uint32 elements -> 400 bytes, strided and compact alike.
        vec = np.array(range(100), np.uint32)
        n = Node()
        n['a'] = vec
        self.assertEqual(n.total_strided_bytes(),4 * 100)
        self.assertEqual(n.total_bytes_compact(),4 * 100)
        # TODO: check if n.is_compact() should pass as well?
        # it doesn't currently
        self.assertTrue(n.fetch('a').is_compact())

    def test_paths(self):
        # has_path() and child_names() agree on the inserted keys.
        n = Node()
        n['a'] = 1
        n['b'] = 2
        n['c'] = 3
        for v in ['a','b','c']:
            self.assertTrue(n.has_path(v))
        paths = n.child_names()
        for v in ['a','b','c']:
            self.assertTrue(v in paths)

    def test_list(self):
        # append() builds list-style (unnamed) children.
        n = Node()
        n.append().set(1)
        self.assertTrue(n.child(0).value(),1)
        self.assertTrue(n[0],1)
        n2 = Node()
        n2_c = n2.append()
        n2_c.set(2)
        self.assertEqual(n2.child(0).value(),2)
        n3 = Node()
        n3.fetch("here").append().set("a")
        n3.fetch("here").append().set("b")
        self.assertTrue(n3.fetch("here").child(0).value(),"a")
        self.assertTrue(n3.fetch("here").child(1).value(),"b")
        n4 = Node()
        n4["here"].append().set("a")
        n5 = n4["here"]
        n5.append().set("b")
        self.assertTrue(n4["here"].child(0).value(),"a")
        self.assertTrue(n4["here"].child(1).value(),"b")
        self.assertTrue(n4["here"][0],"a")
        self.assertTrue(n4["here"][1],"b")

    def test_remove(self):
        # remove() by path and by index.
        n = Node()
        n['a'] = 1
        n['b'] = 2
        n['c'] = 3
        self.assertEqual(n.number_of_children(),3)
        n.remove(path='c')
        self.assertEqual(n.number_of_children(),2)
        paths = n.child_names()
        for v in ['a','b']:
            self.assertTrue(v in paths)
        n.remove(index=0)
        paths = n.child_names()
        for v in ['b']:
            self.assertTrue(v in paths)

    def test_info(self):
        # info() reports the same byte total as the node itself.
        n = Node()
        n['a'] = 1
        n['b'] = 2
        n['c'] = 3
        ni = n.info();
        #print ni
        self.assertEqual(ni["total_strided_bytes"],n.total_strided_bytes())

    def test_set_all_types(self):
        # set() round-trips every supported numeric dtype.
        types = [ 'int8', 'int16', 'int32', 'int64',
                  'uint8', 'uint16', 'uint32', 'uint64',
                  'float32', 'float64']
        for type in types:
            data = np.array(range(10), dtype=type)
            n = Node()
            n.set(data)
            for i in range(len(data)):
                self.assertEqual(n.value()[i], data[i])

    def test_set_external(self):
        # set_external() shares memory: writes on either side are seen
        # by the other.
        types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
        for type in types:
            ext_data = np.array(range(10), dtype=type)
            n = Node()
            n.set_external(ext_data)
            for i in range(len(ext_data)):
                self.assertEqual(n.value()[i], ext_data[i])
            ext_data[5] = 11
            n.value()[8] = 77
            n.value()[2] = 8
            for i in range(len(ext_data)):
                self.assertEqual(n.value()[i], ext_data[i])

    def test_set_external_node(self):
        # set_external() with another Node aliases its data.
        n = Node()
        n.set(np.array(range(10), np.int32))
        n2 = Node()
        # test set external with node
        n2.set_external(n)
        for i in range(10):
            self.assertEqual(n.value()[i], n2.value()[i])
        n.value()[2] = 8
        n.value()[8] = 77
        # set of n should reflect in n2 with set_external
        self.assertEqual(8, n2.value()[2])
        self.assertEqual(77, n2.value()[8])

    def test_set_external_basic_slice(self):
        # External views work for contiguous numpy slices.
        types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
        for type in types:
            base_data = np.array(range(20), dtype=type)
            ext_data = base_data[1:16]
            n = Node()
            n.set_external(ext_data)
            for i in range(len(ext_data)):
                self.assertEqual(n.value()[i], ext_data[i])
            ext_data[5] = 11
            n.value()[6] = 77
            n.value()[2] = 8
            for i in range(len(ext_data)):
                self.assertEqual(n.value()[i], ext_data[i])

    def test_set_external_basic_strides(self):
        # External views also work for strided (step > 1) 1-D slices.
        types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
        for type in types:
            base_data = np.array(range(20), dtype=type)
            ext_data = base_data[1:16:2]
            n = Node()
            n.set_external(ext_data)
            for i in range(len(ext_data)):
                self.assertEqual(n.value()[i], ext_data[i])
            ext_data[5] = 11
            n.value()[6] = 77
            n.value()[2] = 8
            for i in range(len(ext_data)):
                self.assertEqual(n.value()[i], ext_data[i])

    def test_diff(self):
        # diff() is True when nodes differ; diff_compatible() tolerates
        # extra children; final diff uses an epsilon of 10.
        n1 = Node()
        n2 = Node()
        info = Node()
        n1['a'] = 1
        self.assertTrue(n1.diff(n2,info))
        print(info)
        n2['a'] = 1
        self.assertFalse(n1.diff(n2,info))
        n2['b'] = 2.0
        self.assertTrue(n1.diff(n2,info))
        self.assertFalse(n1.diff_compatible(n2,info))
        n1['b'] = 1.0
        self.assertFalse(n1.diff(n2,info,10))

    def test_list_of_ints(self):
        # also covered by test_set_all_types
        # but this was the reproducer for
        # https://github.com/LLNL/conduit/issues/281
        n = Node()
        a = np.array(list((1,2,3)))
        n['a'] = a
        self.assertEqual(n['a'][0], 1)
        self.assertEqual(n['a'][1], 2)
        self.assertEqual(n['a'][2], 3)

    def test_compact_to(self):
        # compact_to() collapses separate allocations into one.
        n = Node()
        n['a'] = 1
        n['b'] = 2
        n['c'] = 3
        ni = n.info()
        self.assertEqual(ni["mem_spaces"].number_of_children(), 3)
        n2 = Node()
        n.compact_to(n2)
        ni = n2.info()
        print(ni)
        self.assertEqual(ni["mem_spaces"].number_of_children(), 1)

    def test_update(self):
        # update() copies; update_external() aliases; update_compatible()
        # leaves incompatible entries untouched.
        n = Node()
        data = np.array(range(10), dtype='float64')
        n["data"].set_external(data)
        print(n)
        n2 = Node()
        n2.update(n)
        print(n2)
        self.assertEqual(n2["data"][0],0)
        n3 = Node()
        n3.update_external(n)
        data[0] = 10
        print(n3)
        self.assertEqual(n3["data"][0],10)
        n4 = Node()
        n4["data"] = 10
        n4.update_compatible(n)
        print(n4)
        self.assertEqual(n4["data"],10)

    def test_reset(self):
        # reset() drops all children.
        n = Node()
        data = np.array(range(10), dtype='float64')
        n["data"].set_external(data)
        print(n)
        n.reset()
        self.assertEqual(n.number_of_children(), 0)

    def test_child_rename(self):
        # rename_child() raises for missing sources or existing targets.
        a_val = np.uint32(10)
        b_val = np.uint32(20)
        n = Node()
        with self.assertRaises(Exception):
            n.rename_child('a','b')
        n['a'] = a_val
        n['b'] = b_val
        with self.assertRaises(Exception):
            n.rename_child('bad','good')
        with self.assertRaises(Exception):
            n.rename_child('b','a')
        self.assertTrue(n['a'] == a_val)
        self.assertTrue(n['b'] == b_val)
        n.rename_child('b','c')
        self.assertTrue(n['a'] == a_val)
        self.assertTrue(n['c'] == b_val)

    def test_string(self):
        # Python strings, numpy strings, and numpy string arrays.
        n = Node();
        n.set("my string!")
        print(n)
        self.assertEqual(n.value(),"my string!")
        # test numpy string
        nps = np.string_("my numpy string!")
        n.set(nps)
        print(n)
        print(repr(n))
        self.assertEqual(n.value(),"my numpy string!")
        aofstrs = np.array(["here","are","a","few","strings"])
        print(aofstrs)
        n.set(aofstrs)
        print(n)
        self.assertEqual(n[0],"here")
        self.assertEqual(n[1],"are")
        self.assertEqual(n[2],"a")
        self.assertEqual(n[3],"few")
        self.assertEqual(n[4],"strings")

    def test_numeric_tuples(self):
        # Tuples coerce to numeric arrays (mixed int/float -> float).
        n = Node()
        n["tuple_0"].set((1, 2, 3, 4))
        n["tuple_1"].set((1.0, 2.0, 3.0, 4.0))
        n["tuple_2"].set((1, 2, 3, 4.0))
        print(n)
        self.assertEqual(n['tuple_0'][0], 1)
        self.assertEqual(n['tuple_0'][1], 2)
        self.assertEqual(n['tuple_0'][2], 3)
        self.assertEqual(n['tuple_0'][3], 4)
        self.assertEqual(n['tuple_1'][0], 1.0)
        self.assertEqual(n['tuple_1'][1], 2.0)
        self.assertEqual(n['tuple_1'][2], 3.0)
        self.assertEqual(n['tuple_1'][3], 4.0)
        self.assertEqual(n['tuple_2'][0], 1.0)
        self.assertEqual(n['tuple_2'][1], 2.0)
        self.assertEqual(n['tuple_2'][2], 3.0)
        self.assertEqual(n['tuple_2'][3], 4.0)

    def test_numeric_lists(self):
        # Same coercion rules for list inputs.
        n = Node()
        n["list_0"].set((1, 2, 3, 4))
        n["list_1"].set((1.0, 2.0, 3.0, 4.0))
        n["list_2"].set((1, 2, 3, 4.0))
        print(n)
        self.assertEqual(n['list_0'][0], 1)
        self.assertEqual(n['list_0'][1], 2)
        self.assertEqual(n['list_0'][2], 3)
        self.assertEqual(n['list_0'][3], 4)
        self.assertEqual(n['list_1'][0], 1.0)
        self.assertEqual(n['list_1'][1], 2.0)
        self.assertEqual(n['list_1'][2], 3.0)
        self.assertEqual(n['list_1'][3], 4.0)
        self.assertEqual(n['list_2'][0], 1.0)
        self.assertEqual(n['list_2'][1], 2.0)
        self.assertEqual(n['list_2'][2], 3.0)
        self.assertEqual(n['list_2'][3], 4.0)

    def test_general_tuples(self):
        # Heterogeneous tuples become list-style children.
        n = Node()
        n.set((1, "here"))
        print(n)
        self.assertEqual(n[0], 1.0)
        self.assertEqual(n[1], "here")

    def test_general_lists(self):
        # Heterogeneous lists likewise.
        n = Node()
        n.set([1, "there"])
        print(n)
        self.assertEqual(n[0], 1.0)
        self.assertEqual(n[1], "there")

    def test_key_with_slash(self):
        # add_child() treats '/' literally, unlike path-based indexing.
        n = Node()
        n["normal/path"] = 10
        n.add_child("child_with_/_inside").set(42)
        print(n)
        self.assertTrue(n.has_path("normal/path"))
        self.assertFalse(n.has_child("normal/path"))
        self.assertFalse(n.has_path("child_with_/_inside"))
        self.assertTrue(n.has_child("child_with_/_inside"))
        self.assertEqual(2,n.number_of_children())
        self.assertEqual(n["normal/path"],10);
        self.assertEqual(n.child(name="child_with_/_inside").value(),42);
        n["normal"].remove_child("path")
        self.assertFalse(n.has_path("normal/path"))

    def test_fetch_existing(self):
        # fetch_existing() raises on unknown paths instead of creating.
        n = Node()
        n["my/path"] = 10
        n_sub = n.fetch_existing("my/path")
        self.assertEqual(n_sub.value(),10);
        with self.assertRaises(Exception):
            n.fetch_existing('bad/path')

    def test_to_string(self):
        # to_string() defaults to yaml; explicit protocols match the
        # dedicated to_yaml()/to_json() methods.
        a_val = np.uint32(10)
        b_val = np.uint32(20)
        c_val = np.float64(30.0)
        n = Node()
        n['a'] = a_val
        n['b'] = b_val
        n['c'] = c_val
        res_to_str_def = n.to_string()
        res_to_str_yaml = n.to_string(protocol="yaml")
        res_to_str_json = n.to_string(protocol="json")
        res_to_yaml = n.to_yaml()
        res_to_json = n.to_json()
        self.assertEqual(res_to_str_def, res_to_yaml);
        self.assertEqual(res_to_str_yaml, res_to_yaml);
        self.assertEqual(res_to_str_json, res_to_json);
        n.print_detailed()

    def test_numpy_slice_as_set_input(self):
        # set() must deep-copy arbitrary strided slices correctly.
        n = Node()
        # slice with non trivial strides
        numpy_array = np.array(range(21), dtype='float64')
        v = numpy_array.reshape((3, 7))
        print("Input Array")
        print(v)
        print("Desired Slice")
        print(v[:,0])
        n['v'] = v
        n['vs'] = v[:,0]
        n['vs_expected'] = np.array(v[:,0],np.float64)
        print(n)
        sdiff = np.setdiff1d(n['vs'], v[:,0])
        print("Set Difference: ",sdiff )
        self.assertEqual(len(sdiff), 0);
        # a more complex slice
        numpy_array = np.array(range(105), dtype='float64')
        v = numpy_array.reshape((3, 7, 5))
        print("Input Array")
        print(v)
        print("Desired Slice")
        print(v[:,0,3:5])
        n['v'] = v
        n['vs'] = v[:,0,3:5]
        n['vs_expected'] = np.array(v[:,0,3:5],np.float64)
        print(n)
        sdiff = np.setdiff1d(n['vs'], v[:,0,3:5])
        print("Set Difference: ",sdiff )
        self.assertEqual(len(sdiff), 0);

    def test_numpy_slice_as_set_external_input(self):
        # set_external() supports 1-D strided views but rejects slices
        # that cannot be described by a single stride.
        n = Node()
        # slice with non trivial strides
        numpy_array = np.array(range(21), dtype='float64')
        v = numpy_array.reshape((3, 7))
        print("Input Array")
        print(v)
        print("Desired Slice")
        print(v[:,0])
        n['v'] = v
        n['vs'].set_external(v[:,0])
        n['vs_expected'] = np.array(v[:,0],np.float64)
        print(n)
        sdiff = np.setdiff1d(n['vs'], v[:,0])
        print("Set Difference: ",sdiff )
        self.assertEqual(len(sdiff), 0);
        # a more complex slice, can't use set external here.
        n = Node()
        numpy_array = np.array(range(105), dtype='float64')
        v = numpy_array.reshape((3, 7, 5))
        with self.assertRaises(TypeError):
            n['vs'].set_external(v[:,0,3:5])
        # lets do a 1-d eff slice, this should work since
        # it reduces to a 1-D strided case
        n['vs'].set_external(v[:,0,0])
        n['vs_expected'] = np.array(v[:,0,0],np.float64)

    def test_describe(self):
        # describe() reports per-child count/min/max/mean statistics.
        n = Node()
        n["a"] = [1,2,3,4,5];
        n["b"] = [1,2,3];
        n["c"] = [1,2,3,4,5,6];
        n["d"] = [1,2,3,4,5,6,7];
        n["e"] = [1,2,3,4,5,6,7,8,9,10,11,12];
        n["f"] = [1.0,2.0,3.0,4.0,5.0,6.0,7.0];
        n["g"] = [2.0,4.0];
        d = n.describe()
        print(d)
        self.assertEqual(d["a/count"],5);
        self.assertEqual(d["b/count"],3);
        self.assertEqual(d["c/count"],6);
        self.assertEqual(d["d/count"],7);
        self.assertEqual(d["e/count"],12);
        self.assertEqual(d["f/count"],7);
        self.assertEqual(d["a/min"],1)
        self.assertEqual(d["b/min"],1)
        self.assertEqual(d["c/min"],1)
        self.assertEqual(d["d/min"],1)
        self.assertEqual(d["e/min"],1)
        self.assertEqual(d["f/min"],1.0)
        self.assertEqual(d["a/max"],5)
        self.assertEqual(d["b/max"],3)
        self.assertEqual(d["c/max"],6)
        self.assertEqual(d["d/max"],7)
        self.assertEqual(d["e/max"],12)
        self.assertEqual(d["f/max"],7.0)
        self.assertEqual(d["g/mean"],3.0);
        opts = Node()
        opts["threshold"] = 10
        d = n.describe(opts)
        print(d)

    def test_summary_string(self):
        # to_summary_string() elides long arrays and many children per
        # its threshold options.
        n = Node()
        n["a"] = [1,2,3,4,5];
        n["b"] = [1,2,3];
        n["c"] = [1,2,3,4,5,6];
        n["d"] = [1,2,3,4,5,6,7];
        n["e"] = [1,2,3,4,5,6,7,8,9,10,11,12];
        n["f"] = [1.0,2.0,3.0,4.0,5.0,6.0,7.0];
        n["g"] = [2.0,4.0];
        print(repr(n))
        r = n.to_summary_string()
        print(r)
        texp = """
a: [1, 2, 3, 4, 5]
b: [1, 2, 3]
c: [1, 2, 3, ..., 5, 6]
d: [1, 2, 3, ..., 6, 7]
e: [1, 2, 3, ..., 11, 12]
f: [1.0, 2.0, 3.0, ..., 6.0, 7.0]
g: [2.0, 4.0]
"""
        self.assertEqual(r,texp)
        opts = Node()
        opts["num_children_threshold"] = 2
        opts["num_elements_threshold"] = 3
        r = n.to_summary_string(opts)
        print(r)
        texp = """
a: [1, 2, ..., 5]
... ( skipped 5 children )
g: [2.0, 4.0]
"""
        self.assertEqual(r,texp)
        r = n.to_summary_string(opts=opts)
        print(r)
        self.assertEqual(r,texp)
        opts = Node()
        opts["num_children_threshold"] = 100
        opts["num_elements_threshold"] = -1
        r = n.to_summary_string(opts)
        print(r)
        self.assertEqual(r,n.to_yaml())
# Run the test suite when executed as a script. (Dataset residue that had
# been fused onto this line is removed.)
if __name__ == '__main__':
    unittest.main()
import unittest
from conduit import Node
import numpy as np
class Test_Conduit_Node(unittest.TestCase):
def test_simple(self):
a_val = np.uint32(10)
b_val = np.uint32(20)
c_val = np.float64(30.0)
n = Node()
n['a'] = a_val
n['b'] = b_val
n['c'] = c_val
self.assertTrue(n['a'] == a_val)
self.assertTrue(n['b'] == b_val)
self.assertTrue(n['c'] == c_val)
def test_nested(self):
val = np.uint32(10)
n = Node()
n['a']['b'] = val
print(n['a']['b'])
self.assertEqual(n['a']['b'],val)
def test_vector(self):
vec = np.array(range(100), np.uint32)
n = Node()
n['a'] = vec
self.assertEqual(n['a'][99], 99)
def test_fetch(self):
vec = np.array(range(100), np.uint32)
n = Node()
n['a'] = vec
na = n.fetch('a')
na_val = na.value()
self.assertEqual(na_val[99], 99)
def test_child(self):
vec = np.array(range(100), np.uint32)
n = Node()
n['a'] = vec
na = n.child(0)
na_val = na.value()
self.assertEqual(na_val[99], 99)
n['b'] = vec
self.assertEqual(n.number_of_children(),2)
def test_save_load(self):
# on windows, this breaks at 27 !?
alen = 26
vec = np.array(range(alen), np.uint32)
n = Node()
n['a'] = vec
print(n)
n.save("test_pyconduit_node_save_load.conduit_bin")
nl = Node()
nl.load("test_pyconduit_node_save_load.conduit_bin")
print(nl)
self.assertEqual(nl['a'][alen-1], alen-1)
n.save("test_pyconduit_node_json_save_load.json",protocol="json")
nl = Node()
nl.load("test_pyconduit_node_json_save_load.json", protocol="json")
print(nl)
self.assertEqual(nl['a'][alen-1], alen-1)
n.save("test_pyconduit_node_base64_json_save_load.conduit_base64_json", protocol="conduit_base64_json")
nl = Node()
nl.load("test_pyconduit_node_base64_json_save_load.conduit_base64_json", protocol="conduit_base64_json")
print(nl)
self.assertEqual(nl['a'][alen-1], alen-1)
n.save("test_pyconduit_node_json_save_load.yaml",protocol="yaml")
nl = Node()
nl.load("test_pyconduit_node_json_save_load.yaml", protocol="yaml")
print(nl)
self.assertEqual(nl['a'][alen-1], alen-1)
def test_parse(self):
n = Node()
n.parse('{"a": 42.0}',"json")
self.assertTrue(n['a'] == np.float64(42.0))
n.parse('a: 52.0',"yaml")
self.assertTrue(n['a'] == np.float64(52.0))
def test_parent(self):
vec = np.array(range(100), np.uint32)
n = Node()
n['a'] = vec
na = n.fetch('a')
self.assertFalse(na.is_root())
# todo: test parent()
def test_total_bytes(self):
vec = np.array(range(100), np.uint32)
n = Node()
n['a'] = vec
self.assertEqual(n.total_strided_bytes(),4 * 100)
self.assertEqual(n.total_bytes_compact(),4 * 100)
# TODO: check if n.is_compact() should pass as well?
# it doesn't currently
self.assertTrue(n.fetch('a').is_compact())
def test_paths(self):
n = Node()
n['a'] = 1
n['b'] = 2
n['c'] = 3
for v in ['a','b','c']:
self.assertTrue(n.has_path(v))
paths = n.child_names()
for v in ['a','b','c']:
self.assertTrue(v in paths)
def test_list(self):
n = Node()
n.append().set(1)
self.assertTrue(n.child(0).value(),1)
self.assertTrue(n[0],1)
n2 = Node()
n2_c = n2.append()
n2_c.set(2)
self.assertEqual(n2.child(0).value(),2)
n3 = Node()
n3.fetch("here").append().set("a")
n3.fetch("here").append().set("b")
self.assertTrue(n3.fetch("here").child(0).value(),"a")
self.assertTrue(n3.fetch("here").child(1).value(),"b")
n4 = Node()
n4["here"].append().set("a")
n5 = n4["here"]
n5.append().set("b")
self.assertTrue(n4["here"].child(0).value(),"a")
self.assertTrue(n4["here"].child(1).value(),"b")
self.assertTrue(n4["here"][0],"a")
self.assertTrue(n4["here"][1],"b")
def test_remove(self):
n = Node()
n['a'] = 1
n['b'] = 2
n['c'] = 3
self.assertEqual(n.number_of_children(),3)
n.remove(path='c')
self.assertEqual(n.number_of_children(),2)
paths = n.child_names()
for v in ['a','b']:
self.assertTrue(v in paths)
n.remove(index=0)
paths = n.child_names()
for v in ['b']:
self.assertTrue(v in paths)
def test_info(self):
n = Node()
n['a'] = 1
n['b'] = 2
n['c'] = 3
ni = n.info();
#print ni
self.assertEqual(ni["total_strided_bytes"],n.total_strided_bytes())
def test_set_all_types(self):
types = [ 'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64']
for type in types:
data = np.array(range(10), dtype=type)
n = Node()
n.set(data)
for i in range(len(data)):
self.assertEqual(n.value()[i], data[i])
def test_set_external(self):
types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
for type in types:
ext_data = np.array(range(10), dtype=type)
n = Node()
n.set_external(ext_data)
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
ext_data[5] = 11
n.value()[8] = 77
n.value()[2] = 8
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
def test_set_external_node(self):
n = Node()
n.set(np.array(range(10), np.int32))
n2 = Node()
# test set external with node
n2.set_external(n)
for i in range(10):
self.assertEqual(n.value()[i], n2.value()[i])
n.value()[2] = 8
n.value()[8] = 77
# set of n should reflect in n2 with set_external
self.assertEqual(8, n2.value()[2])
self.assertEqual(77, n2.value()[8])
def test_set_external_basic_slice(self):
types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
for type in types:
base_data = np.array(range(20), dtype=type)
ext_data = base_data[1:16]
n = Node()
n.set_external(ext_data)
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
ext_data[5] = 11
n.value()[6] = 77
n.value()[2] = 8
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
def test_set_external_basic_strides(self):
types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
for type in types:
base_data = np.array(range(20), dtype=type)
ext_data = base_data[1:16:2]
n = Node()
n.set_external(ext_data)
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
ext_data[5] = 11
n.value()[6] = 77
n.value()[2] = 8
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
def test_diff(self):
n1 = Node()
n2 = Node()
info = Node()
n1['a'] = 1
self.assertTrue(n1.diff(n2,info))
print(info)
n2['a'] = 1
self.assertFalse(n1.diff(n2,info))
n2['b'] = 2.0
self.assertTrue(n1.diff(n2,info))
self.assertFalse(n1.diff_compatible(n2,info))
n1['b'] = 1.0
self.assertFalse(n1.diff(n2,info,10))
def test_list_of_ints(self):
# also covered by test_set_all_types
# but this was the reproducer for
# https://github.com/LLNL/conduit/issues/281
n = Node()
a = np.array(list((1,2,3)))
n['a'] = a
self.assertEqual(n['a'][0], 1)
self.assertEqual(n['a'][1], 2)
self.assertEqual(n['a'][2], 3)
def test_compact_to(self):
n = Node()
n['a'] = 1
n['b'] = 2
n['c'] = 3
ni = n.info()
self.assertEqual(ni["mem_spaces"].number_of_children(), 3)
n2 = Node()
n.compact_to(n2)
ni = n2.info()
print(ni)
self.assertEqual(ni["mem_spaces"].number_of_children(), 1)
def test_update(self):
n = Node()
data = np.array(range(10), dtype='float64')
n["data"].set_external(data)
print(n)
n2 = Node()
n2.update(n)
print(n2)
self.assertEqual(n2["data"][0],0)
n3 = Node()
n3.update_external(n)
data[0] = 10
print(n3)
self.assertEqual(n3["data"][0],10)
n4 = Node()
n4["data"] = 10
n4.update_compatible(n)
print(n4)
self.assertEqual(n4["data"],10)
def test_reset(self):
n = Node()
data = np.array(range(10), dtype='float64')
n["data"].set_external(data)
print(n)
n.reset()
self.assertEqual(n.number_of_children(), 0)
def test_child_rename(self):
a_val = np.uint32(10)
b_val = np.uint32(20)
n = Node()
with self.assertRaises(Exception):
n.rename_child('a','b')
n['a'] = a_val
n['b'] = b_val
with self.assertRaises(Exception):
n.rename_child('bad','good')
with self.assertRaises(Exception):
n.rename_child('b','a')
self.assertTrue(n['a'] == a_val)
self.assertTrue(n['b'] == b_val)
n.rename_child('b','c')
self.assertTrue(n['a'] == a_val)
self.assertTrue(n['c'] == b_val)
def test_string(self):
n = Node();
n.set("my string!")
print(n)
self.assertEqual(n.value(),"my string!")
# test numpy string
nps = np.string_("my numpy string!")
n.set(nps)
print(n)
print(repr(n))
self.assertEqual(n.value(),"my numpy string!")
aofstrs = np.array(["here","are","a","few","strings"])
print(aofstrs)
n.set(aofstrs)
print(n)
self.assertEqual(n[0],"here")
self.assertEqual(n[1],"are")
self.assertEqual(n[2],"a")
self.assertEqual(n[3],"few")
self.assertEqual(n[4],"strings")
def test_numeric_tuples(self):
n = Node()
n["tuple_0"].set((1, 2, 3, 4))
n["tuple_1"].set((1.0, 2.0, 3.0, 4.0))
n["tuple_2"].set((1, 2, 3, 4.0))
print(n)
self.assertEqual(n['tuple_0'][0], 1)
self.assertEqual(n['tuple_0'][1], 2)
self.assertEqual(n['tuple_0'][2], 3)
self.assertEqual(n['tuple_0'][3], 4)
self.assertEqual(n['tuple_1'][0], 1.0)
self.assertEqual(n['tuple_1'][1], 2.0)
self.assertEqual(n['tuple_1'][2], 3.0)
self.assertEqual(n['tuple_1'][3], 4.0)
self.assertEqual(n['tuple_2'][0], 1.0)
self.assertEqual(n['tuple_2'][1], 2.0)
self.assertEqual(n['tuple_2'][2], 3.0)
self.assertEqual(n['tuple_2'][3], 4.0)
def test_numeric_lists(self):
n = Node()
n["list_0"].set((1, 2, 3, 4))
n["list_1"].set((1.0, 2.0, 3.0, 4.0))
n["list_2"].set((1, 2, 3, 4.0))
print(n)
self.assertEqual(n['list_0'][0], 1)
self.assertEqual(n['list_0'][1], 2)
self.assertEqual(n['list_0'][2], 3)
self.assertEqual(n['list_0'][3], 4)
self.assertEqual(n['list_1'][0], 1.0)
self.assertEqual(n['list_1'][1], 2.0)
self.assertEqual(n['list_1'][2], 3.0)
self.assertEqual(n['list_1'][3], 4.0)
self.assertEqual(n['list_2'][0], 1.0)
self.assertEqual(n['list_2'][1], 2.0)
self.assertEqual(n['list_2'][2], 3.0)
self.assertEqual(n['list_2'][3], 4.0)
def test_general_tuples(self):
n = Node()
n.set((1, "here"))
print(n)
self.assertEqual(n[0], 1.0)
self.assertEqual(n[1], "here")
def test_general_lists(self):
n = Node()
n.set([1, "there"])
print(n)
self.assertEqual(n[0], 1.0)
self.assertEqual(n[1], "there")
def test_key_with_slash(self):
n = Node()
n["normal/path"] = 10
n.add_child("child_with_/_inside").set(42)
print(n)
self.assertTrue(n.has_path("normal/path"))
self.assertFalse(n.has_child("normal/path"))
self.assertFalse(n.has_path("child_with_/_inside"))
self.assertTrue(n.has_child("child_with_/_inside"))
self.assertEqual(2,n.number_of_children())
self.assertEqual(n["normal/path"],10);
self.assertEqual(n.child(name="child_with_/_inside").value(),42);
n["normal"].remove_child("path")
self.assertFalse(n.has_path("normal/path"))
def test_fetch_existing(self):
n = Node()
n["my/path"] = 10
n_sub = n.fetch_existing("my/path")
self.assertEqual(n_sub.value(),10);
with self.assertRaises(Exception):
n.fetch_existing('bad/path')
def test_to_string(self):
a_val = np.uint32(10)
b_val = np.uint32(20)
c_val = np.float64(30.0)
n = Node()
n['a'] = a_val
n['b'] = b_val
n['c'] = c_val
res_to_str_def = n.to_string()
res_to_str_yaml = n.to_string(protocol="yaml")
res_to_str_json = n.to_string(protocol="json")
res_to_yaml = n.to_yaml()
res_to_json = n.to_json()
self.assertEqual(res_to_str_def, res_to_yaml);
self.assertEqual(res_to_str_yaml, res_to_yaml);
self.assertEqual(res_to_str_json, res_to_json);
n.print_detailed()
def test_numpy_slice_as_set_input(self):
n = Node()
# slice with non trivial strides
numpy_array = np.array(range(21), dtype='float64')
v = numpy_array.reshape((3, 7))
print("Input Array")
print(v)
print("Desired Slice")
print(v[:,0])
n['v'] = v
n['vs'] = v[:,0]
n['vs_expected'] = np.array(v[:,0],np.float64)
print(n)
sdiff = np.setdiff1d(n['vs'], v[:,0])
print("Set Difference: ",sdiff )
self.assertEqual(len(sdiff), 0);
# a more complex slice
numpy_array = np.array(range(105), dtype='float64')
v = numpy_array.reshape((3, 7, 5))
print("Input Array")
print(v)
print("Desired Slice")
print(v[:,0,3:5])
n['v'] = v
n['vs'] = v[:,0,3:5]
n['vs_expected'] = np.array(v[:,0,3:5],np.float64)
print(n)
sdiff = np.setdiff1d(n['vs'], v[:,0,3:5])
print("Set Difference: ",sdiff )
self.assertEqual(len(sdiff), 0);
def test_numpy_slice_as_set_external_input(self):
n = Node()
# slice with non trivial strides
numpy_array = np.array(range(21), dtype='float64')
v = numpy_array.reshape((3, 7))
print("Input Array")
print(v)
print("Desired Slice")
print(v[:,0])
n['v'] = v
n['vs'].set_external(v[:,0])
n['vs_expected'] = np.array(v[:,0],np.float64)
print(n)
sdiff = np.setdiff1d(n['vs'], v[:,0])
print("Set Difference: ",sdiff )
self.assertEqual(len(sdiff), 0);
# a more complex slice, can't use set external here.
n = Node()
numpy_array = np.array(range(105), dtype='float64')
v = numpy_array.reshape((3, 7, 5))
with self.assertRaises(TypeError):
n['vs'].set_external(v[:,0,3:5])
# lets do a 1-d eff slice, this should work since
# it reduces to a 1-D strided case
n['vs'].set_external(v[:,0,0])
n['vs_expected'] = np.array(v[:,0,0],np.float64)
def test_describe(self):
n = Node()
n["a"] = [1,2,3,4,5];
n["b"] = [1,2,3];
n["c"] = [1,2,3,4,5,6];
n["d"] = [1,2,3,4,5,6,7];
n["e"] = [1,2,3,4,5,6,7,8,9,10,11,12];
n["f"] = [1.0,2.0,3.0,4.0,5.0,6.0,7.0];
n["g"] = [2.0,4.0];
d = n.describe()
print(d)
self.assertEqual(d["a/count"],5);
self.assertEqual(d["b/count"],3);
self.assertEqual(d["c/count"],6);
self.assertEqual(d["d/count"],7);
self.assertEqual(d["e/count"],12);
self.assertEqual(d["f/count"],7);
self.assertEqual(d["a/min"],1)
self.assertEqual(d["b/min"],1)
self.assertEqual(d["c/min"],1)
self.assertEqual(d["d/min"],1)
self.assertEqual(d["e/min"],1)
self.assertEqual(d["f/min"],1.0)
self.assertEqual(d["a/max"],5)
self.assertEqual(d["b/max"],3)
self.assertEqual(d["c/max"],6)
self.assertEqual(d["d/max"],7)
self.assertEqual(d["e/max"],12)
self.assertEqual(d["f/max"],7.0)
self.assertEqual(d["g/mean"],3.0);
opts = Node()
opts["threshold"] = 10
d = n.describe(opts)
print(d)
def test_summary_string(self):
n = Node()
n["a"] = [1,2,3,4,5];
n["b"] = [1,2,3];
n["c"] = [1,2,3,4,5,6];
n["d"] = [1,2,3,4,5,6,7];
n["e"] = [1,2,3,4,5,6,7,8,9,10,11,12];
n["f"] = [1.0,2.0,3.0,4.0,5.0,6.0,7.0];
n["g"] = [2.0,4.0];
print(repr(n))
r = n.to_summary_string()
print(r)
texp = """
a: [1, 2, 3, 4, 5]
b: [1, 2, 3]
c: [1, 2, 3, ..., 5, 6]
d: [1, 2, 3, ..., 6, 7]
e: [1, 2, 3, ..., 11, 12]
f: [1.0, 2.0, 3.0, ..., 6.0, 7.0]
g: [2.0, 4.0]
"""
self.assertEqual(r,texp)
opts = Node()
opts["num_children_threshold"] = 2
opts["num_elements_threshold"] = 3
r = n.to_summary_string(opts)
print(r)
texp = """
a: [1, 2, ..., 5]
... ( skipped 5 children )
g: [2.0, 4.0]
"""
self.assertEqual(r,texp)
r = n.to_summary_string(opts=opts)
print(r)
self.assertEqual(r,texp)
opts = Node()
opts["num_children_threshold"] = 100
opts["num_elements_threshold"] = -1
r = n.to_summary_string(opts)
print(r)
self.assertEqual(r,n.to_yaml())
if __name__ == '__main__':
unittest.main() | 0.189484 | 0.517022 |
from __future__ import print_function
import re
from functools import partial
from mapproxy.compat import iteritems, itervalues, iterkeys
from mapproxy.request.wmts import (
wmts_request, make_wmts_rest_request_parser,
URLTemplateConverter,
FeatureInfoURLTemplateConverter,
)
from mapproxy.layer import InfoQuery
from mapproxy.featureinfo import combine_docs
from mapproxy.service.base import Server
from mapproxy.response import Response
from mapproxy.exception import RequestError
from mapproxy.util.coverage import load_limited_to
from mapproxy.util.ext.odict import odict
from mapproxy.template import template_loader, bunch
env = {'bunch': bunch}
get_template = template_loader(__name__, 'templates', namespace=env)
import logging
log = logging.getLogger(__name__)
class WMTSServer(Server):
service = 'wmts'
def __init__(self, layers, md, request_parser=None, max_tile_age=None, info_formats={}):
Server.__init__(self)
self.request_parser = request_parser or wmts_request
self.md = md
self.max_tile_age = max_tile_age
self.layers, self.matrix_sets = self._matrix_sets(layers)
self.capabilities_class = Capabilities
self.fi_transformers = None
self.info_formats = info_formats
def _matrix_sets(self, layers):
sets = {}
layers_grids = odict()
for layer in layers.values():
grid = layer.grid
if not grid.supports_access_with_origin('nw'):
log.warning("skipping layer '%s' for WMTS, grid '%s' of cache '%s' is not compatible with WMTS",
layer.name, grid.name, layer.md['cache_name'])
continue
if grid.name not in sets:
try:
sets[grid.name] = TileMatrixSet(grid)
except AssertionError:
continue # TODO
layers_grids.setdefault(layer.name, odict())[grid.name] = layer
wmts_layers = odict()
for layer_name, layers in layers_grids.items():
wmts_layers[layer_name] = WMTSTileLayer(layers)
return wmts_layers, sets.values()
def capabilities(self, request):
service = self._service_md(request)
layers = self.authorized_tile_layers(request.http.environ)
result = self.capabilities_class(service, layers, self.matrix_sets, info_formats=self.info_formats).render(request)
return Response(result, mimetype='application/xml')
def tile(self, request):
self.check_request(request)
tile_layer = self.layers[request.layer][request.tilematrixset]
if not request.format:
request.format = tile_layer.format
self.check_request_dimensions(tile_layer, request)
limited_to = self.authorize_tile_layer(tile_layer, request)
def decorate_img(image):
query_extent = tile_layer.grid.srs.srs_code, tile_layer.tile_bbox(request)
return self.decorate_img(image, 'wmts', [tile_layer.name], request.http.environ, query_extent)
tile = tile_layer.render(request, coverage=limited_to, decorate_img=decorate_img)
# set the content_type to tile.format and not to request.format ( to support mixed_mode)
resp = Response(tile.as_buffer(), content_type='image/' + tile.format)
resp.cache_headers(tile.timestamp, etag_data=(tile.timestamp, tile.size),
max_age=self.max_tile_age)
resp.make_conditional(request.http)
return resp
def featureinfo(self, request):
infos = []
self.check_request(request, self.info_formats)
tile_layer = self.layers[request.layer][request.tilematrixset]
if not request.format:
request.format = tile_layer.format
feature_count = None
# WMTS REST style request do not have request params
if hasattr(request, 'params'):
feature_count = request.params.get('feature_count', None)
bbox = tile_layer.grid.tile_bbox(request.tile)
query = InfoQuery(bbox, tile_layer.grid.tile_size, tile_layer.grid.srs, request.pos,
request.infoformat, feature_count=feature_count)
self.check_request_dimensions(tile_layer, request)
coverage = self.authorize_tile_layer(tile_layer, request, featureinfo=True)
if not tile_layer.info_sources:
raise RequestError('layer %s not queryable' % str(request.layer),
code='OperationNotSupported', request=request)
if coverage and not coverage.contains(query.coord, query.srs):
infos = []
else:
for source in tile_layer.info_sources:
info = source.get_info(query)
if info is None:
continue
infos.append(info)
mimetype = request.infoformat
if not infos:
return Response('', mimetype=mimetype)
resp, _ = combine_docs(infos)
return Response(resp, mimetype=mimetype)
def authorize_tile_layer(self, tile_layer, request, featureinfo=False):
if 'mapproxy.authorize' not in request.http.environ:
return
query_extent = tile_layer.grid.srs.srs_code, tile_layer.tile_bbox(request)
service = 'wmts'
key = 'tile'
if featureinfo:
service += '.featureinfo'
key = 'featureinfo'
result = request.http.environ['mapproxy.authorize'](service, [tile_layer.name],
query_extent=query_extent, environ=request.http.environ)
if result['authorized'] == 'unauthenticated':
raise RequestError('unauthorized', status=401)
if result['authorized'] == 'full':
return
if result['authorized'] == 'partial':
if result['layers'].get(tile_layer.name, {}).get(key, False) == True:
limited_to = result['layers'][tile_layer.name].get('limited_to')
if not limited_to:
limited_to = result.get('limited_to')
if limited_to:
return load_limited_to(limited_to)
else:
return None
raise RequestError('forbidden', status=403)
def authorized_tile_layers(self, env):
if 'mapproxy.authorize' in env:
result = env['mapproxy.authorize']('wmts', [l for l in self.layers],
query_extent=None, environ=env)
if result['authorized'] == 'unauthenticated':
raise RequestError('unauthorized', status=401)
if result['authorized'] == 'full':
return self.layers.values()
if result['authorized'] == 'none':
raise RequestError('forbidden', status=403)
allowed_layers = []
for layer in itervalues(self.layers):
if result['layers'].get(layer.name, {}).get('tile', False) == True:
allowed_layers.append(layer)
return allowed_layers
else:
return self.layers.values()
def check_request(self, request, info_formats=None):
request.make_request()
if request.layer not in self.layers:
raise RequestError('unknown layer: ' + str(request.layer),
code='InvalidParameterValue', request=request)
if request.tilematrixset not in self.layers[request.layer]:
raise RequestError('unknown tilematrixset: ' + str(request.tilematrixset),
code='InvalidParameterValue', request=request)
if info_formats is not None:
if '/' in request.infoformat: # mimetype
if request.infoformat not in self.info_formats.values():
raise RequestError('unknown infoformat: ' + str(request.infoformat),
code='InvalidParameterValue', request=request)
else: # RESTful suffix
if request.infoformat not in self.info_formats:
raise RequestError('unknown infoformat: ' + str(request.infoformat),
code='InvalidParameterValue', request=request)
# set mimetype as infoformat
request.infoformat = self.info_formats[request.infoformat]
def check_request_dimensions(self, tile_layer, request):
# allow arbitrary dimensions in KVP service
# actual used values are checked later in TileLayer
pass
def _service_md(self, tile_request):
md = dict(self.md)
md['url'] = tile_request.url
return md
class WMTSRestServer(WMTSServer):
"""
OGC WMTS 1.0.0 RESTful Server
"""
service = None
names = ('wmts',)
request_methods = ('tile', 'capabilities')
default_template = '/{Layer}/{TileMatrixSet}/{TileMatrix}/{TileCol}/{TileRow}.{Format}'
default_info_template = '/{Layer}/{TileMatrixSet}/{TileMatrix}/{TileCol}/{TileRow}/{I}/{J}.{InfoFormat}'
def __init__(self, layers, md, max_tile_age=None, template=None, fi_template=None, info_formats={}):
WMTSServer.__init__(self, layers, md)
self.max_tile_age = max_tile_age
self.template = template or self.default_template
self.fi_template = fi_template or self.default_info_template
self.info_formats = info_formats
self.url_converter = URLTemplateConverter(self.template)
self.fi_url_converter = FeatureInfoURLTemplateConverter(self.fi_template)
self.request_parser = make_wmts_rest_request_parser(self.url_converter, self.fi_url_converter)
self.capabilities_class = partial(RestfulCapabilities, url_converter=self.url_converter, fi_url_converter=self.fi_url_converter)
def check_request_dimensions(self, tile_layer, request):
# check that unknown dimension for this layer are set to default
if request.dimensions:
for dimension, value in iteritems(request.dimensions):
dimension = dimension.lower()
if dimension not in tile_layer.dimensions and value != 'default':
raise RequestError('unknown dimension: ' + str(dimension),
code='InvalidParameterValue', request=request)
class Capabilities(object):
"""
Renders WMS capabilities documents.
"""
def __init__(self, server_md, layers, matrix_sets, info_formats={}):
self.service = server_md
self.layers = layers
self.info_formats = info_formats
self.matrix_sets = matrix_sets
def render(self, _map_request):
return self._render_template(_map_request.capabilities_template)
def template_context(self):
return dict(service=bunch(default='', **self.service),
restful=False,
layers=self.layers,
info_formats=self.info_formats,
tile_matrix_sets=self.matrix_sets)
def _render_template(self, template):
template = get_template(template)
doc = template.substitute(**self.template_context())
# strip blank lines
doc = '\n'.join(l for l in doc.split('\n') if l.rstrip())
return doc
class RestfulCapabilities(Capabilities):
def __init__(self, server_md, layers, matrix_sets, url_converter, fi_url_converter, info_formats={}):
Capabilities.__init__(self, server_md, layers, matrix_sets, info_formats=info_formats)
self.url_converter = url_converter
self.fi_url_converter = fi_url_converter
def template_context(self):
return dict(service=bunch(default='', **self.service),
restful=True,
layers=self.layers,
info_formats=self.info_formats,
tile_matrix_sets=self.matrix_sets,
resource_template=self.url_converter.template,
fi_resource_template=self.fi_url_converter.template,
# dimension_key maps lowercase dimensions to the actual
# casing from the restful template
dimension_keys=dict((k.lower(), k) for k in self.url_converter.dimensions),
format_resource_template=format_resource_template,
format_info_resource_template=format_info_resource_template,
)
def format_resource_template(layer, template, service):
if '{Format}' in template:
template = template.replace('{Format}', layer.format)
if '{Layer}' in template:
template = template.replace('{Layer}', layer.name)
return service.url + template
def format_info_resource_template(layer, template, info_format, service):
if '{InfoFormat}' in template:
template = template.replace('{InfoFormat}', info_format)
if '{Layer}' in template:
template = template.replace('{Layer}', layer.name)
return service.url + template
class WMTSTileLayer(object):
"""
Wrap multiple TileLayers for the same cache but with different grids.
"""
def __init__(self, layers):
self.grids = [lyr.grid for lyr in layers.values()]
self.layers = layers
self._layer = layers[next(iterkeys(layers))]
def __getattr__(self, name):
return getattr(self._layer, name)
def __contains__(self, gridname):
return gridname in self.layers
def __getitem__(self, gridname):
return self.layers[gridname]
# calculated from well-known scale set GoogleCRS84Quad
METERS_PER_DEEGREE = 111319.4907932736
def meter_per_unit(srs):
if srs.is_latlong:
return METERS_PER_DEEGREE
return 1
class TileMatrixSet(object):
def __init__(self, grid):
self.grid = grid
self.name = grid.name
self.srs_name = grid.srs.srs_code
self.tile_matrices = list(self._tile_matrices())
def __iter__(self):
return iter(self.tile_matrices)
def _tile_matrices(self):
for level, res in self.grid.resolutions.iteritems():
origin = self.grid.origin_tile(level, 'ul')
bbox = self.grid.tile_bbox(origin)
topleft = bbox[0], bbox[3]
if self.grid.srs.is_axis_order_ne:
topleft = bbox[3], bbox[0]
grid_size = self.grid.grid_sizes[level]
scale_denom = res / (0.28 / 1000) * meter_per_unit(self.grid.srs)
yield bunch(
identifier=level,
topleft=topleft,
grid_size=grid_size,
scale_denom=scale_denom,
tile_size=self.grid.tile_size,
) | mapproxy/service/wmts.py | from __future__ import print_function
import re
from functools import partial
from mapproxy.compat import iteritems, itervalues, iterkeys
from mapproxy.request.wmts import (
wmts_request, make_wmts_rest_request_parser,
URLTemplateConverter,
FeatureInfoURLTemplateConverter,
)
from mapproxy.layer import InfoQuery
from mapproxy.featureinfo import combine_docs
from mapproxy.service.base import Server
from mapproxy.response import Response
from mapproxy.exception import RequestError
from mapproxy.util.coverage import load_limited_to
from mapproxy.util.ext.odict import odict
from mapproxy.template import template_loader, bunch
env = {'bunch': bunch}
get_template = template_loader(__name__, 'templates', namespace=env)
import logging
log = logging.getLogger(__name__)
class WMTSServer(Server):
service = 'wmts'
def __init__(self, layers, md, request_parser=None, max_tile_age=None, info_formats={}):
Server.__init__(self)
self.request_parser = request_parser or wmts_request
self.md = md
self.max_tile_age = max_tile_age
self.layers, self.matrix_sets = self._matrix_sets(layers)
self.capabilities_class = Capabilities
self.fi_transformers = None
self.info_formats = info_formats
def _matrix_sets(self, layers):
sets = {}
layers_grids = odict()
for layer in layers.values():
grid = layer.grid
if not grid.supports_access_with_origin('nw'):
log.warning("skipping layer '%s' for WMTS, grid '%s' of cache '%s' is not compatible with WMTS",
layer.name, grid.name, layer.md['cache_name'])
continue
if grid.name not in sets:
try:
sets[grid.name] = TileMatrixSet(grid)
except AssertionError:
continue # TODO
layers_grids.setdefault(layer.name, odict())[grid.name] = layer
wmts_layers = odict()
for layer_name, layers in layers_grids.items():
wmts_layers[layer_name] = WMTSTileLayer(layers)
return wmts_layers, sets.values()
def capabilities(self, request):
service = self._service_md(request)
layers = self.authorized_tile_layers(request.http.environ)
result = self.capabilities_class(service, layers, self.matrix_sets, info_formats=self.info_formats).render(request)
return Response(result, mimetype='application/xml')
def tile(self, request):
self.check_request(request)
tile_layer = self.layers[request.layer][request.tilematrixset]
if not request.format:
request.format = tile_layer.format
self.check_request_dimensions(tile_layer, request)
limited_to = self.authorize_tile_layer(tile_layer, request)
def decorate_img(image):
query_extent = tile_layer.grid.srs.srs_code, tile_layer.tile_bbox(request)
return self.decorate_img(image, 'wmts', [tile_layer.name], request.http.environ, query_extent)
tile = tile_layer.render(request, coverage=limited_to, decorate_img=decorate_img)
# set the content_type to tile.format and not to request.format ( to support mixed_mode)
resp = Response(tile.as_buffer(), content_type='image/' + tile.format)
resp.cache_headers(tile.timestamp, etag_data=(tile.timestamp, tile.size),
max_age=self.max_tile_age)
resp.make_conditional(request.http)
return resp
def featureinfo(self, request):
infos = []
self.check_request(request, self.info_formats)
tile_layer = self.layers[request.layer][request.tilematrixset]
if not request.format:
request.format = tile_layer.format
feature_count = None
# WMTS REST style request do not have request params
if hasattr(request, 'params'):
feature_count = request.params.get('feature_count', None)
bbox = tile_layer.grid.tile_bbox(request.tile)
query = InfoQuery(bbox, tile_layer.grid.tile_size, tile_layer.grid.srs, request.pos,
request.infoformat, feature_count=feature_count)
self.check_request_dimensions(tile_layer, request)
coverage = self.authorize_tile_layer(tile_layer, request, featureinfo=True)
if not tile_layer.info_sources:
raise RequestError('layer %s not queryable' % str(request.layer),
code='OperationNotSupported', request=request)
if coverage and not coverage.contains(query.coord, query.srs):
infos = []
else:
for source in tile_layer.info_sources:
info = source.get_info(query)
if info is None:
continue
infos.append(info)
mimetype = request.infoformat
if not infos:
return Response('', mimetype=mimetype)
resp, _ = combine_docs(infos)
return Response(resp, mimetype=mimetype)
def authorize_tile_layer(self, tile_layer, request, featureinfo=False):
if 'mapproxy.authorize' not in request.http.environ:
return
query_extent = tile_layer.grid.srs.srs_code, tile_layer.tile_bbox(request)
service = 'wmts'
key = 'tile'
if featureinfo:
service += '.featureinfo'
key = 'featureinfo'
result = request.http.environ['mapproxy.authorize'](service, [tile_layer.name],
query_extent=query_extent, environ=request.http.environ)
if result['authorized'] == 'unauthenticated':
raise RequestError('unauthorized', status=401)
if result['authorized'] == 'full':
return
if result['authorized'] == 'partial':
if result['layers'].get(tile_layer.name, {}).get(key, False) == True:
limited_to = result['layers'][tile_layer.name].get('limited_to')
if not limited_to:
limited_to = result.get('limited_to')
if limited_to:
return load_limited_to(limited_to)
else:
return None
raise RequestError('forbidden', status=403)
def authorized_tile_layers(self, env):
if 'mapproxy.authorize' in env:
result = env['mapproxy.authorize']('wmts', [l for l in self.layers],
query_extent=None, environ=env)
if result['authorized'] == 'unauthenticated':
raise RequestError('unauthorized', status=401)
if result['authorized'] == 'full':
return self.layers.values()
if result['authorized'] == 'none':
raise RequestError('forbidden', status=403)
allowed_layers = []
for layer in itervalues(self.layers):
if result['layers'].get(layer.name, {}).get('tile', False) == True:
allowed_layers.append(layer)
return allowed_layers
else:
return self.layers.values()
def check_request(self, request, info_formats=None):
request.make_request()
if request.layer not in self.layers:
raise RequestError('unknown layer: ' + str(request.layer),
code='InvalidParameterValue', request=request)
if request.tilematrixset not in self.layers[request.layer]:
raise RequestError('unknown tilematrixset: ' + str(request.tilematrixset),
code='InvalidParameterValue', request=request)
if info_formats is not None:
if '/' in request.infoformat: # mimetype
if request.infoformat not in self.info_formats.values():
raise RequestError('unknown infoformat: ' + str(request.infoformat),
code='InvalidParameterValue', request=request)
else: # RESTful suffix
if request.infoformat not in self.info_formats:
raise RequestError('unknown infoformat: ' + str(request.infoformat),
code='InvalidParameterValue', request=request)
# set mimetype as infoformat
request.infoformat = self.info_formats[request.infoformat]
def check_request_dimensions(self, tile_layer, request):
# allow arbitrary dimensions in KVP service
# actual used values are checked later in TileLayer
pass
def _service_md(self, tile_request):
md = dict(self.md)
md['url'] = tile_request.url
return md
class WMTSRestServer(WMTSServer):
"""
OGC WMTS 1.0.0 RESTful Server
"""
service = None
names = ('wmts',)
request_methods = ('tile', 'capabilities')
default_template = '/{Layer}/{TileMatrixSet}/{TileMatrix}/{TileCol}/{TileRow}.{Format}'
default_info_template = '/{Layer}/{TileMatrixSet}/{TileMatrix}/{TileCol}/{TileRow}/{I}/{J}.{InfoFormat}'
def __init__(self, layers, md, max_tile_age=None, template=None, fi_template=None, info_formats={}):
WMTSServer.__init__(self, layers, md)
self.max_tile_age = max_tile_age
self.template = template or self.default_template
self.fi_template = fi_template or self.default_info_template
self.info_formats = info_formats
self.url_converter = URLTemplateConverter(self.template)
self.fi_url_converter = FeatureInfoURLTemplateConverter(self.fi_template)
self.request_parser = make_wmts_rest_request_parser(self.url_converter, self.fi_url_converter)
self.capabilities_class = partial(RestfulCapabilities, url_converter=self.url_converter, fi_url_converter=self.fi_url_converter)
def check_request_dimensions(self, tile_layer, request):
# check that unknown dimension for this layer are set to default
if request.dimensions:
for dimension, value in iteritems(request.dimensions):
dimension = dimension.lower()
if dimension not in tile_layer.dimensions and value != 'default':
raise RequestError('unknown dimension: ' + str(dimension),
code='InvalidParameterValue', request=request)
class Capabilities(object):
"""
Renders WMS capabilities documents.
"""
def __init__(self, server_md, layers, matrix_sets, info_formats={}):
self.service = server_md
self.layers = layers
self.info_formats = info_formats
self.matrix_sets = matrix_sets
def render(self, _map_request):
return self._render_template(_map_request.capabilities_template)
def template_context(self):
return dict(service=bunch(default='', **self.service),
restful=False,
layers=self.layers,
info_formats=self.info_formats,
tile_matrix_sets=self.matrix_sets)
def _render_template(self, template):
template = get_template(template)
doc = template.substitute(**self.template_context())
# strip blank lines
doc = '\n'.join(l for l in doc.split('\n') if l.rstrip())
return doc
class RestfulCapabilities(Capabilities):
def __init__(self, server_md, layers, matrix_sets, url_converter, fi_url_converter, info_formats={}):
Capabilities.__init__(self, server_md, layers, matrix_sets, info_formats=info_formats)
self.url_converter = url_converter
self.fi_url_converter = fi_url_converter
def template_context(self):
return dict(service=bunch(default='', **self.service),
restful=True,
layers=self.layers,
info_formats=self.info_formats,
tile_matrix_sets=self.matrix_sets,
resource_template=self.url_converter.template,
fi_resource_template=self.fi_url_converter.template,
# dimension_key maps lowercase dimensions to the actual
# casing from the restful template
dimension_keys=dict((k.lower(), k) for k in self.url_converter.dimensions),
format_resource_template=format_resource_template,
format_info_resource_template=format_info_resource_template,
)
def format_resource_template(layer, template, service):
if '{Format}' in template:
template = template.replace('{Format}', layer.format)
if '{Layer}' in template:
template = template.replace('{Layer}', layer.name)
return service.url + template
def format_info_resource_template(layer, template, info_format, service):
if '{InfoFormat}' in template:
template = template.replace('{InfoFormat}', info_format)
if '{Layer}' in template:
template = template.replace('{Layer}', layer.name)
return service.url + template
class WMTSTileLayer(object):
"""
Wrap multiple TileLayers for the same cache but with different grids.
"""
def __init__(self, layers):
self.grids = [lyr.grid for lyr in layers.values()]
self.layers = layers
self._layer = layers[next(iterkeys(layers))]
def __getattr__(self, name):
return getattr(self._layer, name)
def __contains__(self, gridname):
return gridname in self.layers
def __getitem__(self, gridname):
return self.layers[gridname]
# calculated from well-known scale set GoogleCRS84Quad
METERS_PER_DEEGREE = 111319.4907932736
def meter_per_unit(srs):
if srs.is_latlong:
return METERS_PER_DEEGREE
return 1
class TileMatrixSet(object):
def __init__(self, grid):
self.grid = grid
self.name = grid.name
self.srs_name = grid.srs.srs_code
self.tile_matrices = list(self._tile_matrices())
def __iter__(self):
return iter(self.tile_matrices)
def _tile_matrices(self):
for level, res in self.grid.resolutions.iteritems():
origin = self.grid.origin_tile(level, 'ul')
bbox = self.grid.tile_bbox(origin)
topleft = bbox[0], bbox[3]
if self.grid.srs.is_axis_order_ne:
topleft = bbox[3], bbox[0]
grid_size = self.grid.grid_sizes[level]
scale_denom = res / (0.28 / 1000) * meter_per_unit(self.grid.srs)
yield bunch(
identifier=level,
topleft=topleft,
grid_size=grid_size,
scale_denom=scale_denom,
tile_size=self.grid.tile_size,
) | 0.318379 | 0.161982 |
import torch
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate overlap between two set of bboxes.
If ``is_aligned`` is ``False``, then calculate the ious between each bbox
of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (m, 4)
bboxes2 (Tensor): shape (n, 4), if is_aligned is ``True``, then m and n
must be equal.
mode (str): "iou" (intersection over union) or iof (intersection over
foreground).
Returns:
ious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1)
"""
assert mode in ['iou', 'iof']
rows = bboxes1.size(0)
cols = bboxes2.size(0)
if is_aligned:
assert rows == cols
if rows * cols == 0:
return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)
if is_aligned:
lt = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2]
rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2]
wh = (rb - lt + 1).clamp(min=0) # [rows, 2]
overlap = wh[:, 0] * wh[:, 1]
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
bboxes1[:, 3] - bboxes1[:, 1] + 1)
if mode == 'iou':
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
bboxes2[:, 3] - bboxes2[:, 1] + 1)
ious = overlap / (area1 + area2 - overlap)
else:
ious = overlap / area1
else:
lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2]) # [rows, cols, 2]
rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:]) # [rows, cols, 2]
wh = (rb - lt + 1).clamp(min=0) # [rows, cols, 2]
overlap = wh[:, :, 0] * wh[:, :, 1]
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
bboxes1[:, 3] - bboxes1[:, 1] + 1)
if mode == 'iou':
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
bboxes2[:, 3] - bboxes2[:, 1] + 1)
ious = overlap / (area1[:, None] + area2 - overlap)
else:
ious = overlap / (area1[:, None])
return ious
def bbox_areas(bboxes, keep_axis=False):
x_min, y_min, x_max, y_max = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
areas = (y_max - y_min + 1) * (x_max - x_min + 1)
if keep_axis:
return areas[:, None]
return areas | mmdet/core/bbox/geometry.py | import torch
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):
    """Compute IoU/IoF between two sets of boxes.

    Boxes are ``(x1, y1, x2, y2)`` with inclusive integer corners, so a
    side length is ``x2 - x1 + 1``.

    Args:
        bboxes1 (Tensor): shape (m, 4).
        bboxes2 (Tensor): shape (n, 4); if ``is_aligned`` is True, m == n.
        mode (str): 'iou' (intersection over union) or 'iof'
            (intersection over foreground, i.e. the area of ``bboxes1``).
        is_aligned (bool): if True, only compare box i with box i.

    Returns:
        Tensor: shape (m,) when aligned (non-empty case), else (m, n).
    """
    assert mode in ('iou', 'iof')
    num_a, num_b = bboxes1.size(0), bboxes2.size(0)
    if is_aligned:
        assert num_a == num_b
    # Preserve the historical result shapes for empty inputs.
    if num_a * num_b == 0:
        return bboxes1.new(num_a, 1) if is_aligned else bboxes1.new(num_a, num_b)

    def _box_area(boxes):
        # Inclusive-pixel area of each (x1, y1, x2, y2) box.
        return (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)

    if is_aligned:
        corner_min = torch.max(bboxes1[:, :2], bboxes2[:, :2])   # [m, 2]
        corner_max = torch.min(bboxes1[:, 2:], bboxes2[:, 2:])   # [m, 2]
        wh = (corner_max - corner_min + 1).clamp(min=0)          # [m, 2]
        intersection = wh[:, 0] * wh[:, 1]
        denominator = _box_area(bboxes1)
        if mode == 'iou':
            denominator = denominator + _box_area(bboxes2) - intersection
    else:
        # Broadcast bboxes1 against bboxes2 to score every pair.
        corner_min = torch.max(bboxes1[:, None, :2], bboxes2[:, :2])   # [m, n, 2]
        corner_max = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:])   # [m, n, 2]
        wh = (corner_max - corner_min + 1).clamp(min=0)                # [m, n, 2]
        intersection = wh[..., 0] * wh[..., 1]
        denominator = _box_area(bboxes1)[:, None]
        if mode == 'iou':
            denominator = denominator + _box_area(bboxes2) - intersection
    return intersection / denominator
def bbox_areas(bboxes, keep_axis=False):
    """Return per-box areas for (x1, y1, x2, y2) boxes.

    Side lengths are measured inclusively (the +1), matching the
    convention used by ``bbox_overlaps`` in this file.

    Args:
        bboxes (Tensor): shape (n, 4).
        keep_axis (bool): if True, keep a trailing singleton axis so the
            result broadcasts against (n, k) tensors.

    Returns:
        Tensor: areas, shape (n, 1) if ``keep_axis`` else (n,).
    """
    x_min, y_min, x_max, y_max = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
    areas = (y_max - y_min + 1) * (x_max - x_min + 1)
    if keep_axis:
        return areas[:, None]
    # NOTE(review): the trailing "| 0.887583 | 0.747432 |" below looks like
    # dataset-export residue fused onto the final return; the function almost
    # certainly ends at `return areas` - confirm upstream.
    return areas | 0.887583 | 0.747432 |
import random
import json
import logging
import copy
from camera_trap_classifier.data.utils import (
randomly_split_dataset, map_label_list_to_numeric_dict,
export_dict_to_json, _balanced_sampling)
from camera_trap_classifier.data.importer import DatasetImporter
logger = logging.getLogger(__name__)
class DatasetInventory(object):
    """ Defines a Dataset Inventory - contains labels, links and data about
        each record.

        ``self.data_inventory`` (supplied by subclasses) maps a record id to
        a dict with at least:
            'images': list of image paths
            'labels': list of label dicts [{label_name: label_value}, ...]
            'meta_data' (optional): str or dict with extra information
    """
    # Sentinel marking an unknown label value (string / numeric form).
    missing_label_value = '-1'
    missing_label_value_num = -1

    def get_all_record_ids(self):
        """ Get all ids of the inventory """
        return list(self.data_inventory.keys())

    def get_record_id_data(self, record_id):
        """ Get content of record id """
        return self.data_inventory[record_id]

    def get_number_of_records(self):
        """ Count and Return number of records """
        return len(self.data_inventory.keys())

    def remove_record(self, id_to_remove):
        """ Remove specific record (silently ignores unknown ids) """
        self.data_inventory.pop(id_to_remove, None)

    def _get_all_labels(self):
        """ Extract the set of observed values per label name.
            Returns: {'species': {'elephant', 'zebra'},
                      'count': {'1', '2'}}
        """
        all_labels = dict()
        for k, v in self.data_inventory.items():
            for label_entry in v['labels']:
                for label_name, label_value in label_entry.items():
                    if label_name not in all_labels:
                        all_labels[label_name] = set()
                    # Missing values are not part of the label universe.
                    if not label_value == type(self).missing_label_value:
                        all_labels[label_name].add(label_value)
        return all_labels

    def _calc_label_stats(self):
        """ Calculate per-label-value record counts.
            Returns: {'species': {'Zebra': 3, 'Elephant': 6},
                      'counts': {'1': 5, '2': 10}}
        """
        label_stats = dict()
        for _id, data in self.data_inventory.items():
            # For each record count label types and label values.
            for label_entry in data['labels']:
                for label_name, label_val in label_entry.items():
                    if label_name not in label_stats:
                        label_stats[label_name] = dict()
                    if label_val not in label_stats[label_name]:
                        label_stats[label_name][label_val] = 0
                    label_stats[label_name][label_val] += 1
        return label_stats

    def log_stats(self, debug_only=False):
        """ Log statistics about the data inventory, most frequent first """
        label_stats = self._calc_label_stats()
        # Pick the log level once instead of duplicating the message body.
        log_fn = logger.debug if debug_only else logger.info
        for label_type, labels in label_stats.items():
            label_list = list(labels.keys())
            count_list = list(labels.values())
            total_counts = sum(count_list)
            # Indices of labels sorted by descending record count.
            sort_index = sorted(range(len(count_list)), reverse=True,
                                key=lambda k: count_list[k])
            for idx in sort_index:
                log_fn(
                    "Label Type: %s Label: %s Records: %s / %s (%s %%)" %
                    (label_type, label_list[idx], count_list[idx],
                     total_counts,
                     round(100 * (count_list[idx]/total_counts), 4)))

    def export_to_json(self, json_path):
        """ Export Inventory to Json File """
        if self.data_inventory is not None:
            with open(json_path, 'w') as fp:
                json.dump(self.data_inventory, fp)
            logger.info("Data Inventory saved to %s" % json_path)
        else:
            # Fixed: the original used a backslash line continuation inside
            # the string literal, leaking source indentation into the message.
            logger.warning("Cant export data inventory to json - no "
                           "inventory created yet")

    def export_to_tfrecord(self, tfr_writer, tfr_path,
                           **kwargs):
        """ Export Dataset to TFRecord via *tfr_writer* """
        # Build the full id -> tfr-format dict, then write it in one call.
        tfrecord_dict = dict()
        for _id, record_values in self.data_inventory.items():
            tfrecord_dict[_id] = self._convert_record_to_tfr_format(
                _id, record_values)
        tfr_writer.encode_to_tfr(tfrecord_dict, tfr_path, **kwargs)

    def _convert_record_to_tfr_format(self, id, record):
        """ Convert a record to a flat, TFRecord-friendly dict """
        # Meta data may be a str, a dict (json-encoded here) or absent.
        if 'meta_data' in record.keys():
            if isinstance(record['meta_data'], str):
                meta_data = record['meta_data']
            elif isinstance(record['meta_data'], dict):
                meta_data = json.dumps(record['meta_data'])
            else:
                meta_data = ''
        else:
            meta_data = ''
        # Concatenated '#name:value' text over all observations.
        label_text = list()
        for label in record['labels']:
            for label_name, label_value in label.items():
                label_text += ['#' + label_name + ':' + label_value]
        label_text = ''.join(label_text)
        # Per label name, collect string and numeric values over all
        # observations of the record.
        labels_dict = dict()
        labels_num_dict = dict()
        for label in record['labels']:
            for label_name, label_value in label.items():
                label_id = 'label/' + label_name
                label_id_num = 'label_num/' + label_name
                # BUG FIX: this previously tested 'label_name not in
                # labels_dict', which was always True (the keys are the
                # 'label/...' ids), so the lists were re-created on every
                # observation and only the last label value survived.
                if label_id not in labels_dict:
                    labels_dict[label_id] = []
                    labels_num_dict[label_id_num] = []
                if label_value == type(self).missing_label_value:
                    val_num = type(self).missing_label_value_num
                else:
                    val_num = self.labels_numeric_map[label_name][label_value]
                labels_num_dict[label_id_num].append(val_num)
                labels_dict[label_id].append(label_value)
        tfr_data = {
            "id": str(id),
            "n_images": len(record['images']),
            "n_labels": len(record['labels']),
            "image_paths": record['images'],
            "meta_data": meta_data,
            "labelstext": label_text,
            **labels_dict,
            **labels_num_dict
        }
        return tfr_data

    def export_label_mapping(self, path):
        """ Export Label Mapping to Json file """
        assert self.labels_numeric_map is not None, \
            "Numeric Label Mapping has not been generated"
        export_dict_to_json(self.labels_numeric_map, path)
class DatasetInventorySplit(DatasetInventory):
    """A finished split of a dataset inventory.

    Holds the split's records, label universe and numeric label mapping;
    unlike the master inventory it offers no further manipulation methods.
    """

    def __init__(self, data_inventory, labels, labels_numeric_map):
        """Store the split's records and label metadata as-is."""
        self.labels_numeric_map = labels_numeric_map
        self.labels = labels
        self.data_inventory = data_inventory
class DatasetInventoryMaster(DatasetInventory):
    """ Creates Datset Dictionary from a source and allows to
        manipulate labels and create splits
    """
    def __init__(self, labels_numeric_map=None):
        # labels_numeric_map: optional pre-built
        # {label_name: {label_value: numeric_id}} mapping; generated lazily
        # by _map_labels_to_numeric() when left as None.
        self.data_inventory = None
        self.labels = None
        self.labels_numeric_map = labels_numeric_map

    def _map_labels_to_numeric(self):
        """ Map all labels to numerics """
        # Only build the text->numeric map if none was supplied; the
        # reverse (numeric->text) map is refreshed either way.
        if self.labels_numeric_map is None:
            self.labels = self._get_all_labels()
            labels_numeric_map = dict()
            for label_name, label_set in self.labels.items():
                mapped = map_label_list_to_numeric_dict(list(label_set))
                labels_numeric_map[label_name] = mapped
            self.labels_numeric_map = labels_numeric_map
        # create numeric to text labels as well
        self.label_mapping_from_num = \
            {k: {kk: vv for vv, kk in v.items()}
             for k, v in self.labels_numeric_map.items()}

    def create_from_source(self, type, params):
        """ Create Dataset Inventory from a specific Source """
        # NOTE(review): parameter 'type' shadows the builtin; renaming it
        # would change the keyword interface, so it is only flagged here.
        importer = DatasetImporter().create(type, params)
        self.data_inventory = importer.import_from_source()
        # self.label_handler = LabelHandler(self.data_inventory)
        # self.label_handler.remove_not_all_label_attributes()

    def remove_multi_label_records(self):
        """ Remove records with multiple labels / observations """
        to_remove = list()
        for record_id, data in self.data_inventory.items():
            if len(data['labels']) > 1:
                to_remove.append(record_id)
        logger.info("Removing %s records with multiple labels" %
                    len(to_remove))
        for record_id in to_remove:
            self.remove_record(record_id)

    def randomly_remove_samples_to_percent(self, p_keep):
        """ Randomly sample a percentage of all records """
        # NOTE(review): only the upper bound is validated; a negative
        # p_keep slips through here and random.sample raises instead.
        if not p_keep <= 1:
            raise ValueError("p has to be between 0 and 1")
        new_data_inv = dict()
        all_ids = list(self.data_inventory.keys())
        n_total = len(all_ids)
        n_choices = int(n_total * p_keep)
        choices = random.sample(all_ids, k=n_choices)
        for id in choices:
            new_data_inv[id] = self.data_inventory[id]
        self.data_inventory = new_data_inv

    def remove_records_with_label(self, label_name_list, label_value_list):
        """ Remove all records with labels in label_name and corresponding
            label values
            Example: label_name : [species, species]
                     label_value: ['zebra', 'elephant']
        """
        assert all([isinstance(label_name_list, list),
                    isinstance(label_value_list, list)]), \
            "label_name_list and label_value_list must be lists"
        # The two lists are consumed pairwise: (name[i], value[i]).
        for label_name, label_value in zip(label_name_list, label_value_list):
            self._remove_records_with_label(label_name, label_value)

    def _remove_records_with_label(self, label_name, label_value):
        """ Remove all records with 'label_value' for 'label_name'
            Example: label_name: 'species' label_value: 'Zebra'
        """
        ids_to_remove = list()
        for record_id, record_value in self.data_inventory.items():
            labels_list = record_value['labels']
            for label in labels_list:
                for l_name, l_val_list in label.items():
                    if (label_name == l_name):
                        # NOTE(review): 'in' performs substring matching when
                        # the stored value is a plain string; if label values
                        # are scalars this probably should be '=='. Confirm
                        # the value shape before changing.
                        if label_value in l_val_list:
                            ids_to_remove.append(record_id)
        logger.info("Removing %s records from label %s with value %s" %
                    (len(ids_to_remove), label_name, label_value))
        for id_to_remove in ids_to_remove:
            self.remove_record(id_to_remove)

    def keep_only_records_with_label(self, label_name_list, label_value_list):
        """ Keep only records with (at least one) of the specified
            label_name and corresponding label values
        """
        assert all([isinstance(label_name_list, list),
                    isinstance(label_value_list, list)]), \
            "label_name_list and label_value_list must be lists"
        # Union of all per-pair keep sets: a record survives if ANY
        # (name, value) pair matches it.
        to_keep = set()
        for label_name, label_value in zip(label_name_list, label_value_list):
            to_keep = to_keep.union(
                self._keep_only_record_with_label(label_name, label_value))
        logger.info("Keeping %s records" % len(to_keep))
        to_remove = self.data_inventory.keys() - to_keep
        for id_to_remove in to_remove:
            self.remove_record(id_to_remove)

    def _keep_only_record_with_label(self, label_name, label_value):
        """ Return the ids of records carrying label_value for label_name
        """
        ids_to_keep = set()
        for record_id, record_value in self.data_inventory.items():
            labels_list = record_value['labels']
            for label in labels_list:
                for l_name, l_val in label.items():
                    if (label_name == l_name):
                        if label_value == l_val:
                            ids_to_keep.add(record_id)
        return ids_to_keep

    def _remove_records_with_any_missing_label(self):
        """ Remove any records with the default missing value of -1 """
        ids_to_remove = set()
        for record_id, record_value in self.data_inventory.items():
            labels_list = record_value['labels']
            for label in labels_list:
                for l_vals in label.values():
                    if l_vals == type(self).missing_label_value:
                        ids_to_remove.add(record_id)
        logger.info("Removing %s records with missing labels" %
                    len(ids_to_remove))
        for id_to_remove in ids_to_remove:
            self.remove_record(id_to_remove)

    def split_inventory_by_random_splits_with_balanced_sample(
            self,
            split_label_min,
            split_names,
            split_percent):
        """ Split inventory randomly into different sets
            according to
            split_label_min: e.g 'species'
            Returns dict: {'id1': 'test', 'id2': 'train'}
        """
        # Create a dictionary mapping each record to label for sampling
        ids_to_split_label = dict()
        for record_id, record_value in self.data_inventory.items():
            first_labels_entry = record_value['labels'][0]
            # only consider first entry in labels list
            if split_label_min in first_labels_entry:
                split_label = first_labels_entry[split_label_min]
                ids_to_split_label[record_id] = split_label
        split_ids = list(ids_to_split_label.keys())
        logger.debug("Found %s record to split randomly" % len(split_ids))
        split_assignments = randomly_split_dataset(
            split_ids,
            split_names,
            split_percent,
            balanced_sampling_min=True,
            balanced_sampling_id_to_label=ids_to_split_label)
        logger.debug("Found %s records with split assignments" %
                     len(split_assignments.keys()))
        return self._convert_splits_to_dataset_inventorys(split_assignments)

    def split_inventory_by_random_splits(
            self,
            split_names,
            split_percent):
        """ Split inventory randomly into different sets
            Returns dict: {'id1': 'test', 'id2': 'train'}
        """
        split_ids = list(self.data_inventory.keys())
        split_assignments = randomly_split_dataset(
            split_ids,
            split_names,
            split_percent,
            balanced_sampling_min=False,
            balanced_sampling_id_to_label=None)
        return self._convert_splits_to_dataset_inventorys(split_assignments)

    def split_inventory_by_meta_data_column(
            self,
            meta_colum
            ):
        """ Split inventory into different sets based on
            meta_data_column
        """
        # The meta_data value itself becomes the split name; every record
        # must therefore carry the requested column.
        split_assignments = dict()
        for record_id, record_value in self.data_inventory.items():
            meta_val = record_value['meta_data'][meta_colum]
            split_assignments[record_id] = meta_val
        return self._convert_splits_to_dataset_inventorys(split_assignments)

    def split_inventory_by_meta_data_column_and_balanced_sampling(
            self,
            meta_colum,
            split_label_min
            ):
        """ Split inventory into different sets based on
            meta_data_column after balanced sampling
        """
        id_to_label = dict()
        for record_id, record_data in self.data_inventory.items():
            # take only the first entry of the labels / observations to assign
            # a label for that record
            if split_label_min in record_data['labels'][0]:
                label = record_data['labels'][0][split_label_min]
                id_to_label[record_id] = label
        # Records surviving the balanced down-sampling step.
        remaining_ids = set(_balanced_sampling(id_to_label))
        split_assignments = dict()
        for record_id, record_value in self.data_inventory.items():
            meta_val = record_value['meta_data'][meta_colum]
            if record_id in remaining_ids:
                split_assignments[record_id] = meta_val
        return self._convert_splits_to_dataset_inventorys(split_assignments)

    def _convert_splits_to_dataset_inventorys(self, split_assignments):
        """ Convert split assignments to new splitted dataset inventories """
        # label overview
        all_labels = self._get_all_labels()
        self._map_labels_to_numeric()
        # Create dictionary with split_name to id mapping
        split_to_record = {}
        for k, v in split_assignments.items():
            split_to_record[v] = split_to_record.get(v, [])
            split_to_record[v].append(k)
        # Create new splitted data inventories
        splitted_inventories = dict()
        for split, record_list in split_to_record.items():
            split_dict = dict()
            for record_id in record_list:
                split_dict[record_id] = self.data_inventory[record_id]
            logger.debug("Creating dataset split %s with %s records" %
                         (split, len(split_dict.keys())))
            splitted_inventories[split] = DatasetInventorySplit(
                split_dict,
                all_labels,
                self.labels_numeric_map)
        return splitted_inventories

    def remap_labels(self, label_map_dict):
        """ Remap labels according to mapping dictionary
            label_map_dict (dict):
                {'species': {'Zebra': 'species', 'Elephant': 'species',
                             'blank': 'blank'},
                 'counts': {'1': '1-5'}}
        """
        new_inventory = copy.deepcopy(self.data_inventory)
        # Loop over records
        for record_id, record_value in self.data_inventory.items():
            # loop over list of label entries [{species:}, {species:}]
            for i, labels in enumerate(record_value['labels']):
                # Loop over label names
                for label_name, label_value_list in labels.items():
                    if label_name in label_map_dict:
                        # loop over label name entries
                        for j, label_value in enumerate(label_value_list):
                            if label_value in label_map_dict[label_name]:
                                new_label = label_map_dict[label_name][label_value]
                                # BUG(review): the next line is a bare
                                # subscript expression - it assigns nothing
                                # (and indexes with [j][new_label]). It was
                                # almost certainly meant to be:
                                #   new_inventory[record_id]['labels'][i][label_name][j] = new_label
                                new_inventory[record_id]['labels'][i][label_name][j][new_label]
        # NOTE(review): the trailing "| camera_trap_classifier/... | import
        # random" below looks like dataset-export residue fused onto the real
        # final statement 'self.data_inventory = new_inventory' - confirm
        # against the original repository.
        self.data_inventory = new_inventory | camera_trap_classifier/data/inventory.py | import random
import json
import logging
import copy
from camera_trap_classifier.data.utils import (
randomly_split_dataset, map_label_list_to_numeric_dict,
export_dict_to_json, _balanced_sampling)
from camera_trap_classifier.data.importer import DatasetImporter
logger = logging.getLogger(__name__)
class DatasetInventory(object):
    """ Defines a Dataset Inventory - contains labels, links and data about
        each record.

        ``self.data_inventory`` (supplied by subclasses) maps a record id to
        a dict with at least:
            'images': list of image paths
            'labels': list of label dicts [{label_name: label_value}, ...]
            'meta_data' (optional): str or dict with extra information
    """
    # Sentinel marking an unknown label value (string / numeric form).
    missing_label_value = '-1'
    missing_label_value_num = -1

    def get_all_record_ids(self):
        """ Get all ids of the inventory """
        return list(self.data_inventory.keys())

    def get_record_id_data(self, record_id):
        """ Get content of record id """
        return self.data_inventory[record_id]

    def get_number_of_records(self):
        """ Count and Return number of records """
        return len(self.data_inventory.keys())

    def remove_record(self, id_to_remove):
        """ Remove specific record (silently ignores unknown ids) """
        self.data_inventory.pop(id_to_remove, None)

    def _get_all_labels(self):
        """ Extract the set of observed values per label name.
            Returns: {'species': {'elephant', 'zebra'},
                      'count': {'1', '2'}}
        """
        all_labels = dict()
        for k, v in self.data_inventory.items():
            for label_entry in v['labels']:
                for label_name, label_value in label_entry.items():
                    if label_name not in all_labels:
                        all_labels[label_name] = set()
                    # Missing values are not part of the label universe.
                    if not label_value == type(self).missing_label_value:
                        all_labels[label_name].add(label_value)
        return all_labels

    def _calc_label_stats(self):
        """ Calculate per-label-value record counts.
            Returns: {'species': {'Zebra': 3, 'Elephant': 6},
                      'counts': {'1': 5, '2': 10}}
        """
        label_stats = dict()
        for _id, data in self.data_inventory.items():
            # For each record count label types and label values.
            for label_entry in data['labels']:
                for label_name, label_val in label_entry.items():
                    if label_name not in label_stats:
                        label_stats[label_name] = dict()
                    if label_val not in label_stats[label_name]:
                        label_stats[label_name][label_val] = 0
                    label_stats[label_name][label_val] += 1
        return label_stats

    def log_stats(self, debug_only=False):
        """ Log statistics about the data inventory, most frequent first """
        label_stats = self._calc_label_stats()
        # Pick the log level once instead of duplicating the message body.
        log_fn = logger.debug if debug_only else logger.info
        for label_type, labels in label_stats.items():
            label_list = list(labels.keys())
            count_list = list(labels.values())
            total_counts = sum(count_list)
            # Indices of labels sorted by descending record count.
            sort_index = sorted(range(len(count_list)), reverse=True,
                                key=lambda k: count_list[k])
            for idx in sort_index:
                log_fn(
                    "Label Type: %s Label: %s Records: %s / %s (%s %%)" %
                    (label_type, label_list[idx], count_list[idx],
                     total_counts,
                     round(100 * (count_list[idx]/total_counts), 4)))

    def export_to_json(self, json_path):
        """ Export Inventory to Json File """
        if self.data_inventory is not None:
            with open(json_path, 'w') as fp:
                json.dump(self.data_inventory, fp)
            logger.info("Data Inventory saved to %s" % json_path)
        else:
            # Fixed: the original used a backslash line continuation inside
            # the string literal, leaking source indentation into the message.
            logger.warning("Cant export data inventory to json - no "
                           "inventory created yet")

    def export_to_tfrecord(self, tfr_writer, tfr_path,
                           **kwargs):
        """ Export Dataset to TFRecord via *tfr_writer* """
        # Build the full id -> tfr-format dict, then write it in one call.
        tfrecord_dict = dict()
        for _id, record_values in self.data_inventory.items():
            tfrecord_dict[_id] = self._convert_record_to_tfr_format(
                _id, record_values)
        tfr_writer.encode_to_tfr(tfrecord_dict, tfr_path, **kwargs)

    def _convert_record_to_tfr_format(self, id, record):
        """ Convert a record to a flat, TFRecord-friendly dict """
        # Meta data may be a str, a dict (json-encoded here) or absent.
        if 'meta_data' in record.keys():
            if isinstance(record['meta_data'], str):
                meta_data = record['meta_data']
            elif isinstance(record['meta_data'], dict):
                meta_data = json.dumps(record['meta_data'])
            else:
                meta_data = ''
        else:
            meta_data = ''
        # Concatenated '#name:value' text over all observations.
        label_text = list()
        for label in record['labels']:
            for label_name, label_value in label.items():
                label_text += ['#' + label_name + ':' + label_value]
        label_text = ''.join(label_text)
        # Per label name, collect string and numeric values over all
        # observations of the record.
        labels_dict = dict()
        labels_num_dict = dict()
        for label in record['labels']:
            for label_name, label_value in label.items():
                label_id = 'label/' + label_name
                label_id_num = 'label_num/' + label_name
                # BUG FIX: this previously tested 'label_name not in
                # labels_dict', which was always True (the keys are the
                # 'label/...' ids), so the lists were re-created on every
                # observation and only the last label value survived.
                if label_id not in labels_dict:
                    labels_dict[label_id] = []
                    labels_num_dict[label_id_num] = []
                if label_value == type(self).missing_label_value:
                    val_num = type(self).missing_label_value_num
                else:
                    val_num = self.labels_numeric_map[label_name][label_value]
                labels_num_dict[label_id_num].append(val_num)
                labels_dict[label_id].append(label_value)
        tfr_data = {
            "id": str(id),
            "n_images": len(record['images']),
            "n_labels": len(record['labels']),
            "image_paths": record['images'],
            "meta_data": meta_data,
            "labelstext": label_text,
            **labels_dict,
            **labels_num_dict
        }
        return tfr_data

    def export_label_mapping(self, path):
        """ Export Label Mapping to Json file """
        assert self.labels_numeric_map is not None, \
            "Numeric Label Mapping has not been generated"
        export_dict_to_json(self.labels_numeric_map, path)
class DatasetInventorySplit(DatasetInventory):
    """A finished split of a dataset inventory.

    Holds the split's records, label universe and numeric label mapping;
    unlike the master inventory it offers no further manipulation methods.
    """

    def __init__(self, data_inventory, labels, labels_numeric_map):
        """Store the split's records and label metadata as-is."""
        self.labels_numeric_map = labels_numeric_map
        self.labels = labels
        self.data_inventory = data_inventory
class DatasetInventoryMaster(DatasetInventory):
    """ Creates Datset Dictionary from a source and allows to
        manipulate labels and create splits
    """
    def __init__(self, labels_numeric_map=None):
        # labels_numeric_map: optional pre-built
        # {label_name: {label_value: numeric_id}} mapping; generated lazily
        # by _map_labels_to_numeric() when left as None.
        self.data_inventory = None
        self.labels = None
        self.labels_numeric_map = labels_numeric_map

    def _map_labels_to_numeric(self):
        """ Map all labels to numerics """
        # Only build the text->numeric map if none was supplied; the
        # reverse (numeric->text) map is refreshed either way.
        if self.labels_numeric_map is None:
            self.labels = self._get_all_labels()
            labels_numeric_map = dict()
            for label_name, label_set in self.labels.items():
                mapped = map_label_list_to_numeric_dict(list(label_set))
                labels_numeric_map[label_name] = mapped
            self.labels_numeric_map = labels_numeric_map
        # create numeric to text labels as well
        self.label_mapping_from_num = \
            {k: {kk: vv for vv, kk in v.items()}
             for k, v in self.labels_numeric_map.items()}

    def create_from_source(self, type, params):
        """ Create Dataset Inventory from a specific Source """
        # NOTE(review): parameter 'type' shadows the builtin; renaming it
        # would change the keyword interface, so it is only flagged here.
        importer = DatasetImporter().create(type, params)
        self.data_inventory = importer.import_from_source()
        # self.label_handler = LabelHandler(self.data_inventory)
        # self.label_handler.remove_not_all_label_attributes()

    def remove_multi_label_records(self):
        """ Remove records with multiple labels / observations """
        to_remove = list()
        for record_id, data in self.data_inventory.items():
            if len(data['labels']) > 1:
                to_remove.append(record_id)
        logger.info("Removing %s records with multiple labels" %
                    len(to_remove))
        for record_id in to_remove:
            self.remove_record(record_id)

    def randomly_remove_samples_to_percent(self, p_keep):
        """ Randomly sample a percentage of all records """
        # NOTE(review): only the upper bound is validated; a negative
        # p_keep slips through here and random.sample raises instead.
        if not p_keep <= 1:
            raise ValueError("p has to be between 0 and 1")
        new_data_inv = dict()
        all_ids = list(self.data_inventory.keys())
        n_total = len(all_ids)
        n_choices = int(n_total * p_keep)
        choices = random.sample(all_ids, k=n_choices)
        for id in choices:
            new_data_inv[id] = self.data_inventory[id]
        self.data_inventory = new_data_inv

    def remove_records_with_label(self, label_name_list, label_value_list):
        """ Remove all records with labels in label_name and corresponding
            label values
            Example: label_name : [species, species]
                     label_value: ['zebra', 'elephant']
        """
        assert all([isinstance(label_name_list, list),
                    isinstance(label_value_list, list)]), \
            "label_name_list and label_value_list must be lists"
        # The two lists are consumed pairwise: (name[i], value[i]).
        for label_name, label_value in zip(label_name_list, label_value_list):
            self._remove_records_with_label(label_name, label_value)

    def _remove_records_with_label(self, label_name, label_value):
        """ Remove all records with 'label_value' for 'label_name'
            Example: label_name: 'species' label_value: 'Zebra'
        """
        ids_to_remove = list()
        for record_id, record_value in self.data_inventory.items():
            labels_list = record_value['labels']
            for label in labels_list:
                for l_name, l_val_list in label.items():
                    if (label_name == l_name):
                        # NOTE(review): 'in' performs substring matching when
                        # the stored value is a plain string; if label values
                        # are scalars this probably should be '=='. Confirm
                        # the value shape before changing.
                        if label_value in l_val_list:
                            ids_to_remove.append(record_id)
        logger.info("Removing %s records from label %s with value %s" %
                    (len(ids_to_remove), label_name, label_value))
        for id_to_remove in ids_to_remove:
            self.remove_record(id_to_remove)

    def keep_only_records_with_label(self, label_name_list, label_value_list):
        """ Keep only records with (at least one) of the specified
            label_name and corresponding label values
        """
        assert all([isinstance(label_name_list, list),
                    isinstance(label_value_list, list)]), \
            "label_name_list and label_value_list must be lists"
        # Union of all per-pair keep sets: a record survives if ANY
        # (name, value) pair matches it.
        to_keep = set()
        for label_name, label_value in zip(label_name_list, label_value_list):
            to_keep = to_keep.union(
                self._keep_only_record_with_label(label_name, label_value))
        logger.info("Keeping %s records" % len(to_keep))
        to_remove = self.data_inventory.keys() - to_keep
        for id_to_remove in to_remove:
            self.remove_record(id_to_remove)

    def _keep_only_record_with_label(self, label_name, label_value):
        """ Return the ids of records carrying label_value for label_name
        """
        ids_to_keep = set()
        for record_id, record_value in self.data_inventory.items():
            labels_list = record_value['labels']
            for label in labels_list:
                for l_name, l_val in label.items():
                    if (label_name == l_name):
                        if label_value == l_val:
                            ids_to_keep.add(record_id)
        return ids_to_keep

    def _remove_records_with_any_missing_label(self):
        """ Remove any records with the default missing value of -1 """
        ids_to_remove = set()
        for record_id, record_value in self.data_inventory.items():
            labels_list = record_value['labels']
            for label in labels_list:
                for l_vals in label.values():
                    if l_vals == type(self).missing_label_value:
                        ids_to_remove.add(record_id)
        logger.info("Removing %s records with missing labels" %
                    len(ids_to_remove))
        for id_to_remove in ids_to_remove:
            self.remove_record(id_to_remove)

    def split_inventory_by_random_splits_with_balanced_sample(
            self,
            split_label_min,
            split_names,
            split_percent):
        """ Split inventory randomly into different sets
            according to
            split_label_min: e.g 'species'
            Returns dict: {'id1': 'test', 'id2': 'train'}
        """
        # Create a dictionary mapping each record to label for sampling
        ids_to_split_label = dict()
        for record_id, record_value in self.data_inventory.items():
            first_labels_entry = record_value['labels'][0]
            # only consider first entry in labels list
            if split_label_min in first_labels_entry:
                split_label = first_labels_entry[split_label_min]
                ids_to_split_label[record_id] = split_label
        split_ids = list(ids_to_split_label.keys())
        logger.debug("Found %s record to split randomly" % len(split_ids))
        split_assignments = randomly_split_dataset(
            split_ids,
            split_names,
            split_percent,
            balanced_sampling_min=True,
            balanced_sampling_id_to_label=ids_to_split_label)
        logger.debug("Found %s records with split assignments" %
                     len(split_assignments.keys()))
        return self._convert_splits_to_dataset_inventorys(split_assignments)

    def split_inventory_by_random_splits(
            self,
            split_names,
            split_percent):
        """ Split inventory randomly into different sets
            Returns dict: {'id1': 'test', 'id2': 'train'}
        """
        split_ids = list(self.data_inventory.keys())
        split_assignments = randomly_split_dataset(
            split_ids,
            split_names,
            split_percent,
            balanced_sampling_min=False,
            balanced_sampling_id_to_label=None)
        return self._convert_splits_to_dataset_inventorys(split_assignments)

    def split_inventory_by_meta_data_column(
            self,
            meta_colum
            ):
        """ Split inventory into different sets based on
            meta_data_column
        """
        # The meta_data value itself becomes the split name; every record
        # must therefore carry the requested column.
        split_assignments = dict()
        for record_id, record_value in self.data_inventory.items():
            meta_val = record_value['meta_data'][meta_colum]
            split_assignments[record_id] = meta_val
        return self._convert_splits_to_dataset_inventorys(split_assignments)

    def split_inventory_by_meta_data_column_and_balanced_sampling(
            self,
            meta_colum,
            split_label_min
            ):
        """ Split inventory into different sets based on
            meta_data_column after balanced sampling
        """
        id_to_label = dict()
        for record_id, record_data in self.data_inventory.items():
            # take only the first entry of the labels / observations to assign
            # a label for that record
            if split_label_min in record_data['labels'][0]:
                label = record_data['labels'][0][split_label_min]
                id_to_label[record_id] = label
        # Records surviving the balanced down-sampling step.
        remaining_ids = set(_balanced_sampling(id_to_label))
        split_assignments = dict()
        for record_id, record_value in self.data_inventory.items():
            meta_val = record_value['meta_data'][meta_colum]
            if record_id in remaining_ids:
                split_assignments[record_id] = meta_val
        return self._convert_splits_to_dataset_inventorys(split_assignments)

    def _convert_splits_to_dataset_inventorys(self, split_assignments):
        """ Convert split assignments to new splitted dataset inventories """
        # label overview
        all_labels = self._get_all_labels()
        self._map_labels_to_numeric()
        # Create dictionary with split_name to id mapping
        split_to_record = {}
        for k, v in split_assignments.items():
            split_to_record[v] = split_to_record.get(v, [])
            split_to_record[v].append(k)
        # Create new splitted data inventories
        splitted_inventories = dict()
        for split, record_list in split_to_record.items():
            split_dict = dict()
            for record_id in record_list:
                split_dict[record_id] = self.data_inventory[record_id]
            logger.debug("Creating dataset split %s with %s records" %
                         (split, len(split_dict.keys())))
            splitted_inventories[split] = DatasetInventorySplit(
                split_dict,
                all_labels,
                self.labels_numeric_map)
        return splitted_inventories

    def remap_labels(self, label_map_dict):
        """ Remap labels according to mapping dictionary
            label_map_dict (dict):
                {'species': {'Zebra': 'species', 'Elephant': 'species',
                             'blank': 'blank'},
                 'counts': {'1': '1-5'}}
        """
        new_inventory = copy.deepcopy(self.data_inventory)
        # Loop over records
        for record_id, record_value in self.data_inventory.items():
            # loop over list of label entries [{species:}, {species:}]
            for i, labels in enumerate(record_value['labels']):
                # Loop over label names
                for label_name, label_value_list in labels.items():
                    if label_name in label_map_dict:
                        # loop over label name entries
                        for j, label_value in enumerate(label_value_list):
                            if label_value in label_map_dict[label_name]:
                                new_label = label_map_dict[label_name][label_value]
                                # BUG(review): the next line is a bare
                                # subscript expression - it assigns nothing
                                # (and indexes with [j][new_label]). It was
                                # almost certainly meant to be:
                                #   new_inventory[record_id]['labels'][i][label_name][j] = new_label
                                new_inventory[record_id]['labels'][i][label_name][j][new_label]
        # NOTE(review): the trailing "| 0.663451 | 0.295807 |" below looks
        # like dataset-export residue fused onto the real final statement
        # 'self.data_inventory = new_inventory' - confirm upstream.
        self.data_inventory = new_inventory | 0.663451 | 0.295807 |
from __future__ import absolute_import, unicode_literals
from optimove.constants import AUTHORIZED_DELIMITERS, UNAUTHORIZED_DELIMITERS
class Customers(object):
client = None
    def __init__(self, client):
        """Bind this API section to *client*, which performs the HTTP calls
        (must expose ``get(url, data)`` and ``get_url()``)."""
        self.client = client
def get_customers_by_action(self, recipient_group_id, action_id, date, attributes=None, delimiter=';',
top=None, skip=None):
"""Returns the list of customer IDs associated with a particular recipient group and action on
a particular date, plus an optional customer attribute."""
if not recipient_group_id or not action_id or not date:
raise Exception('No RecipientGroupID, ActionID and Date provided')
data = {
'RecipientGroupID': recipient_group_id,
'ActionID': action_id,
'Date': date
}
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
if attributes and type(attributes) == list:
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'attributes': {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
}
results.append(result)
else:
results = [item['CustomerID'] for item in response.json()]
return results
def get_customer_actions_by_target_group(self, target_group_id, date,
include_control_group=False,
attributes=None, delimiter=';',
include_recipient_group_id=False, channel_id=None,
top=None, skip=None):
"""Returns a list of customers and the details of the marketing actions they received, for a
particular target group ID on a particular date."""
if not target_group_id or not date:
raise Exception('No TargetGroupID and Date provided')
data = {
'TargetGroupID': target_group_id,
'Date': date
}
if channel_id:
data['ChannelID'] = channel_id
if include_recipient_group_id:
data['IncludeRecipientGroupID'] = True
if include_control_group:
data['IncludeControlGroup'] = True
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'action_id': item['ActionID'],
'channel_id': item['ChannelID']
}
if attributes and type(attributes) == list:
result['attributes'] = {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
results.append(result)
return results
def get_customer_one_time_actions_by_date(self, date, include_control_group=False, attributes=None, delimiter=';',
top=None, skip=None):
"""Returns a list of customers and the details of the marketing actions they received as part of one-time
campaigns executed on a particular date."""
if not date:
raise Exception('No Date provided')
data = {
'Date': date
}
if include_control_group:
data['IncludeControlGroup'] = True
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'action_id': item['ActionID'],
'channel_id': item['ChannelID']
}
if attributes and type(attributes) == list:
result['attributes'] = {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
results.append(result)
return results
def get_target_group_changers(self, start_date, end_date, attributes=None, delimiter=';', top=None, skip=None):
"""Returns the before and after target group IDs for customers whose target group changed during a particular
date range."""
if not start_date or not end_date:
raise Exception('No StartDate and EndDate provided')
data = {
'StartDate': start_date,
'EndDate': end_date
}
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'initial_target_group_id': item['InitialTargetGroupID'],
'final_target_group_id': item['FinalTargetGroupID']
}
if attributes and type(attributes) == list:
result['attributes'] = {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
results.append(result)
return results
def get_customer_attribute_changers(self, start_date, end_date, changed_customer_attribute,
attributes=None, delimiter=';', top=None, skip=None):
"""Returns an array of customer IDs, and their before and after attribute values, for customers whose selected
attribute changed during a particular date range."""
if not start_date or not end_date or not changed_customer_attribute:
raise Exception('No StartDate, EndDate and ChangedCustomerAttribute provided')
data = {
'StartDate': start_date,
'EndDate': end_date,
'ChangedCustomerAttributes': changed_customer_attribute
}
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'initial_customer_attribute': None if item['InitialCustomerAttributes'] == 'NULL'
else item['InitialCustomerAttributes'],
'final_customer_attribute': None if item['FinalCustomerAttributes'] == 'NULL'
else item['FinalCustomerAttributes']
}
if attributes and type(attributes) == list:
result['attributes'] = {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
results.append(result)
return results
def get_customer_future_values(self, life_cycle_stage_id=None, attribute=None, attribute_value=None,
top=None, skip=None):
"""Returns customer IDs and their current future values."""
if not life_cycle_stage_id and not attribute and not attribute_value:
raise Exception('No LifecycleStageID or CustomerAttribute and CustomerAttributeValue provided')
if life_cycle_stage_id and not attribute and not attribute_value:
data = {'LifecycleStageID': life_cycle_stage_id}
elif not life_cycle_stage_id and attribute and attribute_value:
data = {
'CustomerAttributes': attribute,
'CustomerAttributeValue': attribute_value
}
else:
raise Exception('Wrong combination for LifecycleStageID, CustomerAttribute and CustomerAttributeValue')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = {}
for item in response.json():
results[item['CustomerID']] = item['FutureValue']
return results
def get_customer_last_action_executed(self, customer_id):
"""Returns details of the last action executed for a particular customer ID."""
if not customer_id:
raise Exception('No CustomerID provided')
data = {
'CustomerID': customer_id
}
response = self.client.get(self.client.get_url(), data)
if not response:
return False
item = response.json()
return {
'customer_id': customer_id,
'action_id': item['ActionID'],
'date': item['Date'],
'duration': item['Duration'],
'target_group_id': item['TargetGroupID']
}
def get_customer_action_details_by_date(self, date, top=None, skip=None):
"""Returns customer IDs and details of the campaigns sent to them on a particular date."""
if not date:
raise Exception('No Date provided')
data = {
'Date': date
}
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'recipient_group_id': item['RecipientGroupID'],
'action_id': item['ActionID'],
'channel_id': item['ChannelID']
}
results.append(result)
return results
def get_customers_action_ended_by_date(self, date, top=None, skip=None):
"""Returns customer IDs and details of the campaigns they received, for action durations which ended on a
particular date."""
if not date:
raise Exception('No Date provided')
data = {
'Date': date
}
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'target_group_id': item['TargetGroupID'],
'action_id': item['ActionID'],
'date': item['Date'],
'duration': item['Duration'],
'channel_id': item['ChannelID']
}
results.append(result)
return results
def get_customer_send_details_by_campaign(self, campaign_id, include_templates_ids=False, top=None, skip=None):
"""Returns an array of all customer IDs, channel IDs, send times and channel send IDs for
a particular campaign ID."""
if not campaign_id:
raise Exception('No CampaignID provided')
data = {
'CampaignID': campaign_id
}
if include_templates_ids:
data['IncludeTemplateIDs'] = True
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'channel_id': item['ChannelID'],
'scheduled_time': item['ScheduledTime'],
'send_id': item['SendID']
}
if include_templates_ids:
result['template_id'] = item['TemplateID']
results.append(result)
return results
def get_customer_send_details_by_channel(self, channel_id, campaign_id, attributes=None, delimiter=';',
top=None, skip=None):
"""Returns an array of all customer IDs, template IDs, send times and customer attributes for a particular
combination of channel ID and campaign ID."""
if not channel_id or not campaign_id:
raise Exception('No ChannelID and CampaignID provided')
data = {
'ChannelID': channel_id,
'CampaignID': campaign_id
}
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'template_id': item['TemplateID'],
'scheduled_time': item['ScheduledTime']
}
if attributes and type(attributes) == list:
result['attributes'] = {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
results.append(result)
return results
def get_currently_targeted_customers(self, top=None, skip=None):
"""Returns an array of all customer IDs currently included in one or more campaigns."""
data = {}
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url()) if not data else self.client.get(self.client.get_url(), data)
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'campaign_id': item['CampaignID'],
'action_id': item['ActionID'],
'start_date': item['StartDate'],
'end_date': item['EndDate']
}
results.append(result)
return results
def get_canceled_campaign_customers(self, campaign_id, top=None, skip=None):
"""Returns an array of all customer IDs that had been included in a campaign that was canceled, along with their
associated action IDs and promo codes."""
if not campaign_id:
raise Exception('No CampaignID provided')
data = {'CampaignID': campaign_id}
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'action_id': item['ActionID'],
'promo_code': item['PromoCode']
}
results.append(result)
return results | optimove/customers.py | from __future__ import absolute_import, unicode_literals
from optimove.constants import AUTHORIZED_DELIMITERS, UNAUTHORIZED_DELIMITERS
class Customers(object):
client = None
def __init__(self, client):
self.client = client
def get_customers_by_action(self, recipient_group_id, action_id, date, attributes=None, delimiter=';',
top=None, skip=None):
"""Returns the list of customer IDs associated with a particular recipient group and action on
a particular date, plus an optional customer attribute."""
if not recipient_group_id or not action_id or not date:
raise Exception('No RecipientGroupID, ActionID and Date provided')
data = {
'RecipientGroupID': recipient_group_id,
'ActionID': action_id,
'Date': date
}
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
if attributes and type(attributes) == list:
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'attributes': {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
}
results.append(result)
else:
results = [item['CustomerID'] for item in response.json()]
return results
def get_customer_actions_by_target_group(self, target_group_id, date,
include_control_group=False,
attributes=None, delimiter=';',
include_recipient_group_id=False, channel_id=None,
top=None, skip=None):
"""Returns a list of customers and the details of the marketing actions they received, for a
particular target group ID on a particular date."""
if not target_group_id or not date:
raise Exception('No TargetGroupID and Date provided')
data = {
'TargetGroupID': target_group_id,
'Date': date
}
if channel_id:
data['ChannelID'] = channel_id
if include_recipient_group_id:
data['IncludeRecipientGroupID'] = True
if include_control_group:
data['IncludeControlGroup'] = True
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'action_id': item['ActionID'],
'channel_id': item['ChannelID']
}
if attributes and type(attributes) == list:
result['attributes'] = {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
results.append(result)
return results
def get_customer_one_time_actions_by_date(self, date, include_control_group=False, attributes=None, delimiter=';',
top=None, skip=None):
"""Returns a list of customers and the details of the marketing actions they received as part of one-time
campaigns executed on a particular date."""
if not date:
raise Exception('No Date provided')
data = {
'Date': date
}
if include_control_group:
data['IncludeControlGroup'] = True
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'action_id': item['ActionID'],
'channel_id': item['ChannelID']
}
if attributes and type(attributes) == list:
result['attributes'] = {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
results.append(result)
return results
def get_target_group_changers(self, start_date, end_date, attributes=None, delimiter=';', top=None, skip=None):
"""Returns the before and after target group IDs for customers whose target group changed during a particular
date range."""
if not start_date or not end_date:
raise Exception('No StartDate and EndDate provided')
data = {
'StartDate': start_date,
'EndDate': end_date
}
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'initial_target_group_id': item['InitialTargetGroupID'],
'final_target_group_id': item['FinalTargetGroupID']
}
if attributes and type(attributes) == list:
result['attributes'] = {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
results.append(result)
return results
def get_customer_attribute_changers(self, start_date, end_date, changed_customer_attribute,
attributes=None, delimiter=';', top=None, skip=None):
"""Returns an array of customer IDs, and their before and after attribute values, for customers whose selected
attribute changed during a particular date range."""
if not start_date or not end_date or not changed_customer_attribute:
raise Exception('No StartDate, EndDate and ChangedCustomerAttribute provided')
data = {
'StartDate': start_date,
'EndDate': end_date,
'ChangedCustomerAttributes': changed_customer_attribute
}
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'initial_customer_attribute': None if item['InitialCustomerAttributes'] == 'NULL'
else item['InitialCustomerAttributes'],
'final_customer_attribute': None if item['FinalCustomerAttributes'] == 'NULL'
else item['FinalCustomerAttributes']
}
if attributes and type(attributes) == list:
result['attributes'] = {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
results.append(result)
return results
def get_customer_future_values(self, life_cycle_stage_id=None, attribute=None, attribute_value=None,
top=None, skip=None):
"""Returns customer IDs and their current future values."""
if not life_cycle_stage_id and not attribute and not attribute_value:
raise Exception('No LifecycleStageID or CustomerAttribute and CustomerAttributeValue provided')
if life_cycle_stage_id and not attribute and not attribute_value:
data = {'LifecycleStageID': life_cycle_stage_id}
elif not life_cycle_stage_id and attribute and attribute_value:
data = {
'CustomerAttributes': attribute,
'CustomerAttributeValue': attribute_value
}
else:
raise Exception('Wrong combination for LifecycleStageID, CustomerAttribute and CustomerAttributeValue')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = {}
for item in response.json():
results[item['CustomerID']] = item['FutureValue']
return results
def get_customer_last_action_executed(self, customer_id):
"""Returns details of the last action executed for a particular customer ID."""
if not customer_id:
raise Exception('No CustomerID provided')
data = {
'CustomerID': customer_id
}
response = self.client.get(self.client.get_url(), data)
if not response:
return False
item = response.json()
return {
'customer_id': customer_id,
'action_id': item['ActionID'],
'date': item['Date'],
'duration': item['Duration'],
'target_group_id': item['TargetGroupID']
}
def get_customer_action_details_by_date(self, date, top=None, skip=None):
"""Returns customer IDs and details of the campaigns sent to them on a particular date."""
if not date:
raise Exception('No Date provided')
data = {
'Date': date
}
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'recipient_group_id': item['RecipientGroupID'],
'action_id': item['ActionID'],
'channel_id': item['ChannelID']
}
results.append(result)
return results
def get_customers_action_ended_by_date(self, date, top=None, skip=None):
"""Returns customer IDs and details of the campaigns they received, for action durations which ended on a
particular date."""
if not date:
raise Exception('No Date provided')
data = {
'Date': date
}
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'target_group_id': item['TargetGroupID'],
'action_id': item['ActionID'],
'date': item['Date'],
'duration': item['Duration'],
'channel_id': item['ChannelID']
}
results.append(result)
return results
def get_customer_send_details_by_campaign(self, campaign_id, include_templates_ids=False, top=None, skip=None):
"""Returns an array of all customer IDs, channel IDs, send times and channel send IDs for
a particular campaign ID."""
if not campaign_id:
raise Exception('No CampaignID provided')
data = {
'CampaignID': campaign_id
}
if include_templates_ids:
data['IncludeTemplateIDs'] = True
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'channel_id': item['ChannelID'],
'scheduled_time': item['ScheduledTime'],
'send_id': item['SendID']
}
if include_templates_ids:
result['template_id'] = item['TemplateID']
results.append(result)
return results
def get_customer_send_details_by_channel(self, channel_id, campaign_id, attributes=None, delimiter=';',
top=None, skip=None):
"""Returns an array of all customer IDs, template IDs, send times and customer attributes for a particular
combination of channel ID and campaign ID."""
if not channel_id or not campaign_id:
raise Exception('No ChannelID and CampaignID provided')
data = {
'ChannelID': channel_id,
'CampaignID': campaign_id
}
if attributes and type(attributes) == list:
data['CustomerAttributes'] = ';'.join(attributes)
if delimiter:
if delimiter in AUTHORIZED_DELIMITERS and delimiter not in UNAUTHORIZED_DELIMITERS:
data['CustomerAttributesDelimiter'] = delimiter
else:
raise Exception('Invalid delimiter')
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'template_id': item['TemplateID'],
'scheduled_time': item['ScheduledTime']
}
if attributes and type(attributes) == list:
result['attributes'] = {
key: value for key, value in zip(attributes, item['CustomerAttributes'])
}
results.append(result)
return results
def get_currently_targeted_customers(self, top=None, skip=None):
"""Returns an array of all customer IDs currently included in one or more campaigns."""
data = {}
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url()) if not data else self.client.get(self.client.get_url(), data)
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'campaign_id': item['CampaignID'],
'action_id': item['ActionID'],
'start_date': item['StartDate'],
'end_date': item['EndDate']
}
results.append(result)
return results
def get_canceled_campaign_customers(self, campaign_id, top=None, skip=None):
"""Returns an array of all customer IDs that had been included in a campaign that was canceled, along with their
associated action IDs and promo codes."""
if not campaign_id:
raise Exception('No CampaignID provided')
data = {'CampaignID': campaign_id}
if top and type(top) == int:
data['$top'] = top
if skip and type(skip) == int:
data['$skip'] = skip
response = self.client.get(self.client.get_url(), data)
if not response:
return False
results = list()
for item in response.json():
result = {
'customer_id': item['CustomerID'],
'action_id': item['ActionID'],
'promo_code': item['PromoCode']
}
results.append(result)
return results | 0.757974 | 0.154695 |
import re
class Checker:
    """Static predicates over a raw page-source string."""

    @staticmethod
    def isLevelUp(page_source):
        """Return True when the page announces a level-up."""
        return page_source.find('Level Up') != -1

    @staticmethod
    def isBotDetected(page_source):
        """Return True when a captcha challenge is present in the page."""
        return page_source.find('captcha') != -1
class Url:
    """Absolute URLs of the torn.com pages the bot navigates to."""
    def __init__(self):
        pass
    # base address; the page URLs below are derived from it
    root = 'http://www.torn.com/'
    crime = root + 'crimes.php'  # crimes page
    gym = root + 'gym.php'  # gym (stat training) page
    home = root + 'index.php'  # landing/home page
class Util:
    """Helpers for parsing integers embedded in page text."""

    @staticmethod
    def get_pair_int(text):
        """Parse the first two integers in *text* (e.g. "Energy: 25/100").

        Thousands separators are tolerated ("1,250" -> 1250).

        Returns:
            (int, int): the two parsed values, or (-1, 100) on failure.
        """
        try:
            ints = re.findall(r'[\d,]+', text)
            # The regex deliberately captures thousands separators; strip them
            # before int() -- int('1,234') would raise ValueError (the original
            # therefore failed on every comma-formatted number).
            return int(ints[0].replace(',', '')), int(ints[1].replace(',', ''))
        except (ValueError, IndexError):
            # IndexError: fewer than two numbers found (previously uncaught)
            return -1, 100

    @staticmethod
    def get_single_int(text):
        """Parse the first integer in *text*; return -1 when none is found."""
        try:
            return int(re.findall(r'[\d,]+', text)[0].replace(',', ''))
        except (ValueError, IndexError):
            return -1
class Xpath:
    """XPath selectors for the torn.com pages, grouped per page/widget.

    NOTE: the original final assignment had stray dataset-artifact tokens
    (``| util.py |``) fused onto it, which broke the line; removed here.
    """

    class Captcha:
        # tab that switches the captcha widget to the image challenge
        image_tab = "//div[@id='tabmenu']/ul[1]/li[3]"

    class Crime:
        item = "//ul[@class='item']"          # a selectable crime entry
        try_again = "//div[@id='try_again']"  # retry button after a crime

    class Gym:
        class Data:
            # current stat totals displayed in the gym
            strength = "//span[@id='strengthTotal']"
            defense = "//span[@id='defenceTotal']"
            speed = "//span[@id='speedTotal']"
            dexterity = "//span[@id='dexterityTotal']"

        class Input:
            # energy-amount inputs next to each trainable stat
            strength = "//input[@name='strength']"
            defense = "//input[@name='defense']"
            speed = "//input[@name='speed']"
            dexterity = "//input[@name='dexterity']"

    class Login:
        email = "//input[@id='player']"
        password = "//input[@id='password']"
        login = "//input[@class='login'][@type='submit']"

    class Profile:
        # status bars in the sidebar
        energy = "//div[@id='energy']"
        nerve = "//div[@id='nerve']"
        happy = "//div[@id='happy']"
        life = "//div[@id='life']"
import re
class Checker:
def __init__(self):
pass
@staticmethod
def isLevelUp(page_source):
return 'Level Up' in page_source
@staticmethod
def isBotDetected(page_source):
return 'captcha' in page_source
class Url:
def __init__(self):
pass
root = 'http://www.torn.com/'
crime = root + 'crimes.php'
gym = root + 'gym.php'
home = root + 'index.php'
class Util:
def __init__(self):
# zero constructor
pass
@staticmethod
def get_pair_int(text):
try:
ints = re.findall('[\d,]+', text)
return int(ints[0]), int(ints[1])
except ValueError:
return -1, 100
@staticmethod
def get_single_int(text):
try:
return int(re.findall('[\d,]+', text)[0])
except ValueError:
return -1
class Xpath:
def __init__(self):
pass
class Captcha:
def __init__(self):
pass
image_tab = "//div[@id='tabmenu']/ul[1]/li[3]"
class Crime:
def __init__(self):
pass
item = "//ul[@class='item']"
try_again = "//div[@id='try_again']"
class Gym:
def __init__(self):
pass
class Data:
def __init__(self):
pass
strength = "//span[@id='strengthTotal']"
defense = "//span[@id='defenceTotal']"
speed = "//span[@id='speedTotal']"
dexterity = "//span[@id='dexterityTotal']"
class Input:
def __init__(self):
pass
strength = "//input[@name='strength']"
defense = "//input[@name='defense']"
speed = "//input[@name='speed']"
dexterity = "//input[@name='dexterity']"
class Login:
def __init__(self):
pass
email = "//input[@id='player']"
password = "//input[@id='password']"
login = "//input[@class='login'][@type='submit']"
class Profile:
def __init__(self):
pass
energy = "//div[@id='energy']"
nerve = "//div[@id='nerve']"
happy = "//div[@id='happy']"
life = "//div[@id='life']" | 0.382487 | 0.105119 |
import argparse
import logging
from multiprocessing import Event, Process
from pathlib import Path
import time
import derp.util
import derp.brain
import derp.camera
import derp.imu
import derp.joystick
import derp.servo
import derp.writer
def all_running(processes):
    """Report whether every child process is still alive.

    Each process is joined with a zero timeout first so that children which
    have already finished get reaped before the liveness check.
    """
    for process in processes:
        process.join(timeout=0)
        if not process.is_alive():
            return False
    return True
def loop(config, exit_event, func):
    """Construct ``func(config)`` and repeatedly call its ``run()`` method.

    Iteration stops as soon as *exit_event* is set or ``run()`` returns a
    falsy value; the component is then released.
    """
    component = func(config)
    while not exit_event.is_set():
        if not component.run():
            break
    del component
def main():
    """Prepare arguments, configurations, variables and run the event loop.

    Spawns one child process per configured component and supervises them:
    when any child exits, all children are signalled to stop.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # typo fix: "hardeware" -> "hardware" in the user-facing help text
    parser.add_argument("config", type=Path, help="Main config path, should include all hardware")
    args = parser.parse_args()
    # Single-instance guard: bail out if another drive process owns the pid file.
    pid_path = '/tmp/derp_drive.pid'
    if derp.util.is_already_running(pid_path):
        return
    derp.util.write_pid(pid_path)
    # Load the config and snapshot it into a fresh recording directory.
    config = derp.util.load_config(args.config)
    recording_path = derp.util.make_recording_path()
    derp.util.dump_config(config, recording_path / 'config.yaml')
    config['recording_path'] = recording_path
    logger = derp.util.init_logger('drive', config['recording_path'])
    # Component registry: only components named in the config get started.
    component_map = {
        "brain": derp.brain.Clone,
        "camera": derp.camera.Camera,
        "imu": derp.imu.Imu,
        "joystick": derp.joystick.Joystick,
        "servo": derp.servo.Servo,
        "writer": derp.writer.Writer,
    }
    processes = []
    exit_event = Event()
    for name in sorted(component_map):
        if name not in config:
            logger.info("skip %s", name)
            continue
        proc_args = (config, exit_event, component_map[name])
        proc = Process(target=loop, name=name, args=proc_args)
        proc.start()
        processes.append(proc)
        logger.info("start %s %i", name, proc.pid)
    # Supervise: as soon as any child dies, signal all of them to exit.
    while all_running(processes):
        time.sleep(0.1)
    exit_event.set()
    logger.info("exit")
# Script entry point; fixed: stray dataset-artifact tokens were fused onto
# the call line, breaking the file.
if __name__ == "__main__":
    main()
import logging
from multiprocessing import Event, Process
from pathlib import Path
import time
import derp.util
import derp.brain
import derp.camera
import derp.imu
import derp.joystick
import derp.servo
import derp.writer
def all_running(processes):
""" Returns whether all processes are currently alive """
for proc in processes:
proc.join(timeout=0)
if not proc.is_alive():
return False
return True
def loop(config, exit_event, func):
""" Makes running multiprocessing easier """
obj = func(config)
while not exit_event.is_set() and obj.run():
pass
del obj
def main():
""" Prepare arguments, configurations, variables and run the event loop. """
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("config", type=Path, help="Main config path, should include all hardeware")
args = parser.parse_args()
pid_path = '/tmp/derp_drive.pid'
if derp.util.is_already_running(pid_path):
return
derp.util.write_pid(pid_path)
config = derp.util.load_config(args.config)
recording_path = derp.util.make_recording_path()
derp.util.dump_config(config, recording_path / 'config.yaml')
config['recording_path'] = recording_path
logger = derp.util.init_logger('drive', config['recording_path'])
component_map = {
"brain": derp.brain.Clone,
"camera": derp.camera.Camera,
"imu": derp.imu.Imu,
"joystick": derp.joystick.Joystick,
"servo": derp.servo.Servo,
"writer": derp.writer.Writer,
}
processes = []
exit_event = Event()
for name in sorted(component_map):
if name not in config:
logger.info("skip %s", name)
continue
proc_args = (config, exit_event, component_map[name])
proc = Process(target=loop, name=name, args=proc_args)
proc.start()
processes.append(proc)
logger.info("start %s %i", name, proc.pid)
while all_running(processes):
time.sleep(0.1)
exit_event.set()
logger.info("exit")
if __name__ == "__main__":
main() | 0.338186 | 0.096535 |
from typing import Dict, Optional, Union, cast
import numpy as np
import pandas as pd
from scipy import sparse
from autosklearn.constants import (
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
MULTILABEL_CLASSIFICATION,
MULTIOUTPUT_REGRESSION,
REGRESSION,
)
from autosklearn.data.abstract_data_manager import AbstractDataManager
from autosklearn.data.validation import (
SUPPORTED_FEAT_TYPES,
SUPPORTED_TARGET_TYPES,
)
class XYDataManager(AbstractDataManager):
    """Data manager backed by in-memory (X, y) arrays / frames.

    Fills ``self.info`` with task metadata (task id, sparsity flag,
    missing-value flag, number of labels) and stores the train/test
    splits in ``self.data``.

    Raises:
        ValueError: if ``feat_type`` is not a dict, if ``y`` has more
            than two dimensions, or if ``X`` and ``y`` disagree on the
            number of samples.
    """
    def __init__(
        self,
        X: SUPPORTED_FEAT_TYPES,
        y: SUPPORTED_TARGET_TYPES,
        X_test: Optional[SUPPORTED_FEAT_TYPES],
        y_test: Optional[SUPPORTED_TARGET_TYPES],
        task: int,
        feat_type: Dict[Union[str, int], str],
        dataset_name: str
    ):
        super(XYDataManager, self).__init__(dataset_name)
        self.info['task'] = task
        if sparse.issparse(X):
            self.info['is_sparse'] = 1
            # BUG FIX: this previously stored np.all(np.isfinite(...)),
            # which is True when there are NO non-finite values -- the
            # inverse of what 'has_missing' means, and inconsistent with
            # the DataFrame branch below (.isnull().values.any() is True
            # when missing values are present).
            self.info['has_missing'] = not np.all(np.isfinite(cast(sparse.csr_matrix, X).data))
        else:
            self.info['is_sparse'] = 0
            if hasattr(X, 'iloc'):
                # pandas DataFrame: True when any cell is NaN/None.
                self.info['has_missing'] = cast(pd.DataFrame, X).isnull().values.any()
            else:
                # BUG FIX: same inversion as the sparse branch above.
                self.info['has_missing'] = not np.all(np.isfinite(X))
        # Number of labels/outputs per task type; evaluated eagerly for all
        # task kinds, then selected by the actual task id.
        label_num = {
            REGRESSION: 1,
            BINARY_CLASSIFICATION: 2,
            MULTIOUTPUT_REGRESSION: np.shape(y)[-1],
            MULTICLASS_CLASSIFICATION: len(np.unique(y)),
            MULTILABEL_CLASSIFICATION: np.shape(y)[-1]
        }
        self.info['label_num'] = label_num[task]
        self.data['X_train'] = X
        self.data['Y_train'] = y
        if X_test is not None:
            self.data['X_test'] = X_test
        if y_test is not None:
            self.data['Y_test'] = y_test
        if isinstance(feat_type, dict):
            self.feat_type = feat_type
        else:
            raise ValueError("Unsupported feat_type provided. We expect the user to "
                             "provide a Dict[str, str] mapping from column to categorical/ "
                             "numerical.")
        # TODO: try to guess task type!
        if len(np.shape(y)) > 2:
            raise ValueError('y must not have more than two dimensions, '
                             'but has %d.' % len(np.shape(y)))
        if np.shape(X)[0] != np.shape(y)[0]:
            raise ValueError('X and y must have the same number of '
                             'datapoints, but have %d and %d.' % (np.shape(X)[0],
                                                                  np.shape(y)[0]))
import numpy as np
import pandas as pd
from scipy import sparse
from autosklearn.constants import (
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
MULTILABEL_CLASSIFICATION,
MULTIOUTPUT_REGRESSION,
REGRESSION,
)
from autosklearn.data.abstract_data_manager import AbstractDataManager
from autosklearn.data.validation import (
SUPPORTED_FEAT_TYPES,
SUPPORTED_TARGET_TYPES,
)
class XYDataManager(AbstractDataManager):
def __init__(
self,
X: SUPPORTED_FEAT_TYPES,
y: SUPPORTED_TARGET_TYPES,
X_test: Optional[SUPPORTED_FEAT_TYPES],
y_test: Optional[SUPPORTED_TARGET_TYPES],
task: int,
feat_type: Dict[Union[str, int], str],
dataset_name: str
):
super(XYDataManager, self).__init__(dataset_name)
self.info['task'] = task
if sparse.issparse(X):
self.info['is_sparse'] = 1
self.info['has_missing'] = np.all(np.isfinite(cast(sparse.csr_matrix, X).data))
else:
self.info['is_sparse'] = 0
if hasattr(X, 'iloc'):
self.info['has_missing'] = cast(pd.DataFrame, X).isnull().values.any()
else:
self.info['has_missing'] = np.all(np.isfinite(X))
label_num = {
REGRESSION: 1,
BINARY_CLASSIFICATION: 2,
MULTIOUTPUT_REGRESSION: np.shape(y)[-1],
MULTICLASS_CLASSIFICATION: len(np.unique(y)),
MULTILABEL_CLASSIFICATION: np.shape(y)[-1]
}
self.info['label_num'] = label_num[task]
self.data['X_train'] = X
self.data['Y_train'] = y
if X_test is not None:
self.data['X_test'] = X_test
if y_test is not None:
self.data['Y_test'] = y_test
if isinstance(feat_type, dict):
self.feat_type = feat_type
else:
raise ValueError("Unsupported feat_type provided. We expect the user to "
"provide a Dict[str, str] mapping from column to categorical/ "
"numerical.")
# TODO: try to guess task type!
if len(np.shape(y)) > 2:
raise ValueError('y must not have more than two dimensions, '
'but has %d.' % len(np.shape(y)))
if np.shape(X)[0] != np.shape(y)[0]:
raise ValueError('X and y must have the same number of '
'datapoints, but have %d and %d.' % (np.shape(X)[0],
np.shape(y)[0])) | 0.695958 | 0.239816 |
import os
import shutil
import argparse
import tarfile
from encoding.utils import download, mkdir
_TARGET_DIR = os.path.expanduser('../dataset/')
def parse_args():
    """Parse command-line options for the PASCAL VOC preparation script.

    Returns:
        argparse.Namespace with ``download_dir`` (str or None) and
        ``overwrite`` (bool, default False).
    """
    parser = argparse.ArgumentParser(
        description='Initialize PASCAL VOC dataset.',
        epilog='Example: python prepare_pascal.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', type=str, help='dataset directory on disk')
    # Typo fix in the user-visible help text: "corrputed" -> "corrupted".
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite downloaded files if set, in case they are corrupted')
    args = parser.parse_args()
    return args
def download_voc(path, overwrite=False):
    """Download and extract the PASCAL VOC 2012 trainval archive into *path*.

    Args:
        path: destination directory; the tar is cached under ``path/downloads``.
        overwrite: re-download even if a cached archive already exists.
    """
    _DOWNLOAD_URLS = [
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
         '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for url, checksum in _DOWNLOAD_URLS:
        # download() verifies the SHA-1 hash and skips the fetch when a valid
        # cached copy is present (unless overwrite is set).
        filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum)
        # extract
        # SECURITY NOTE(review): extractall() without member filtering is
        # vulnerable to path traversal from a crafted archive; acceptable
        # only because the archive hash is pinned above.
        with tarfile.open(filename) as tar:
            tar.extractall(path=path)
def download_aug(path, overwrite=False):
    """Download the SBD augmented annotations and build VOCaug/trainval.txt.

    Args:
        path: destination directory; the tgz is cached under ``path/downloads``.
        overwrite: re-download even if a cached archive already exists.
    """
    _AUG_DOWNLOAD_URLS = [
        ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz', '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum)
        # extract
        # SECURITY NOTE(review): extractall() without member filtering is
        # vulnerable to path traversal; mitigated by the pinned SHA-1 above.
        with tarfile.open(filename) as tar:
            tar.extractall(path=path)
        # The archive extracts as 'benchmark_RELEASE'; rename to the
        # 'VOCaug' layout that downstream loaders expect.
        shutil.move(os.path.join(path, 'benchmark_RELEASE'),
                    os.path.join(path, 'VOCaug'))
        filenames = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
        # generate trainval.txt by concatenating the train and val splits
        with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile:
            for fname in filenames:
                fname = os.path.join(path, fname)
                with open(fname) as infile:
                    for line in infile:
                        outfile.write(line)
if __name__ == '__main__':
    args = parse_args()
    # NOTE(review): args.download_dir / args.overwrite are parsed but never
    # used below -- the hard-coded _TARGET_DIR and overwrite=False always
    # win. Confirm whether the CLI options were meant to be wired through.
    mkdir(os.path.expanduser('~/.encoding/datasets'))
    download_voc(_TARGET_DIR, overwrite=False)
    download_aug(_TARGET_DIR, overwrite=False)
import shutil
import argparse
import tarfile
from encoding.utils import download, mkdir
_TARGET_DIR = os.path.expanduser('../dataset/')
def parse_args():
parser = argparse.ArgumentParser(
description='Initialize PASCAL VOC dataset.',
epilog='Example: python prepare_pascal.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--download-dir', type=str, help='dataset directory on disk')
parser.add_argument('--overwrite', action='store_true', help='overwrite downloaded files if set, in case they are corrputed')
args = parser.parse_args()
return args
def download_voc(path, overwrite=False):
_DOWNLOAD_URLS = [
('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
'4e443f8a2eca6b1dac8a6c57641b67dd40621a49')]
download_dir = os.path.join(path, 'downloads')
mkdir(download_dir)
for url, checksum in _DOWNLOAD_URLS:
filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum)
# extract
with tarfile.open(filename) as tar:
tar.extractall(path=path)
def download_aug(path, overwrite=False):
_AUG_DOWNLOAD_URLS = [
('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz', '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
download_dir = os.path.join(path, 'downloads')
mkdir(download_dir)
for url, checksum in _AUG_DOWNLOAD_URLS:
filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum)
# extract
with tarfile.open(filename) as tar:
tar.extractall(path=path)
shutil.move(os.path.join(path, 'benchmark_RELEASE'),
os.path.join(path, 'VOCaug'))
filenames = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
# generate trainval.txt
with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile:
for fname in filenames:
fname = os.path.join(path, fname)
with open(fname) as infile:
for line in infile:
outfile.write(line)
if __name__ == '__main__':
args = parse_args()
mkdir(os.path.expanduser('~/.encoding/datasets'))
download_voc(_TARGET_DIR, overwrite=False)
download_aug(_TARGET_DIR, overwrite=False) | 0.278551 | 0.108001 |
from selenium import webdriver
from time import sleep
class ChromeAuto:
    """Selenium helper that automates a GitHub logout/login round trip.

    NOTE(review): uses the Selenium 3 ``find_element_by_*`` API, which was
    removed in Selenium 4 -- confirm the pinned selenium version.
    """
    def __init__(self):
        # 'user-data-dir=Perfil' points Chrome at a persistent profile
        # directory so cookies/session survive between runs.
        self.driver_path = 'chromedriver'
        self.options = webdriver.ChromeOptions()
        self.options.add_argument('user-data-dir=Perfil')
        self.chrome = webdriver.Chrome(
            self.driver_path,
            options=self.options
        )
    def clica_sign_in(self):
        """Click the 'Sign in' link on the GitHub home page."""
        try:
            btn_sign_in = self.chrome.find_element_by_link_text('Sign in')
            btn_sign_in.click()
        except Exception as e:
            print('Erro ao clicar em Sign in', e)
    def acessa(self, site):
        """Navigate the browser to *site*."""
        self.chrome.get(site)
    def sair(self):
        """Quit the browser and end the WebDriver session."""
        self.chrome.quit()
    def clica_perfil(self):
        """Open the profile dropdown in the GitHub header (brittle CSS path)."""
        try:
            perfil = self.chrome.find_element_by_css_selector('body > div.position-relative.js-header-wrapper > header > div.Header-item.position-relative.mr-0.d-none.d-md-flex > details')
            perfil.click()
        except Exception as e:
            print('Erro ao clicar no perfil: ', e)
    def faz_logout(self):
        """Click the sign-out button inside the open profile dropdown."""
        try:
            perfil = self.chrome.find_element_by_css_selector('body > div.position-relative.js-header-wrapper > header > div.Header-item.position-relative.mr-0.d-none.d-md-flex > details > details-menu > form > button')
            perfil.click()
        except Exception as e:
            print('Erro ao fazer logout: ', e)
    def verifica_usuario(self, usuario):
        """Assert that *usuario* appears in the header profile link HTML."""
        profile_link = self.chrome.find_element_by_class_name('user-profile-link')
        profile_link_html = profile_link.get_attribute('innerHTML')
        assert usuario in profile_link_html
    def log_in(self):
        """Fill and submit the GitHub login form.

        SECURITY NOTE(review): credentials are hard-coded (shown here as
        redacted placeholders); they should come from environment
        variables or a secrets store.
        """
        try:
            input_login = self.chrome.find_element_by_id('login_field')
            input_password = self.chrome.find_element_by_id('password')
            btn_login = self.chrome.find_element_by_name('commit')
            input_login.send_keys('<EMAIL>rohdpf21@<EMAIL>')
            input_password.send_keys('Bxiopczwby<PASSWORD>')
            btn_login.click()
        except Exception as e:
            print('Erro ao fazer o login: ', e)
if __name__ == '__main__':
    # End-to-end flow: open GitHub, log out of the current session, then
    # log back in and verify the expected username in the profile link.
    chrome = ChromeAuto()
    chrome.acessa('https://github.com/')
    chrome.clica_perfil()
    chrome.faz_logout()
    chrome.clica_sign_in()
    chrome.log_in()
    chrome.clica_perfil()
    chrome.verifica_usuario('pedrohd212')
    # Brief pause so the result is visible before the browser closes.
    sleep(3)
    chrome.sair()
from selenium import webdriver
from time import sleep
class ChromeAuto:
def __init__(self):
self.driver_path = 'chromedriver'
self.options = webdriver.ChromeOptions()
self.options.add_argument('user-data-dir=Perfil')
self.chrome = webdriver.Chrome(
self.driver_path,
options=self.options
)
def clica_sign_in(self):
try:
btn_sign_in = self.chrome.find_element_by_link_text('Sign in')
btn_sign_in.click()
except Exception as e:
print('Erro ao clicar em Sign in', e)
def acessa(self, site):
self.chrome.get(site)
def sair(self):
self.chrome.quit()
def clica_perfil(self):
try:
perfil = self.chrome.find_element_by_css_selector('body > div.position-relative.js-header-wrapper > header > div.Header-item.position-relative.mr-0.d-none.d-md-flex > details')
perfil.click()
except Exception as e:
print('Erro ao clicar no perfil: ', e)
def faz_logout(self):
try:
perfil = self.chrome.find_element_by_css_selector('body > div.position-relative.js-header-wrapper > header > div.Header-item.position-relative.mr-0.d-none.d-md-flex > details > details-menu > form > button')
perfil.click()
except Exception as e:
print('Erro ao fazer logout: ', e)
def verifica_usuario(self, usuario):
profile_link = self.chrome.find_element_by_class_name('user-profile-link')
profile_link_html = profile_link.get_attribute('innerHTML')
assert usuario in profile_link_html
def log_in(self):
try:
input_login = self.chrome.find_element_by_id('login_field')
input_password = self.chrome.find_element_by_id('password')
btn_login = self.chrome.find_element_by_name('commit')
input_login.send_keys('<EMAIL>rohdpf21@<EMAIL>')
input_password.send_keys('Bxiopczwby<PASSWORD>')
btn_login.click()
except Exception as e:
print('Erro ao fazer o login: ', e)
if __name__ == '__main__':
chrome = ChromeAuto()
chrome.acessa('https://github.com/')
chrome.clica_perfil()
chrome.faz_logout()
chrome.clica_sign_in()
chrome.log_in()
chrome.clica_perfil()
chrome.verifica_usuario('pedrohd212')
sleep(3)
chrome.sair() | 0.332202 | 0.054601 |
import logging
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib import hub
from ryu.topology import event, switches
from ryu.topology.api import get_switch, get_link, get_host
from ryu import cfg
CONF = cfg.CONF
class test_wpq(app_manager.RyuApp):
    """Ryu app that discovers the switch/host topology and prints it
    periodically as an ASCII table."""
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
                    ofproto_v1_3.OFP_VERSION]
    def __init__(self, *args, **kwargs):
        super(test_wpq, self).__init__(*args, **kwargs)
        self.topology_api_app = self
        # (dpid, port_no) -> (peer dpid, peer port) for switch links, or
        # (mac, ipv4 list) for host attachments.
        self.link_to_port = {}
        # (dpid, port_no) -> 1 if the port faces a switch, 2 if a host.
        self.host_or_switch = {}
        # dpid -> set of port numbers on that switch.
        self.switch_port_table = {}
        self.name = "wpq"
        self.discover_thread = hub.spawn(self._discover_links)
    # Green-thread: refresh and print the topology every 5 seconds.
    def _discover_links(self):
        while True:
            self.get_topology(None)
            try:
                self.show_topology()
            except Exception as err:
                print ("please input pingall in mininet and wait a memment")
            hub.sleep(5)
    # Install the table-miss entry (send unmatched packets to the controller).
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_feature_handle(self, ev):
        msg = ev.msg
        print (msg)
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        self.logger.info("switch %s is connected", datapath.id)
        # Empty match = wildcard; priority 0 makes it the table-miss entry.
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]
        self.add_flow(datapath=datapath, priority=0, actions=actions, match=match)
    def add_flow(self, datapath, priority, actions, match, idle_timeout=0, hard_timeout=0):
        """Send an OFPFlowMod installing *actions* for *match* on *datapath*."""
        ofp = datapath.ofproto
        parser = datapath.ofproto_parser
        inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
                                             actions)]
        mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                idle_timeout=idle_timeout,
                                hard_timeout=hard_timeout,
                                match=match, instructions=inst)
        datapath.send_msg(mod)
    # Fill switch_port_table from the current switch list.
    def create_map(self, switch_list):
        for sw in switch_list:
            dpid = sw.dp.id
            self.switch_port_table.setdefault(dpid, set())
            for p in sw.ports:
                self.switch_port_table[dpid].add(p.port_no)
        # print "-------------- switch port table ---------------"
        # print self.switch_port_table
    # Fill link_to_port / host_or_switch from the link and host lists.
    def create_link_port(self, link_list, host_list):
        for link in link_list:
            src = link.src
            dst = link.dst
            # Record both directions of every switch-to-switch link.
            self.link_to_port[(src.dpid, src.port_no)] = (dst.dpid, dst.port_no)
            self.link_to_port[(dst.dpid, dst.port_no)] = (src.dpid, src.port_no)
            self.host_or_switch[(src.dpid, src.port_no)] = 1
            self.host_or_switch[(dst.dpid, dst.port_no)] = 1
        for host in host_list:
            port = host.port
            # Host attachments map the switch port to the host's (mac, ipv4).
            self.link_to_port[(port.dpid, port.port_no)] = (host.mac, host.ipv4)
            self.host_or_switch[(port.dpid, port.port_no)] = 2
    # Packet-in handler: currently only refreshes the topology caches.
    # NOTE(review): pkt/dpid/port are computed but unused.
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def packetin_handler(self, ev):
        # print ev.msg
        msg = ev.msg
        pkt = packet.Packet(msg.data)
        # print pkt.get_protocols
        dpid = msg.datapath.id
        port = msg.match['in_port']
        self.get_topology(None)
    # Class-body list evaluated before the decorator below uses it.
    events = [event.EventSwitchEnter,
              event.EventSwitchLeave, event.EventPortAdd,
              event.EventPortDelete, event.EventPortModify,
              event.EventLinkAdd, event.EventLinkDelete]
    # Rebuild the topology caches whenever the topology changes.
    @set_ev_cls(events)
    def get_topology(self, ev):
        self.create_map(get_switch(self.topology_api_app))
        # print get_host(self.topology_api_app)
        # print type(get_host(self.topology_api_app))
        self.create_link_port(get_link(self.topology_api_app), get_host(self.topology_api_app))
        # self.show_topology()
    # Pretty-print the discovered topology to stdout.
    def show_topology(self):
        i = 1
        print ("")
        print ("")
        print ("")
        print ("----------------" * 2, "physical topology", "----------------" * 6)
        for dpid in self.switch_port_table.keys():
            print ("switch%d ----------dpid---------- " % i,)
            for port_no in self.switch_port_table[dpid]:
                print ("-----------port %s-----------" % port_no,)
            print ("")
            print (" ", "%11d" % dpid ,"%12s" % " ",)
            # # print self.switch_port_table[dpid]
            try:
                # One column per port: either the peer switch/port or the
                # attached host's MAC.
                for port_no in self.switch_port_table[dpid]:
                    if self.host_or_switch[(dpid, port_no)] == 1:
                        print ("%10s" % "switch", "%d" % self.link_to_port[(dpid, port_no)][0], " port %d" % self.link_to_port[(dpid, port_no)][1], " ",)
                    elif self.host_or_switch[(dpid, port_no)] == 2:
                        print ("%s" % "host", "mac: %s" % self.link_to_port[(dpid, port_no)][0],)
                    else:
                        print ("%28s" % "None")
                print ("")
                print (" ", "%23s" % " ",)
                # Second row: IPv4 addresses for host-facing ports.
                for port_no in self.switch_port_table[dpid]:
                    if self.host_or_switch[(dpid, port_no)] == 2:
                        print (" ipv4 :", self.link_to_port[(dpid, port_no)][1],)
                    else:
                        print ("%28s" % " ",)
                # NOTE(review): bare `print` (no call) is a no-op in
                # Python 3 -- likely a Python 2 leftover meant as print("").
                print
            except Exception as error:
                print ("please input pingall in mininet and wait a momment until it's finished")
            i = i + 1
        print ("------------------" * 8)
        print ("")
        print ("")
        print ("")
import logging
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib import hub
from ryu.topology import event, switches
from ryu.topology.api import get_switch, get_link, get_host
from ryu import cfg
CONF = cfg.CONF
class test_wpq(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(test_wpq, self).__init__(*args, **kwargs)
self.topology_api_app = self
self.link_to_port = {}
self.host_or_switch = {}
self.switch_port_table = {}
self.name = "wpq"
self.discover_thread = hub.spawn(self._discover_links)
#A thread to output the information of topology
def _discover_links(self):
while True:
self.get_topology(None)
try:
self.show_topology()
except Exception as err:
print ("please input pingall in mininet and wait a memment")
hub.sleep(5)
#add entry of table-miss
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_feature_handle(self, ev):
msg = ev.msg
print (msg)
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
self.logger.info("switch %s is connected", datapath.id)
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]
self.add_flow(datapath=datapath, priority=0, actions=actions, match=match)
def add_flow(self, datapath, priority, actions, match, idle_timeout=0, hard_timeout=0):
ofp = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
idle_timeout=idle_timeout,
hard_timeout=hard_timeout,
match=match, instructions=inst)
datapath.send_msg(mod)
#fill the port of switch imformation
def create_map(self, switch_list):
for sw in switch_list:
dpid = sw.dp.id
self.switch_port_table.setdefault(dpid, set())
for p in sw.ports:
self.switch_port_table[dpid].add(p.port_no)
# print "--------------交换机端口情况---------------"
# print self.switch_port_table
#fill the link information
def create_link_port(self, link_list, host_list):
for link in link_list:
src = link.src
dst = link.dst
self.link_to_port[(src.dpid, src.port_no)] = (dst.dpid, dst.port_no)
self.link_to_port[(dst.dpid, dst.port_no)] = (src.dpid, src.port_no)
self.host_or_switch[(src.dpid, src.port_no)] = 1
self.host_or_switch[(dst.dpid, dst.port_no)] = 1
for host in host_list:
port = host.port
self.link_to_port[(port.dpid, port.port_no)] = (host.mac, host.ipv4)
self.host_or_switch[(port.dpid, port.port_no)] = 2
#packein message handler (it is useless in this function)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packetin_handler(self, ev):
# print ev.msg
msg = ev.msg
pkt = packet.Packet(msg.data)
# print pkt.get_protocols
dpid = msg.datapath.id
port = msg.match['in_port']
self.get_topology(None)
events = [event.EventSwitchEnter,
event.EventSwitchLeave, event.EventPortAdd,
event.EventPortDelete, event.EventPortModify,
event.EventLinkAdd, event.EventLinkDelete]
#monitor the change in link information
@set_ev_cls(events)
def get_topology(self, ev):
self.create_map(get_switch(self.topology_api_app))
# print get_host(self.topology_api_app)
# print type(get_host(self.topology_api_app))
self.create_link_port(get_link(self.topology_api_app), get_host(self.topology_api_app))
# self.show_topology()
#some command line output typesetting
def show_topology(self):
i = 1
print ("")
print ("")
print ("")
print ("----------------" * 2, "physical topology", "----------------" * 6)
for dpid in self.switch_port_table.keys():
print ("switch%d ----------dpid---------- " % i,)
for port_no in self.switch_port_table[dpid]:
print ("-----------port %s-----------" % port_no,)
print ("")
print (" ", "%11d" % dpid ,"%12s" % " ",)
# # print self.switch_port_table[dpid]
try:
for port_no in self.switch_port_table[dpid]:
if self.host_or_switch[(dpid, port_no)] == 1:
print ("%10s" % "switch", "%d" % self.link_to_port[(dpid, port_no)][0], " port %d" % self.link_to_port[(dpid, port_no)][1], " ",)
elif self.host_or_switch[(dpid, port_no)] == 2:
print ("%s" % "host", "mac: %s" % self.link_to_port[(dpid, port_no)][0],)
else:
print ("%28s" % "None")
print ("")
print (" ", "%23s" % " ",)
for port_no in self.switch_port_table[dpid]:
if self.host_or_switch[(dpid, port_no)] == 2:
print (" ipv4 :", self.link_to_port[(dpid, port_no)][1],)
else:
print ("%28s" % " ",)
print
except Exception as error:
print ("please input pingall in mininet and wait a momment until it's finished")
i = i + 1
print ("------------------" * 8)
print ("")
print ("")
print ("") | 0.182316 | 0.090053 |
import collections
from io import StringIO
import os
import time
import requests
from ns_stitchclient.transit.writer import Writer
DEFAULT_MAX_BATCH_SIZE_BYTES = 4194304
DEFAULT_BATCH_DELAY_SECONDS = 60.0
MAX_MESSAGES_PER_BATCH = 20000
DEFAULT_STITCH_URL = 'https://api.stitchdata.com/v2/import/push'
class MessageTooLargeException(Exception):
    """Raised when a single encoded record exceeds the maximum batch size."""
    pass
def encode_transit(records):
    '''Returns the records serialized as Transit/json in utf8'''
    # Writer comes from ns_stitchclient.transit; it serializes *records*
    # into the buffer in Transit-over-JSON form, which is then encoded to
    # UTF-8 bytes for the HTTP body.
    with StringIO() as buf:
        writer = Writer(buf, "json")
        writer.write(records)
        return buf.getvalue().encode('utf8')
def partition_batch(entries, max_batch_size_bytes):
    """Split *entries* (BufferEntry list) into encoded bodies under the limit.

    Greedily tries to encode the whole remaining range [start, end); when
    the encoded form is too large the range is halved until it fits, then
    the window advances past the records just batched.

    Returns:
        List of (encoded_bytes, callback_args) tuples.
    Raises:
        MessageTooLargeException: when even a single record exceeds
            *max_batch_size_bytes*.
    """
    start = 0
    end = len(entries)
    result = []
    while start < end:
        partitioned_entries = entries[start : end]
        records = [e.value for e in partitioned_entries]
        encoded = encode_transit(records)
        if len(encoded) <= max_batch_size_bytes:
            result.append((encoded, [e.callback_arg for e in partitioned_entries]))
            # If end is less than length of entries we're not done yet.
            # Advance start to end, and advance end by the number of
            # records we just put in the batch.
            if end < len(entries):
                start = end
                end = min(end + len(records), len(entries))
            # If end is at the end of the input entries, we're done.
            else:
                break
        # The size of the encoded records in our range is too large. If we
        # have more than one record in our range, cut the range in half
        # and try again.
        elif end - start > 1:
            end = start + (end - start) // 2
        else:
            raise MessageTooLargeException(
                ('A single message is larger then the maximum batch size. ' +
                 'Message size: {}. Max batch size: {}')
                .format(len(encoded), max_batch_size_bytes))
    return result
# A single buffered record plus the opaque argument handed to the
# user-supplied callback once the record has been delivered.
BufferEntry = collections.namedtuple(
    'BufferEntry',
    ['value', 'callback_arg'])
# Per-batch accounting used for the moving average of bytes per record.
BatchStatsEntry = collections.namedtuple(
    'BatchStatsEntry', ['num_records', 'num_bytes'])
class Client(object):
    """Buffering client for the Stitch Import API.

    Records pushed via push() are buffered and sent in batches that
    respect both a byte-size limit and a time-based flush delay.  Usable
    as a context manager, which flushes remaining records on exit.
    """
    def __init__(self,
                 client_id,
                 token,
                 table_name=None,
                 key_names=None,
                 callback_function=None,
                 stitch_url=DEFAULT_STITCH_URL,
                 max_batch_size_bytes=DEFAULT_MAX_BATCH_SIZE_BYTES,
                 batch_delay_seconds=DEFAULT_BATCH_DELAY_SECONDS):
        assert isinstance(client_id, int), 'client_id is not an integer: {}'.format(client_id)  # nopep8
        self.max_messages_per_batch = MAX_MESSAGES_PER_BATCH
        self.client_id = client_id
        self.token = token
        self.table_name = table_name
        self.key_names = key_names
        self.stitch_url = stitch_url
        self.max_batch_size_bytes = max_batch_size_bytes
        self.batch_delay_seconds = batch_delay_seconds
        self.callback_function = callback_function
        self._buffer = []
        # Stats we update as we send records
        self.time_last_batch_sent = time.time()
        self.batch_stats = collections.deque(maxlen=100)
        # We'll try using a big batch size to start out
        self.target_messages_per_batch = self.max_messages_per_batch
    def _add_message(self, message, callback_arg):
        """Append one record (plus its callback payload) to the buffer."""
        self._buffer.append(BufferEntry(value=message,
                                        callback_arg=callback_arg))
    def moving_average_bytes_per_record(self):
        """Average bytes per record over the last (up to) 100 batches.

        Raises ZeroDivisionError when no batches have been sent yet;
        callers handle that case explicitly.
        """
        num_records = 0
        num_bytes = 0
        for stats in self.batch_stats:
            num_records += stats.num_records
            num_bytes += stats.num_bytes
        return num_bytes // num_records
    def push(self, message, callback_arg=None):
        """message should be a dict recognized by the Stitch Import API.

        See https://www.stitchdata.com/docs/integrations/import-api.
        """
        # Fill in per-client defaults; client_id is always stamped on.
        if message['action'] == 'upsert':
            message.setdefault('key_names', self.key_names)
        message['client_id'] = self.client_id
        message.setdefault('table_name', self.table_name)
        self._add_message(message, callback_arg)
        batch = self._take_batch(self.target_messages_per_batch)
        if batch:
            self._send_batch(batch)
    def _take_batch(self, min_records):
        '''If we have enough data to build a batch, returns all the data in the
        buffer and then clears the buffer.'''
        if not self._buffer:
            return []
        # Send when either enough messages accumulated or enough time passed.
        enough_messages = len(self._buffer) >= min_records
        enough_time = time.time() - self.time_last_batch_sent >= self.batch_delay_seconds
        ready = enough_messages or enough_time
        if not ready:
            return []
        result = list(self._buffer)
        self._buffer.clear()
        return result
    def _send_batch(self, batch):
        """Partition *batch* under the byte limit, send each part, and
        retune target_messages_per_batch from the observed record size."""
        for body, callback_args in partition_batch(batch, self.max_batch_size_bytes):
            self._send(body, callback_args)
        try:
            moving_average = self.moving_average_bytes_per_record()
            # NOTE(review): 0.8 * (...) makes this a float; it is only ever
            # compared with len(buffer), so that appears intentional.
            self.target_messages_per_batch = \
                min(self.max_messages_per_batch,
                    0.8 * (self.max_batch_size_bytes / moving_average))
        except ZeroDivisionError:
            # Handle the case where there are no records
            pass
    def _stitch_request(self, body):
        """POST *body* (transit+json bytes) to the Stitch endpoint."""
        headers = {'Authorization': 'Bearer {}'.format(self.token),
                   'Content-Type': 'application/transit+json'}
        return requests.post(self.stitch_url, headers=headers, data=body)
    def _send(self, body, callback_args):
        """Send one encoded body; invoke the callback on success, raise on
        HTTP failure, and record timing/size stats either way after."""
        response = self._stitch_request(body)
        if response.status_code < 300:
            if self.callback_function is not None:
                self.callback_function(callback_args)
        else:
            raise RuntimeError("Error sending data to the Stitch API. {0.status_code} - {0.content}"  # nopep8
                               .format(response))
        self.time_last_batch_sent = time.time()
        self.batch_stats.append(BatchStatsEntry(len(callback_args), len(body)))
    def flush(self):
        """Send everything currently buffered, regardless of batch size."""
        batch = self._take_batch(0)
        self._send_batch(batch)
    def __enter__(self):
        return self
    def __exit__(self, exception_type, exception_value, traceback):
        # Flush on context exit even when an exception is propagating.
        self.flush()
if __name__ == "__main__":
with Client(int(os.environ['STITCH_CLIENT_ID']),
os.environ['STITCH_TOKEN'],
callback_function=print) as c:
for i in range(1, 10):
c.push({'action': 'upsert',
'table_name': 'test_table',
'key_names': ['id'],
'sequence': i,
'data': {'id': i, 'value': 'abc'}}, i) | ns_stitchclient/client.py | import collections
from io import StringIO
import os
import time
import requests
from ns_stitchclient.transit.writer import Writer
DEFAULT_MAX_BATCH_SIZE_BYTES = 4194304
DEFAULT_BATCH_DELAY_SECONDS = 60.0
MAX_MESSAGES_PER_BATCH = 20000
DEFAULT_STITCH_URL = 'https://api.stitchdata.com/v2/import/push'
class MessageTooLargeException(Exception):
pass
def encode_transit(records):
'''Returns the records serialized as Transit/json in utf8'''
with StringIO() as buf:
writer = Writer(buf, "json")
writer.write(records)
return buf.getvalue().encode('utf8')
def partition_batch(entries, max_batch_size_bytes):
start = 0
end = len(entries)
result = []
while start < end:
partitioned_entries = entries[start : end]
records = [e.value for e in partitioned_entries]
encoded = encode_transit(records)
if len(encoded) <= max_batch_size_bytes:
result.append((encoded, [e.callback_arg for e in partitioned_entries]))
# If end is less than length of entries we're not done yet.
# Advance start to end, and advance end by the number of
# records we just put in the batch.
if end < len(entries):
start = end
end = min(end + len(records), len(entries))
# If end is at the end of the input entries, we're done.
else:
break
# The size of the encoded records in our range is too large. If we
# have more than one record in our range, cut the range in half
# and try again.
elif end - start > 1:
end = start + (end - start) // 2
else:
raise MessageTooLargeException(
('A single message is larger then the maximum batch size. ' +
'Message size: {}. Max batch size: {}')
.format(len(encoded), max_batch_size_bytes))
return result
BufferEntry = collections.namedtuple(
'BufferEntry',
['value', 'callback_arg'])
BatchStatsEntry = collections.namedtuple(
'BatchStatsEntry', ['num_records', 'num_bytes'])
class Client(object):
def __init__(self,
client_id,
token,
table_name=None,
key_names=None,
callback_function=None,
stitch_url=DEFAULT_STITCH_URL,
max_batch_size_bytes=DEFAULT_MAX_BATCH_SIZE_BYTES,
batch_delay_seconds=DEFAULT_BATCH_DELAY_SECONDS):
assert isinstance(client_id, int), 'client_id is not an integer: {}'.format(client_id) # nopep8
self.max_messages_per_batch = MAX_MESSAGES_PER_BATCH
self.client_id = client_id
self.token = token
self.table_name = table_name
self.key_names = key_names
self.stitch_url = stitch_url
self.max_batch_size_bytes = max_batch_size_bytes
self.batch_delay_seconds = batch_delay_seconds
self.callback_function = callback_function
self._buffer = []
# Stats we update as we send records
self.time_last_batch_sent = time.time()
self.batch_stats = collections.deque(maxlen=100)
# We'll try using a big batch size to start out
self.target_messages_per_batch = self.max_messages_per_batch
def _add_message(self, message, callback_arg):
self._buffer.append(BufferEntry(value=message,
callback_arg=callback_arg))
def moving_average_bytes_per_record(self):
num_records = 0
num_bytes = 0
for stats in self.batch_stats:
num_records += stats.num_records
num_bytes += stats.num_bytes
return num_bytes // num_records
def push(self, message, callback_arg=None):
"""message should be a dict recognized by the Stitch Import API.
See https://www.stitchdata.com/docs/integrations/import-api.
"""
if message['action'] == 'upsert':
message.setdefault('key_names', self.key_names)
message['client_id'] = self.client_id
message.setdefault('table_name', self.table_name)
self._add_message(message, callback_arg)
batch = self._take_batch(self.target_messages_per_batch)
if batch:
self._send_batch(batch)
def _take_batch(self, min_records):
'''If we have enough data to build a batch, returns all the data in the
buffer and then clears the buffer.'''
if not self._buffer:
return []
enough_messages = len(self._buffer) >= min_records
enough_time = time.time() - self.time_last_batch_sent >= self.batch_delay_seconds
ready = enough_messages or enough_time
if not ready:
return []
result = list(self._buffer)
self._buffer.clear()
return result
def _send_batch(self, batch):
for body, callback_args in partition_batch(batch, self.max_batch_size_bytes):
self._send(body, callback_args)
try:
moving_average = self.moving_average_bytes_per_record()
self.target_messages_per_batch = \
min(self.max_messages_per_batch,
0.8 * (self.max_batch_size_bytes / moving_average))
except ZeroDivisionError:
# Handle the case where there are no records
pass
def _stitch_request(self, body):
headers = {'Authorization': 'Bearer {}'.format(self.token),
'Content-Type': 'application/transit+json'}
return requests.post(self.stitch_url, headers=headers, data=body)
def _send(self, body, callback_args):
response = self._stitch_request(body)
if response.status_code < 300:
if self.callback_function is not None:
self.callback_function(callback_args)
else:
raise RuntimeError("Error sending data to the Stitch API. {0.status_code} - {0.content}" # nopep8
.format(response))
self.time_last_batch_sent = time.time()
self.batch_stats.append(BatchStatsEntry(len(callback_args), len(body)))
def flush(self):
batch = self._take_batch(0)
self._send_batch(batch)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.flush()
if __name__ == "__main__":
with Client(int(os.environ['STITCH_CLIENT_ID']),
os.environ['STITCH_TOKEN'],
callback_function=print) as c:
for i in range(1, 10):
c.push({'action': 'upsert',
'table_name': 'test_table',
'key_names': ['id'],
'sequence': i,
'data': {'id': i, 'value': 'abc'}}, i) | 0.536556 | 0.242262 |
import json
import time
from typing import Any
import nonebot
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from nonebot.adapters import Bot
from nonebot.log import logger
from .config import Config
_cooldown_events = {}
driver = nonebot.get_driver()
config = Config(**driver.config.dict())
BACKUP_FILE = config.cd_backup_file
scheduler = AsyncIOScheduler()
def set_event(token: str, duration: int, event_type='normal', **kwargs) -> (
None):
"""
添加/更新冷却事件
参数:
- `token: str`:事件标签。
- `duration: int`:冷却事件持续时间(秒)。
关键字参数:
- `event_type: str`:事件类型,默认为 `normal`。包括:
- `global`:全局冷却事件;
- `group`:群组冷却事件,需要额外的关键字参数 `group: int` 指定群组 ID;
- `normal`:一般冷却事件,需要额外的关键字参数 `group: int` 和
`user: int` 分别指定群组 ID 和用户 ID;
- `user`:用户冷却事件,需要额外的关键字参数 `user: int` 指定用户 ID。
"""
global _cooldown_events
group = kwargs.get('group') if event_type in ('group', 'normal') else 0
user = kwargs.get('user') if event_type in ('normal', 'user') else 0
if not _cooldown_events.get(token):
_cooldown_events[token] = []
current_time = int(time.time())
result = {
'group': group,
'user': user,
'expired_time': current_time + duration
}
# 更新记录
for i, record in enumerate(_cooldown_events[token]):
if record.get('group') == group and record.get('user') == user:
_cooldown_events[token][i] = result
logger.debug(f'Cooldown event {token}({result}) has been updated.')
return
# 添加记录
_cooldown_events[token].append(result)
logger.warning(_cooldown_events)
logger.debug(f'Cooldown event {token}({result}) has been set.')
def get_event(token: str, ignore_priority=False, event_type='normal',
**kwargs) -> dict[str, Any]:
"""
获取冷却事件状态。
通常情况下,当存在较高优先级的事件正在生效时,返回较高优先级的事件状态。详
见参数 `ignore_priority` 注释。
参数:
- `token: str`:事件标签。
关键字参数:
- `ignore_priority: bool`:忽略事件优先级,默认为 `False`。当忽略事件优先级
时,将严格根据事件类型返回对应的事件状态,否则返回较高优先级的事件状态。
- `event_type: str`:事件类型,默认为 `normal`。包括:
- `global`:全局冷却事件;
- `group`:群组冷却事件,需要额外的关键字参数 `group: int` 指定群组 ID;
- `normal`:一般冷却事件,需要额外的关键字参数 `group: int` 和
`user: int` 分别指定群组 ID 和用户 ID;
- `user`:用户冷却事件,需要额外的关键字参数 `user: int` 指定用户 ID。
返回:
- `Dict[str, Any]`:事件状态。包含两个字段:
- `status: bool`:冷却状态,其中 `True` 表示冷却正在生效,反之则为
`False`;
- `remaining: int`:冷却剩余时间(秒),当 `status` 字段值为 `False` 时
该字段值为 0。
"""
global _cooldown_events
status = False
remaining = 0
current_time = int(time.time())
group = kwargs.get('group') if event_type in ('group', 'normal') else 0
user = kwargs.get('user') if event_type in ('normal', 'user') else 0
if (records := _cooldown_events.get(token)):
for record in records:
record_group = record.get('group')
record_user = record.get('user')
expired_time = record.get('expired_time')
# 冷却事件正在生效
is_valid = expired_time - current_time >= 0
# 事件记录为全局冷却事件
is_global_record = not record_group and not record_user
# 事件记录为群组冷却事件
is_group_record = record_group == group and not record_user
# 事件记录为一般冷却事件
is_normal_record = record_group == group and record_user == user
# 事件记录为用户冷却事件
is_user_record = not record_group and record_user == user
# 忽略优先级模式
ignore_priority_pattern = ignore_priority and is_normal_record
# 一般模式
normal_pattern = (is_global_record or is_group_record or
is_normal_record or is_user_record)
if (ignore_priority_pattern or normal_pattern) and is_valid:
status = True
remaining = expired_time - current_time
return {
'status': status,
'remaining': remaining
}
def del_event(token: str, event_type='normal', **kwargs) -> None:
"""
移除冷却事件。
参数:
- `token: str`:事件标签。
关键字参数:
- `event_type: str`:事件类型,默认为 `normal`。包括:
- `global`:全局冷却事件;
- `group`:群组冷却事件,需要额外的关键字参数 `group: int` 指定群组 ID;
- `normal`:一般冷却事件,需要额外的关键字参数 `group: int` 和
`user: int` 分别指定群组 ID 和用户 ID;
- `user`:用户冷却事件,需要额外的关键字参数 `user: int` 指定用户 ID。
"""
global _cooldown_events
group = kwargs.get('group') if event_type in ('group', 'normal') else 0
user = kwargs.get('user') if event_type in ('normal', 'user') else 0
records = _cooldown_events.get(token)
for i, record in enumerate(records):
if record.get('group') == group and record.get('user') == user:
del records[i]
logger.info(f'Event {token}({record}) has been removed manually.')
def time_format(timestamp: int, preset='std') -> str:
"""
格式化输出剩余时间信息。
参数:
- `timestamp: int`:时间戳。
关键字参数:
- `preset: str`:格式名称,可用的格式名称有:
- `std`:标准格式,以冒号分隔日、时、分、秒,例如 `05:04:03:02`;
- `zh`:中文格式,例如 `5天4小时3分2秒`。
默认值为 `std`。
返回:
- `str`:格式化的时间信息
"""
days = abs(timestamp) // 86400
hours = (abs(timestamp) - days * 86400) // 3600
minutes = (abs(timestamp) - days * 86400 - hours * 3600) // 60
seconds = abs(timestamp) - days * 86400 - hours * 3600 - minutes * 60
if preset == 'std':
return (f'{str(days).zfill(2)}:{str(hours).zfill(2)}:'
f'{str(minutes).zfill(2)}:{str(seconds).zfill(2)}')
elif preset == 'zh':
result = []
if days:
result.append(f'{days}天')
if hours:
result.append(f'{hours}小时')
if minutes:
result.append(f'{minutes}分')
if seconds or (not days and not hours and not minutes):
result.append(f'{seconds}秒')
return ''.join(result)
@driver.on_startup
def _init() -> None:
"""初始化 scheduler。"""
if not scheduler.running:
scheduler.start()
logger.info('Scheduler started')
@driver.on_startup
def _restore() -> None:
"""驱动启动时从备份文件恢复数据。"""
global _cooldown_events
if BACKUP_FILE.exists():
with open(BACKUP_FILE) as backup:
_cooldown_events = json.load(backup)
logger.debug(f'Restored data from file {BACKUP_FILE.absolute()}.')
else:
logger.warning(f'Backup file {BACKUP_FILE.absolute()} does not exist, '
'skip restoring.')
@driver.on_startup
@scheduler.scheduled_job('interval', seconds=config.cd_autoremove_period)
def _remove_expired() -> None:
"""
自动移除过期事件。
自动移除时间间隔可通过配置项 `CD_AUTOREMOVE_PERIOD` 自定义,默认时间为 3600
秒。
"""
global _cooldown_events
count = 0
current_time = int(time.time())
# 移除过期的事件记录
for _, records in _cooldown_events.items():
for i, record in enumerate(records):
if record.get('expired_time') - current_time <= 0:
del records[i]
count += 1
# 移除无事件记录的事件标签
_cooldown_events = {k: v for k, v in _cooldown_events.items() if v}
logger.debug(f'Automatically removed expired cooldown records: '
f'{count} {"records" if count != 1 else "event"} removed.')
@driver.on_startup
@scheduler.scheduled_job('interval', seconds=config.cd_autobackup_period)
def _auto_backup() -> None:
"""
自动备份数据。
自动备份时间周期可通过配置项 `CD_AUTOBACKUP_PERIOD` 自定义,默认时间为 600
秒。
"""
_backup()
@driver.on_bot_disconnect
async def _backup_on_disconnect(bot: Bot) -> None:
"""
Bot 断开连接时备份数据。
参数:
- `bot: nonebot.adapters.cqhttp.Bot`:Bot 对象。
"""
_backup()
def _backup() -> None:
"""备份冷却事件。"""
global _cooldown_events
if not (path := BACKUP_FILE.parent).is_dir():
path.mkdir()
with open(BACKUP_FILE, 'w') as backup:
json.dump(_cooldown_events, backup, indent=4)
logger.debug(f'Backed up cooldown data to file '
f'{BACKUP_FILE.absolute()}.') | nonebot_plugin_cooldown/cooldown.py | import json
import time
from typing import Any
import nonebot
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from nonebot.adapters import Bot
from nonebot.log import logger
from .config import Config
_cooldown_events = {}
driver = nonebot.get_driver()
config = Config(**driver.config.dict())
BACKUP_FILE = config.cd_backup_file
scheduler = AsyncIOScheduler()
def set_event(token: str, duration: int, event_type='normal', **kwargs) -> (
None):
"""
添加/更新冷却事件
参数:
- `token: str`:事件标签。
- `duration: int`:冷却事件持续时间(秒)。
关键字参数:
- `event_type: str`:事件类型,默认为 `normal`。包括:
- `global`:全局冷却事件;
- `group`:群组冷却事件,需要额外的关键字参数 `group: int` 指定群组 ID;
- `normal`:一般冷却事件,需要额外的关键字参数 `group: int` 和
`user: int` 分别指定群组 ID 和用户 ID;
- `user`:用户冷却事件,需要额外的关键字参数 `user: int` 指定用户 ID。
"""
global _cooldown_events
group = kwargs.get('group') if event_type in ('group', 'normal') else 0
user = kwargs.get('user') if event_type in ('normal', 'user') else 0
if not _cooldown_events.get(token):
_cooldown_events[token] = []
current_time = int(time.time())
result = {
'group': group,
'user': user,
'expired_time': current_time + duration
}
# 更新记录
for i, record in enumerate(_cooldown_events[token]):
if record.get('group') == group and record.get('user') == user:
_cooldown_events[token][i] = result
logger.debug(f'Cooldown event {token}({result}) has been updated.')
return
# 添加记录
_cooldown_events[token].append(result)
logger.warning(_cooldown_events)
logger.debug(f'Cooldown event {token}({result}) has been set.')
def get_event(token: str, ignore_priority=False, event_type='normal',
**kwargs) -> dict[str, Any]:
"""
获取冷却事件状态。
通常情况下,当存在较高优先级的事件正在生效时,返回较高优先级的事件状态。详
见参数 `ignore_priority` 注释。
参数:
- `token: str`:事件标签。
关键字参数:
- `ignore_priority: bool`:忽略事件优先级,默认为 `False`。当忽略事件优先级
时,将严格根据事件类型返回对应的事件状态,否则返回较高优先级的事件状态。
- `event_type: str`:事件类型,默认为 `normal`。包括:
- `global`:全局冷却事件;
- `group`:群组冷却事件,需要额外的关键字参数 `group: int` 指定群组 ID;
- `normal`:一般冷却事件,需要额外的关键字参数 `group: int` 和
`user: int` 分别指定群组 ID 和用户 ID;
- `user`:用户冷却事件,需要额外的关键字参数 `user: int` 指定用户 ID。
返回:
- `Dict[str, Any]`:事件状态。包含两个字段:
- `status: bool`:冷却状态,其中 `True` 表示冷却正在生效,反之则为
`False`;
- `remaining: int`:冷却剩余时间(秒),当 `status` 字段值为 `False` 时
该字段值为 0。
"""
global _cooldown_events
status = False
remaining = 0
current_time = int(time.time())
group = kwargs.get('group') if event_type in ('group', 'normal') else 0
user = kwargs.get('user') if event_type in ('normal', 'user') else 0
if (records := _cooldown_events.get(token)):
for record in records:
record_group = record.get('group')
record_user = record.get('user')
expired_time = record.get('expired_time')
# 冷却事件正在生效
is_valid = expired_time - current_time >= 0
# 事件记录为全局冷却事件
is_global_record = not record_group and not record_user
# 事件记录为群组冷却事件
is_group_record = record_group == group and not record_user
# 事件记录为一般冷却事件
is_normal_record = record_group == group and record_user == user
# 事件记录为用户冷却事件
is_user_record = not record_group and record_user == user
# 忽略优先级模式
ignore_priority_pattern = ignore_priority and is_normal_record
# 一般模式
normal_pattern = (is_global_record or is_group_record or
is_normal_record or is_user_record)
if (ignore_priority_pattern or normal_pattern) and is_valid:
status = True
remaining = expired_time - current_time
return {
'status': status,
'remaining': remaining
}
def del_event(token: str, event_type='normal', **kwargs) -> None:
"""
移除冷却事件。
参数:
- `token: str`:事件标签。
关键字参数:
- `event_type: str`:事件类型,默认为 `normal`。包括:
- `global`:全局冷却事件;
- `group`:群组冷却事件,需要额外的关键字参数 `group: int` 指定群组 ID;
- `normal`:一般冷却事件,需要额外的关键字参数 `group: int` 和
`user: int` 分别指定群组 ID 和用户 ID;
- `user`:用户冷却事件,需要额外的关键字参数 `user: int` 指定用户 ID。
"""
global _cooldown_events
group = kwargs.get('group') if event_type in ('group', 'normal') else 0
user = kwargs.get('user') if event_type in ('normal', 'user') else 0
records = _cooldown_events.get(token)
for i, record in enumerate(records):
if record.get('group') == group and record.get('user') == user:
del records[i]
logger.info(f'Event {token}({record}) has been removed manually.')
def time_format(timestamp: int, preset='std') -> str:
"""
格式化输出剩余时间信息。
参数:
- `timestamp: int`:时间戳。
关键字参数:
- `preset: str`:格式名称,可用的格式名称有:
- `std`:标准格式,以冒号分隔日、时、分、秒,例如 `05:04:03:02`;
- `zh`:中文格式,例如 `5天4小时3分2秒`。
默认值为 `std`。
返回:
- `str`:格式化的时间信息
"""
days = abs(timestamp) // 86400
hours = (abs(timestamp) - days * 86400) // 3600
minutes = (abs(timestamp) - days * 86400 - hours * 3600) // 60
seconds = abs(timestamp) - days * 86400 - hours * 3600 - minutes * 60
if preset == 'std':
return (f'{str(days).zfill(2)}:{str(hours).zfill(2)}:'
f'{str(minutes).zfill(2)}:{str(seconds).zfill(2)}')
elif preset == 'zh':
result = []
if days:
result.append(f'{days}天')
if hours:
result.append(f'{hours}小时')
if minutes:
result.append(f'{minutes}分')
if seconds or (not days and not hours and not minutes):
result.append(f'{seconds}秒')
return ''.join(result)
@driver.on_startup
def _init() -> None:
"""初始化 scheduler。"""
if not scheduler.running:
scheduler.start()
logger.info('Scheduler started')
@driver.on_startup
def _restore() -> None:
"""驱动启动时从备份文件恢复数据。"""
global _cooldown_events
if BACKUP_FILE.exists():
with open(BACKUP_FILE) as backup:
_cooldown_events = json.load(backup)
logger.debug(f'Restored data from file {BACKUP_FILE.absolute()}.')
else:
logger.warning(f'Backup file {BACKUP_FILE.absolute()} does not exist, '
'skip restoring.')
@driver.on_startup
@scheduler.scheduled_job('interval', seconds=config.cd_autoremove_period)
def _remove_expired() -> None:
"""
自动移除过期事件。
自动移除时间间隔可通过配置项 `CD_AUTOREMOVE_PERIOD` 自定义,默认时间为 3600
秒。
"""
global _cooldown_events
count = 0
current_time = int(time.time())
# 移除过期的事件记录
for _, records in _cooldown_events.items():
for i, record in enumerate(records):
if record.get('expired_time') - current_time <= 0:
del records[i]
count += 1
# 移除无事件记录的事件标签
_cooldown_events = {k: v for k, v in _cooldown_events.items() if v}
logger.debug(f'Automatically removed expired cooldown records: '
f'{count} {"records" if count != 1 else "event"} removed.')
@driver.on_startup
@scheduler.scheduled_job('interval', seconds=config.cd_autobackup_period)
def _auto_backup() -> None:
"""
自动备份数据。
自动备份时间周期可通过配置项 `CD_AUTOBACKUP_PERIOD` 自定义,默认时间为 600
秒。
"""
_backup()
@driver.on_bot_disconnect
async def _backup_on_disconnect(bot: Bot) -> None:
"""
Bot 断开连接时备份数据。
参数:
- `bot: nonebot.adapters.cqhttp.Bot`:Bot 对象。
"""
_backup()
def _backup() -> None:
"""备份冷却事件。"""
global _cooldown_events
if not (path := BACKUP_FILE.parent).is_dir():
path.mkdir()
with open(BACKUP_FILE, 'w') as backup:
json.dump(_cooldown_events, backup, indent=4)
logger.debug(f'Backed up cooldown data to file '
f'{BACKUP_FILE.absolute()}.') | 0.530723 | 0.138928 |
from datetime import timedelta
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.queues import queue_tools
from mayan.apps.task_manager.classes import CeleryQueue
from mayan.apps.task_manager.workers import worker_fast, worker_medium
from celery.schedules import crontab
from .literals import (
CHECK_DELETE_PERIOD_INTERVAL, CHECK_TRASH_PERIOD_INTERVAL,
DELETE_STALE_STUBS_INTERVAL,DEFAULT_STUB_EXPIRATION_INTERVAL
)
queue_converter = CeleryQueue(
name='converter', label=_('Converter'), transient=True, worker=worker_fast
)
queue_documents_periodic = CeleryQueue(
name='documents_periodic', label=_('Documents periodic'), transient=True,
worker=worker_medium
)
queue_uploads = CeleryQueue(
name='uploads', label=_('Uploads'), worker=worker_medium
)
queue_documents = CeleryQueue(
name='documents', label=_('Documents'), worker=worker_medium
)
queue_converter.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_generate_document_page_image',
label=_('Generate document page image')
)
queue_documents.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_delete_document',
label=_('Delete a document')
)
queue_documents.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_clean_empty_duplicate_lists',
label=_('Clean empty duplicate lists')
)
queue_documents.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_trash_can_empty',
label=_('Empty the trash can')
)
# #客户化代码 按照生效日期生效文档
# queue_documents.add_task_type(
# dotted_path='mayan.apps.documents.tasks.task_check_effective_doc',
# label=_('check the effective date')
# )
#客户化代码 每天按照生效日期检查文档生效:DEFAULT_STUB_EXPIRATION_INTERVAL
queue_documents_periodic.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_check_effective_doc',
label=_('check the effective date daily'),
name='task_check_effective_doc',
schedule=crontab(minute=10, hour=4),
)
#客户化代码 每天按照废止时间检查文档报废:DEFAULT_STUB_EXPIRATION_INTERVAL
queue_documents_periodic.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_check_expired_doc',
label=_('check the expired date daily'),
name='task_check_expired_doc',
schedule=crontab(minute=10, hour=1),
)
queue_documents_periodic.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_check_delete_periods',
label=_('Check document type delete periods'),
name='task_check_delete_periods',
schedule=timedelta(
seconds=CHECK_DELETE_PERIOD_INTERVAL
),
)
queue_documents_periodic.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_check_trash_periods',
label=_('Check document type trash periods'),
name='task_check_trash_periods',
schedule=timedelta(seconds=CHECK_TRASH_PERIOD_INTERVAL),
)
queue_documents_periodic.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_delete_stubs',
label=_('Delete document stubs'),
name='task_delete_stubs',
schedule=timedelta(seconds=DELETE_STALE_STUBS_INTERVAL),
)
queue_tools.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_scan_duplicates_all',
label=_('Duplicated document scan')
)
queue_uploads.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_update_page_count',
label=_('Update document page count')
)
queue_uploads.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_upload_new_version',
label=_('Upload new document version')
)
queue_uploads.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_scan_duplicates_for',
label=_('Scan document duplicates')
) | mayan/apps/documents/queues.py | from datetime import timedelta
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.queues import queue_tools
from mayan.apps.task_manager.classes import CeleryQueue
from mayan.apps.task_manager.workers import worker_fast, worker_medium
from celery.schedules import crontab
from .literals import (
CHECK_DELETE_PERIOD_INTERVAL, CHECK_TRASH_PERIOD_INTERVAL,
DELETE_STALE_STUBS_INTERVAL,DEFAULT_STUB_EXPIRATION_INTERVAL
)
queue_converter = CeleryQueue(
name='converter', label=_('Converter'), transient=True, worker=worker_fast
)
queue_documents_periodic = CeleryQueue(
name='documents_periodic', label=_('Documents periodic'), transient=True,
worker=worker_medium
)
queue_uploads = CeleryQueue(
name='uploads', label=_('Uploads'), worker=worker_medium
)
queue_documents = CeleryQueue(
name='documents', label=_('Documents'), worker=worker_medium
)
queue_converter.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_generate_document_page_image',
label=_('Generate document page image')
)
queue_documents.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_delete_document',
label=_('Delete a document')
)
queue_documents.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_clean_empty_duplicate_lists',
label=_('Clean empty duplicate lists')
)
queue_documents.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_trash_can_empty',
label=_('Empty the trash can')
)
# #客户化代码 按照生效日期生效文档
# queue_documents.add_task_type(
# dotted_path='mayan.apps.documents.tasks.task_check_effective_doc',
# label=_('check the effective date')
# )
#客户化代码 每天按照生效日期检查文档生效:DEFAULT_STUB_EXPIRATION_INTERVAL
queue_documents_periodic.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_check_effective_doc',
label=_('check the effective date daily'),
name='task_check_effective_doc',
schedule=crontab(minute=10, hour=4),
)
#客户化代码 每天按照废止时间检查文档报废:DEFAULT_STUB_EXPIRATION_INTERVAL
queue_documents_periodic.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_check_expired_doc',
label=_('check the expired date daily'),
name='task_check_expired_doc',
schedule=crontab(minute=10, hour=1),
)
queue_documents_periodic.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_check_delete_periods',
label=_('Check document type delete periods'),
name='task_check_delete_periods',
schedule=timedelta(
seconds=CHECK_DELETE_PERIOD_INTERVAL
),
)
queue_documents_periodic.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_check_trash_periods',
label=_('Check document type trash periods'),
name='task_check_trash_periods',
schedule=timedelta(seconds=CHECK_TRASH_PERIOD_INTERVAL),
)
queue_documents_periodic.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_delete_stubs',
label=_('Delete document stubs'),
name='task_delete_stubs',
schedule=timedelta(seconds=DELETE_STALE_STUBS_INTERVAL),
)
queue_tools.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_scan_duplicates_all',
label=_('Duplicated document scan')
)
queue_uploads.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_update_page_count',
label=_('Update document page count')
)
queue_uploads.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_upload_new_version',
label=_('Upload new document version')
)
queue_uploads.add_task_type(
dotted_path='mayan.apps.documents.tasks.task_scan_duplicates_for',
label=_('Scan document duplicates')
) | 0.430626 | 0.089335 |
from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from mainPage.views import *
from django.contrib.auth import views as auth_views
from users import views as user_views
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('mainPage.urls')),
path('home/', include('mainPage.urls')),
path('login/', login_form_view, name='login_form'),
path('register/', register_form_view, name='register_form'),
path('advocacy-team-form/', advocacy_form_view, name = 'advocacy_form'),
path('clinical-team-form/', clinical_form_view, name = 'clinical_form'),
path('clinical-voca-team-form/', clinical_voca_form_view, name = 'clinical_voca_form'),
path('MAP-team-form/', map_form_view, name = 'map_form'),
path('OV-team-form/', ov_form_view, name = 'ov_form'),
path('SAFE-Clinic-team-form/', safe_clinic_form_view, name = 'safe_form'),
path('Crisis-Line-team-form/', crisis_line_form_view, name = 'crisis_line_form'),
path('prevention-team-form/', prevention_form_view, name = 'prevention_form'),
path('training-team-form/', training_form_view, name = 'training_form'),
path('development-team-form/', development_form_view, name = 'development_form'),
path('safe_clinic/', safeClinicTeamView.as_view(), name='safe_clinic'),
path('clinical/', clinicalTeamView.as_view(), name='clinical'),
path('advocacy/', advocacyTeamView.as_view(), name='advocacy'),
path('ov/', ovTeamView.as_view(), name='ov'),
path('map/', mapTeamView.as_view(), name='map'),
path('crisis_line/', crisisLineTeamView.as_view(), name='crisis_line'),
path('prevention/', preventionTeamView.as_view(), name='prevention'),
path('training/', trainingTeamView.as_view(), name='training'),
path('development/', developmentTeamView.as_view(), name='development'),
path('clinical-export/', clinical_export, name = 'clinical_export'),
path('advocacy-export/', advocacy_export, name = 'advocacy_export'),
path('clinical-voca-export/', clinical_voca_export, name = 'clinical_voca_export'),
path('map-export/', map_export, name = 'map_export'),
path('ov-export/', ov_export, name = 'ov_export'),
path('safe-clinic-export/', safe_clinic_export, name = 'safe_clinic_export'),
path('crisis-line-export/', crisis_line_export, name = 'crisis_line_export'),
path('prevention-export/', prevention_export, name = 'prevention_export'),
path('training-export/', training_export, name = 'training_export'),
path('development-export/', development_export, name = 'development_export'),
path('form-confirmation/', form_confirmation, name = 'form_confirmation'),
path('logout/', logout_request, name="logout"),
#reset password urls
path('reset_password/',
auth_views.PasswordResetView.as_view(template_name="mainPage/password_reset.html"),
name="reset_password"),
path('reset_password_sent/',
auth_views.PasswordResetDoneView.as_view(template_name="mainPage/password_reset_sent.html"),
name="password_reset_done"),
path('reset/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(template_name="mainPage/password_reset_form.html"),
name="password_reset_confirm"),
path('reset_password_complete/',
auth_views.PasswordResetCompleteView.as_view(template_name="mainPage/password_reset_done.html"),
name="password_reset_complete"),
]
urlpatterns += staticfiles_urlpatterns()
'''
1 - Submit email form //PasswordResetView.as_view()
2 - Email sent success message //PasswordResetDoneView.as_view()
3 - Link to password Rest form in email //PasswordResetConfirmView.as_view()
4 - Password successfully changed message //PasswordResetCompleteView.as_view()
''' | SACWebApp/SACWebApp/urls.py | from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from mainPage.views import *
from django.contrib.auth import views as auth_views
from users import views as user_views
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('mainPage.urls')),
path('home/', include('mainPage.urls')),
path('login/', login_form_view, name='login_form'),
path('register/', register_form_view, name='register_form'),
path('advocacy-team-form/', advocacy_form_view, name = 'advocacy_form'),
path('clinical-team-form/', clinical_form_view, name = 'clinical_form'),
path('clinical-voca-team-form/', clinical_voca_form_view, name = 'clinical_voca_form'),
path('MAP-team-form/', map_form_view, name = 'map_form'),
path('OV-team-form/', ov_form_view, name = 'ov_form'),
path('SAFE-Clinic-team-form/', safe_clinic_form_view, name = 'safe_form'),
path('Crisis-Line-team-form/', crisis_line_form_view, name = 'crisis_line_form'),
path('prevention-team-form/', prevention_form_view, name = 'prevention_form'),
path('training-team-form/', training_form_view, name = 'training_form'),
path('development-team-form/', development_form_view, name = 'development_form'),
path('safe_clinic/', safeClinicTeamView.as_view(), name='safe_clinic'),
path('clinical/', clinicalTeamView.as_view(), name='clinical'),
path('advocacy/', advocacyTeamView.as_view(), name='advocacy'),
path('ov/', ovTeamView.as_view(), name='ov'),
path('map/', mapTeamView.as_view(), name='map'),
path('crisis_line/', crisisLineTeamView.as_view(), name='crisis_line'),
path('prevention/', preventionTeamView.as_view(), name='prevention'),
path('training/', trainingTeamView.as_view(), name='training'),
path('development/', developmentTeamView.as_view(), name='development'),
path('clinical-export/', clinical_export, name = 'clinical_export'),
path('advocacy-export/', advocacy_export, name = 'advocacy_export'),
path('clinical-voca-export/', clinical_voca_export, name = 'clinical_voca_export'),
path('map-export/', map_export, name = 'map_export'),
path('ov-export/', ov_export, name = 'ov_export'),
path('safe-clinic-export/', safe_clinic_export, name = 'safe_clinic_export'),
path('crisis-line-export/', crisis_line_export, name = 'crisis_line_export'),
path('prevention-export/', prevention_export, name = 'prevention_export'),
path('training-export/', training_export, name = 'training_export'),
path('development-export/', development_export, name = 'development_export'),
path('form-confirmation/', form_confirmation, name = 'form_confirmation'),
path('logout/', logout_request, name="logout"),
#reset password urls
path('reset_password/',
auth_views.PasswordResetView.as_view(template_name="mainPage/password_reset.html"),
name="reset_password"),
path('reset_password_sent/',
auth_views.PasswordResetDoneView.as_view(template_name="mainPage/password_reset_sent.html"),
name="password_reset_done"),
path('reset/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(template_name="mainPage/password_reset_form.html"),
name="password_reset_confirm"),
path('reset_password_complete/',
auth_views.PasswordResetCompleteView.as_view(template_name="mainPage/password_reset_done.html"),
name="password_reset_complete"),
]
urlpatterns += staticfiles_urlpatterns()
'''
1 - Submit email form //PasswordResetView.as_view()
2 - Email sent success message //PasswordResetDoneView.as_view()
3 - Link to password Rest form in email //PasswordResetConfirmView.as_view()
4 - Password successfully changed message //PasswordResetCompleteView.as_view()
''' | 0.251372 | 0.052328 |
""" Test for projectq.backends._awsbraket._awsbraket_boto3_client.py """
import pytest
from unittest.mock import patch
from ._awsbraket_boto3_client_test_fixtures import * # noqa: F401,F403
# ==============================================================================
# Flag recording whether the optional AWS dependencies can be imported.
_has_boto3 = True
try:
    # botocore ships alongside boto3; the backend module imports boto3 itself,
    # so either import failing means the backend is unusable.
    import botocore
    from projectq.backends._awsbraket import _awsbraket_boto3_client
except ImportError:
    _has_boto3 = False
# Reusable skip marker applied to every test in this module.
has_boto3 = pytest.mark.skipif(not _has_boto3, reason="boto3 package is not installed")
# ==============================================================================
@has_boto3
@patch('boto3.client')
def test_show_devices(mock_boto3_client, show_devices_setup):
    """show_devices() must assemble the device list from the Braket
    search_devices/get_device responses."""
    credentials, search_result, device_result, expected_devices = show_devices_setup
    # The patched boto3.client factory hands back the mock itself, so the
    # client-level API calls can be stubbed directly on it.
    mock_boto3_client.return_value = mock_boto3_client
    mock_boto3_client.search_devices.return_value = search_result
    mock_boto3_client.get_device.return_value = device_result
    assert _awsbraket_boto3_client.show_devices(credentials=credentials) == expected_devices
# ==============================================================================
# Canned ``get_quantum_task`` responses covering the task states exercised
# by test_retrieve below.
# Task finished successfully: retrieve() should fetch and return results.
completed_value = {
    'deviceArn': 'arndevice',
    'deviceParameters': 'parameters',
    'failureReason': 'None',
    'outputS3Bucket': 'amazon-braket-bucket',
    'outputS3Directory': 'complete/directory',
    'quantumTaskArn': 'arntask',
    'shots': 123,
    'status': 'COMPLETED',
    'tags': {'tagkey': 'tagvalue'},
}
# Task ended in FAILED: retrieve() must raise and surface the failure reason.
failed_value = {
    'failureReason': 'This is a failure reason',
    'outputS3Bucket': 'amazon-braket-bucket',
    'outputS3Directory': 'complete/directory',
    'status': 'FAILED',
}
# Task caught mid-cancellation: retrieve() must raise a CANCEL error.
cancelling_value = {
    'failureReason': 'None',
    'outputS3Bucket': 'amazon-braket-bucket',
    'outputS3Directory': 'complete/directory',
    'status': 'CANCELLING',
}
# Unrecognised status: retrieve() must keep polling until it times out.
other_value = {
    'failureReason': 'None',
    'outputS3Bucket': 'amazon-braket-bucket',
    'outputS3Directory': 'complete/directory',
    'status': 'OTHER',
}
# ------------------------------------------------------------------------------
@has_boto3
@patch('boto3.client')
@pytest.mark.parametrize(
"var_status, var_result",
[
('completed', completed_value),
('failed', failed_value),
('cancelling', cancelling_value),
('other', other_value),
],
)
def test_retrieve(mock_boto3_client, var_status, var_result, retrieve_setup):
arntask, creds, device_value, res_completed, results_dict = retrieve_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.get_quantum_task.return_value = var_result
mock_boto3_client.get_device.return_value = device_value
mock_boto3_client.get_object.return_value = results_dict
if var_status == 'completed':
res = _awsbraket_boto3_client.retrieve(credentials=creds, taskArn=arntask)
assert res == res_completed
else:
with pytest.raises(Exception) as exinfo:
_awsbraket_boto3_client.retrieve(credentials=creds, taskArn=arntask, num_retries=2)
print(exinfo.value)
if var_status == 'failed':
assert (
str(exinfo.value)
== "Error while running the code: FAILED. \
The failure reason was: This is a failure reason."
)
if var_status == 'cancelling':
assert str(exinfo.value) == "The job received a CANCEL operation: CANCELLING."
if var_status == 'other':
assert (
str(exinfo.value)
== "Timeout. The Arn of your submitted job \
is arn:aws:braket:us-east-1:id:taskuuid \
and the status of the job is OTHER."
)
# ==============================================================================
@has_boto3
@patch('boto3.client')
def test_retrieve_devicetypes(mock_boto3_client, retrieve_devicetypes_setup):
(
arntask,
creds,
device_value,
results_dict,
res_completed,
) = retrieve_devicetypes_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.get_quantum_task.return_value = completed_value
mock_boto3_client.get_device.return_value = device_value
mock_boto3_client.get_object.return_value = results_dict
res = _awsbraket_boto3_client.retrieve(credentials=creds, taskArn=arntask)
assert res == res_completed
# ==============================================================================
@has_boto3
@patch('boto3.client')
def test_send_too_many_qubits(mock_boto3_client, send_too_many_setup):
(creds, s3_folder, search_value, device_value, info_too_much) = send_too_many_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.search_devices.return_value = search_value
mock_boto3_client.get_device.return_value = device_value
with pytest.raises(_awsbraket_boto3_client.DeviceTooSmall):
_awsbraket_boto3_client.send(info_too_much, device='name2', credentials=creds, s3_folder=s3_folder)
# ==============================================================================
@has_boto3
@patch('boto3.client')
@pytest.mark.parametrize(
"var_status, var_result",
[
('completed', completed_value),
('failed', failed_value),
('cancelling', cancelling_value),
('other', other_value),
],
)
def test_send_real_device_online_verbose(mock_boto3_client, var_status, var_result, real_device_online_setup):
(
qtarntask,
creds,
s3_folder,
info,
search_value,
device_value,
res_completed,
results_dict,
) = real_device_online_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.search_devices.return_value = search_value
mock_boto3_client.get_device.return_value = device_value
mock_boto3_client.create_quantum_task.return_value = qtarntask
mock_boto3_client.get_quantum_task.return_value = var_result
mock_boto3_client.get_object.return_value = results_dict
# This is a ficticios situation because the job will be always queued
# at the beginning. After that the status will change at some point in time
# If the status change while the _get_result loop with num_retries, is
# active the result will change. We mock this using some preconfigured
# statuses in var_status for the tests
if var_status == 'completed':
res = _awsbraket_boto3_client.send(info, device='name2', credentials=creds, s3_folder=s3_folder, verbose=True)
assert res == res_completed
else:
with pytest.raises(Exception) as exinfo:
_awsbraket_boto3_client.send(
info,
device='name2',
credentials=creds,
s3_folder=s3_folder,
verbose=True,
num_retries=2,
)
print(exinfo.value)
if var_status == 'failed':
assert (
str(exinfo.value)
== "Error while running the code: FAILED. The failure \
reason was: This is a failure reason."
)
if var_status == 'cancelling':
assert str(exinfo.value) == "The job received a CANCEL operation: CANCELLING."
if var_status == 'other':
assert (
str(exinfo.value)
== "Timeout. The Arn of your submitted job \
is arn:aws:braket:us-east-1:id:taskuuid \
and the status of the job is OTHER."
)
# ==============================================================================
@has_boto3
@patch('boto3.client')
@pytest.mark.parametrize(
"var_error",
[
('AccessDeniedException'),
('DeviceOfflineException'),
('InternalServiceException'),
('ServiceQuotaExceededException'),
('ValidationException'),
],
)
def test_send_that_errors_are_caught(mock_boto3_client, var_error, send_that_error_setup):
creds, s3_folder, info, search_value, device_value = send_that_error_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.search_devices.return_value = search_value
mock_boto3_client.get_device.return_value = device_value
mock_boto3_client.create_quantum_task.side_effect = botocore.exceptions.ClientError(
{"Error": {"Code": var_error, "Message": "Msg error for " + var_error}},
"create_quantum_task",
)
with pytest.raises(botocore.exceptions.ClientError):
_awsbraket_boto3_client.send(info, device='name2', credentials=creds, s3_folder=s3_folder, num_retries=2)
with pytest.raises(_awsbraket_boto3_client.DeviceOfflineError):
_awsbraket_boto3_client.send(
info,
device='unknown',
credentials=creds,
s3_folder=s3_folder,
num_retries=2,
)
# ==============================================================================
@has_boto3
@patch('boto3.client')
@pytest.mark.parametrize("var_error", [('ResourceNotFoundException')])
def test_retrieve_error_arn_not_exist(mock_boto3_client, var_error, arntask, creds):
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.get_quantum_task.side_effect = botocore.exceptions.ClientError(
{"Error": {"Code": var_error, "Message": "Msg error for " + var_error}},
"get_quantum_task",
)
with pytest.raises(botocore.exceptions.ClientError):
_awsbraket_boto3_client.retrieve(credentials=creds, taskArn=arntask)
# ============================================================================== | projectq/backends/_awsbraket/_awsbraket_boto3_client_test.py | """ Test for projectq.backends._awsbraket._awsbraket_boto3_client.py """
import pytest
from unittest.mock import patch
from ._awsbraket_boto3_client_test_fixtures import * # noqa: F401,F403
# ==============================================================================
_has_boto3 = True
try:
import botocore
from projectq.backends._awsbraket import _awsbraket_boto3_client
except ImportError:
_has_boto3 = False
has_boto3 = pytest.mark.skipif(not _has_boto3, reason="boto3 package is not installed")
# ==============================================================================
@has_boto3
@patch('boto3.client')
def test_show_devices(mock_boto3_client, show_devices_setup):
creds, search_value, device_value, devicelist_result = show_devices_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.search_devices.return_value = search_value
mock_boto3_client.get_device.return_value = device_value
devicelist = _awsbraket_boto3_client.show_devices(credentials=creds)
assert devicelist == devicelist_result
# ==============================================================================
completed_value = {
'deviceArn': 'arndevice',
'deviceParameters': 'parameters',
'failureReason': 'None',
'outputS3Bucket': 'amazon-braket-bucket',
'outputS3Directory': 'complete/directory',
'quantumTaskArn': 'arntask',
'shots': 123,
'status': 'COMPLETED',
'tags': {'tagkey': 'tagvalue'},
}
failed_value = {
'failureReason': 'This is a failure reason',
'outputS3Bucket': 'amazon-braket-bucket',
'outputS3Directory': 'complete/directory',
'status': 'FAILED',
}
cancelling_value = {
'failureReason': 'None',
'outputS3Bucket': 'amazon-braket-bucket',
'outputS3Directory': 'complete/directory',
'status': 'CANCELLING',
}
other_value = {
'failureReason': 'None',
'outputS3Bucket': 'amazon-braket-bucket',
'outputS3Directory': 'complete/directory',
'status': 'OTHER',
}
# ------------------------------------------------------------------------------
@has_boto3
@patch('boto3.client')
@pytest.mark.parametrize(
"var_status, var_result",
[
('completed', completed_value),
('failed', failed_value),
('cancelling', cancelling_value),
('other', other_value),
],
)
def test_retrieve(mock_boto3_client, var_status, var_result, retrieve_setup):
arntask, creds, device_value, res_completed, results_dict = retrieve_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.get_quantum_task.return_value = var_result
mock_boto3_client.get_device.return_value = device_value
mock_boto3_client.get_object.return_value = results_dict
if var_status == 'completed':
res = _awsbraket_boto3_client.retrieve(credentials=creds, taskArn=arntask)
assert res == res_completed
else:
with pytest.raises(Exception) as exinfo:
_awsbraket_boto3_client.retrieve(credentials=creds, taskArn=arntask, num_retries=2)
print(exinfo.value)
if var_status == 'failed':
assert (
str(exinfo.value)
== "Error while running the code: FAILED. \
The failure reason was: This is a failure reason."
)
if var_status == 'cancelling':
assert str(exinfo.value) == "The job received a CANCEL operation: CANCELLING."
if var_status == 'other':
assert (
str(exinfo.value)
== "Timeout. The Arn of your submitted job \
is arn:aws:braket:us-east-1:id:taskuuid \
and the status of the job is OTHER."
)
# ==============================================================================
@has_boto3
@patch('boto3.client')
def test_retrieve_devicetypes(mock_boto3_client, retrieve_devicetypes_setup):
(
arntask,
creds,
device_value,
results_dict,
res_completed,
) = retrieve_devicetypes_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.get_quantum_task.return_value = completed_value
mock_boto3_client.get_device.return_value = device_value
mock_boto3_client.get_object.return_value = results_dict
res = _awsbraket_boto3_client.retrieve(credentials=creds, taskArn=arntask)
assert res == res_completed
# ==============================================================================
@has_boto3
@patch('boto3.client')
def test_send_too_many_qubits(mock_boto3_client, send_too_many_setup):
(creds, s3_folder, search_value, device_value, info_too_much) = send_too_many_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.search_devices.return_value = search_value
mock_boto3_client.get_device.return_value = device_value
with pytest.raises(_awsbraket_boto3_client.DeviceTooSmall):
_awsbraket_boto3_client.send(info_too_much, device='name2', credentials=creds, s3_folder=s3_folder)
# ==============================================================================
@has_boto3
@patch('boto3.client')
@pytest.mark.parametrize(
"var_status, var_result",
[
('completed', completed_value),
('failed', failed_value),
('cancelling', cancelling_value),
('other', other_value),
],
)
def test_send_real_device_online_verbose(mock_boto3_client, var_status, var_result, real_device_online_setup):
(
qtarntask,
creds,
s3_folder,
info,
search_value,
device_value,
res_completed,
results_dict,
) = real_device_online_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.search_devices.return_value = search_value
mock_boto3_client.get_device.return_value = device_value
mock_boto3_client.create_quantum_task.return_value = qtarntask
mock_boto3_client.get_quantum_task.return_value = var_result
mock_boto3_client.get_object.return_value = results_dict
# This is a ficticios situation because the job will be always queued
# at the beginning. After that the status will change at some point in time
# If the status change while the _get_result loop with num_retries, is
# active the result will change. We mock this using some preconfigured
# statuses in var_status for the tests
if var_status == 'completed':
res = _awsbraket_boto3_client.send(info, device='name2', credentials=creds, s3_folder=s3_folder, verbose=True)
assert res == res_completed
else:
with pytest.raises(Exception) as exinfo:
_awsbraket_boto3_client.send(
info,
device='name2',
credentials=creds,
s3_folder=s3_folder,
verbose=True,
num_retries=2,
)
print(exinfo.value)
if var_status == 'failed':
assert (
str(exinfo.value)
== "Error while running the code: FAILED. The failure \
reason was: This is a failure reason."
)
if var_status == 'cancelling':
assert str(exinfo.value) == "The job received a CANCEL operation: CANCELLING."
if var_status == 'other':
assert (
str(exinfo.value)
== "Timeout. The Arn of your submitted job \
is arn:aws:braket:us-east-1:id:taskuuid \
and the status of the job is OTHER."
)
# ==============================================================================
@has_boto3
@patch('boto3.client')
@pytest.mark.parametrize(
"var_error",
[
('AccessDeniedException'),
('DeviceOfflineException'),
('InternalServiceException'),
('ServiceQuotaExceededException'),
('ValidationException'),
],
)
def test_send_that_errors_are_caught(mock_boto3_client, var_error, send_that_error_setup):
creds, s3_folder, info, search_value, device_value = send_that_error_setup
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.search_devices.return_value = search_value
mock_boto3_client.get_device.return_value = device_value
mock_boto3_client.create_quantum_task.side_effect = botocore.exceptions.ClientError(
{"Error": {"Code": var_error, "Message": "Msg error for " + var_error}},
"create_quantum_task",
)
with pytest.raises(botocore.exceptions.ClientError):
_awsbraket_boto3_client.send(info, device='name2', credentials=creds, s3_folder=s3_folder, num_retries=2)
with pytest.raises(_awsbraket_boto3_client.DeviceOfflineError):
_awsbraket_boto3_client.send(
info,
device='unknown',
credentials=creds,
s3_folder=s3_folder,
num_retries=2,
)
# ==============================================================================
@has_boto3
@patch('boto3.client')
@pytest.mark.parametrize("var_error", [('ResourceNotFoundException')])
def test_retrieve_error_arn_not_exist(mock_boto3_client, var_error, arntask, creds):
mock_boto3_client.return_value = mock_boto3_client
mock_boto3_client.get_quantum_task.side_effect = botocore.exceptions.ClientError(
{"Error": {"Code": var_error, "Message": "Msg error for " + var_error}},
"get_quantum_task",
)
with pytest.raises(botocore.exceptions.ClientError):
_awsbraket_boto3_client.retrieve(credentials=creds, taskArn=arntask)
# ============================================================================== | 0.509276 | 0.335596 |
import re
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.generic import View
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .models import Posts, Likes, Profile
from .forms import PostsForm, CommentsForm, VotesForm, UserUpdateForm, ProfileUpdateForm
from django.contrib import messages
User = get_user_model()
# Create your views here.
class IndexView(View):
"""
Index view
"""
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
form = PostsForm()
posts = Posts.objects.all()
votes = Likes.objects.all()
user = User
return render(request, "core/index.html", {"posts": posts, "form": form, "votes": votes})
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
form = PostsForm(request.POST, request.FILES)
posts = Posts.objects.all()
if form.is_valid():
post = form.save(commit=False)
post.user = request.user
post.save()
return render(request, "core/index.html", {"posts": posts, "form": form})
else:
return render(request, "core/index.html", {"posts": posts, "form": form})
class PostsDetails(View):
"""
Posts Details view
"""
def get(self, request, pk, *args, **kwargs):
post = Posts.objects.get(id=pk)
commentForm = CommentsForm()
vote_form = VotesForm()
return render(request,
"core/post_details.html",
{"post": post, "commentForm": commentForm, "voteForm": vote_form})
def post(self, request, pk, *args, **kwargs):
commentForm = CommentsForm(request.POST, request.FILES)
if commentForm.is_valid():
comment = commentForm.save(commit=False)
comment.user = request.user
comment.post = Posts.objects.get(id=pk)
comment.save()
return redirect("/post/" + str(pk) + "/")
else:
return redirect("/post/" + str(pk) + "/")
def vote_post(request, id):
post = Posts.objects.get(id=id)
likes = Likes.objects.filter(post=post)
design = []
usability = []
creativity = []
content = []
for x in likes:
design.append(x.design)
usability.append(x.usability)
creativity.append(x.creativity)
content.append(x.content)
de = []
us = []
cre = []
con = []
if len(usability) > 0:
usa = (sum(usability) / len(usability))
us.append(usa)
if len(creativity) > 0:
crea = (sum(creativity) / len(creativity))
cre.append(crea)
if len(design) > 0:
des = (sum(design) / len(design))
de.append(des)
if len(content) > 0:
cont = (sum(content) / len(content))
con.append(cont)
vote_form = VotesForm()
if request.method == 'POST':
vote_form = VotesForm(request.POST)
if vote_form.is_valid():
design = vote_form.cleaned_data['design']
usability = vote_form.cleaned_data['usability']
content = vote_form.cleaned_data['content']
creativity = vote_form.cleaned_data['creativity']
rating = Likes(design=design, usability=usability,
content=content, creativity=creativity,
user=request.user, post=post)
rating.save()
return redirect('/')
return render(request, 'core/post_details.html',
{"post": post, "des": des, "usa": usa, "cont": cont, "crea": crea, "voteForm": vote_form})
def profile(request, username):
title = "profile"
posts = Posts.get_user(username)
profile = Profile.get_user(username)
print(request.user)
print(profile)
return render(request, 'core/profile.html', {"title": title, "profiles": profile, "posts": posts})
def update_profile(request, profile_id):
user = User.objects.get(pk=profile_id)
if request.method == "POST":
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f"You Have Successfully Updated Your Profile!")
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
return render(request, 'core/update_profile.html', {"u_form": u_form, "p_form": p_form})
def post_site(request):
current_user = request.user
print(current_user)
form = PostsForm(request.POST, request.FILES)
print(form)
if form.is_valid():
print("Valid")
form = form.save(commit=False)
form.user = current_user
form.save()
print("saved")
redirect('index_view')
print("redirecting")
else:
form = PostsForm()
return render(request, "core/post_site.html", {"form": form}) | core/views.py | import re
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.generic import View
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .models import Posts, Likes, Profile
from .forms import PostsForm, CommentsForm, VotesForm, UserUpdateForm, ProfileUpdateForm
from django.contrib import messages
User = get_user_model()
# Create your views here.
class IndexView(View):
"""
Index view
"""
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
form = PostsForm()
posts = Posts.objects.all()
votes = Likes.objects.all()
user = User
return render(request, "core/index.html", {"posts": posts, "form": form, "votes": votes})
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
form = PostsForm(request.POST, request.FILES)
posts = Posts.objects.all()
if form.is_valid():
post = form.save(commit=False)
post.user = request.user
post.save()
return render(request, "core/index.html", {"posts": posts, "form": form})
else:
return render(request, "core/index.html", {"posts": posts, "form": form})
class PostsDetails(View):
"""
Posts Details view
"""
def get(self, request, pk, *args, **kwargs):
post = Posts.objects.get(id=pk)
commentForm = CommentsForm()
vote_form = VotesForm()
return render(request,
"core/post_details.html",
{"post": post, "commentForm": commentForm, "voteForm": vote_form})
def post(self, request, pk, *args, **kwargs):
commentForm = CommentsForm(request.POST, request.FILES)
if commentForm.is_valid():
comment = commentForm.save(commit=False)
comment.user = request.user
comment.post = Posts.objects.get(id=pk)
comment.save()
return redirect("/post/" + str(pk) + "/")
else:
return redirect("/post/" + str(pk) + "/")
def vote_post(request, id):
post = Posts.objects.get(id=id)
likes = Likes.objects.filter(post=post)
design = []
usability = []
creativity = []
content = []
for x in likes:
design.append(x.design)
usability.append(x.usability)
creativity.append(x.creativity)
content.append(x.content)
de = []
us = []
cre = []
con = []
if len(usability) > 0:
usa = (sum(usability) / len(usability))
us.append(usa)
if len(creativity) > 0:
crea = (sum(creativity) / len(creativity))
cre.append(crea)
if len(design) > 0:
des = (sum(design) / len(design))
de.append(des)
if len(content) > 0:
cont = (sum(content) / len(content))
con.append(cont)
vote_form = VotesForm()
if request.method == 'POST':
vote_form = VotesForm(request.POST)
if vote_form.is_valid():
design = vote_form.cleaned_data['design']
usability = vote_form.cleaned_data['usability']
content = vote_form.cleaned_data['content']
creativity = vote_form.cleaned_data['creativity']
rating = Likes(design=design, usability=usability,
content=content, creativity=creativity,
user=request.user, post=post)
rating.save()
return redirect('/')
return render(request, 'core/post_details.html',
{"post": post, "des": des, "usa": usa, "cont": cont, "crea": crea, "voteForm": vote_form})
def profile(request, username):
title = "profile"
posts = Posts.get_user(username)
profile = Profile.get_user(username)
print(request.user)
print(profile)
return render(request, 'core/profile.html', {"title": title, "profiles": profile, "posts": posts})
def update_profile(request, profile_id):
user = User.objects.get(pk=profile_id)
if request.method == "POST":
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f"You Have Successfully Updated Your Profile!")
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
return render(request, 'core/update_profile.html', {"u_form": u_form, "p_form": p_form})
def post_site(request):
current_user = request.user
print(current_user)
form = PostsForm(request.POST, request.FILES)
print(form)
if form.is_valid():
print("Valid")
form = form.save(commit=False)
form.user = current_user
form.save()
print("saved")
redirect('index_view')
print("redirecting")
else:
form = PostsForm()
return render(request, "core/post_site.html", {"form": form}) | 0.449876 | 0.082328 |
from logging import fatal
import os
from pathlib import Path
import shutil
import zipfile
import re
from mirdata import download_utils, core
import pytest
@pytest.fixture
def mock_download_from_remote(mocker):
    """Patch ``download_utils.download_from_remote`` for the duration of a test.

    Returns the MagicMock standing in for the real function so tests can
    inspect call arguments.
    """
    patched = mocker.patch.object(download_utils, "download_from_remote")
    return patched
@pytest.fixture
def mock_downloader(mocker):
    """Patch ``download_utils.downloader`` and return the replacing mock."""
    patched = mocker.patch.object(download_utils, "downloader")
    return patched
@pytest.fixture
def mock_untar(mocker):
    """Patch ``download_utils.untar`` and return the replacing mock."""
    patched = mocker.patch.object(download_utils, "untar")
    return patched
@pytest.fixture
def mock_unzip(mocker):
    """Patch ``download_utils.unzip`` and return the replacing mock."""
    patched = mocker.patch.object(download_utils, "unzip")
    return patched
@pytest.fixture
def mock_path(mocker, mock_download_from_remote):
    """Stub ``Path.mkdir`` so tests create no real directories.

    Requesting the ``mock_download_from_remote`` fixture as a parameter
    guarantees the remote-download function is already patched whenever
    this fixture is active.
    """
    patched = mocker.patch.object(Path, "mkdir")
    return patched
def test_downloader(mocker, mock_path):
    """Verify ``downloader()`` dispatches each remote type to the right helper.

    Zip remotes must be routed to ``download_zip_file``, tar remotes to
    ``download_tar_file``, and plain files to ``download_from_remote``.
    Also covers combinations of remote types, partial downloads (including
    invalid ``partial_download`` arguments), the info-message path, and
    idempotent repeated downloads.  ``mocker.resetall()`` separates the
    scenarios, so statement order is significant.
    """
    # Patch all three download helpers so no network or filesystem work happens.
    mock_zip = mocker.patch.object(download_utils, "download_zip_file")
    mock_tar = mocker.patch.object(download_utils, "download_tar_file")
    mock_download_from_remote = mocker.patch.object(
        download_utils, "download_from_remote"
    )
    # One remote of each kind; the checksum value is arbitrary (never verified
    # here because the helpers are mocked).
    zip_remote = download_utils.RemoteFileMetadata(
        filename="remote.zip", url="a", checksum=("1234")
    )
    tar_remote = download_utils.RemoteFileMetadata(
        filename="remote.tar.gz", url="a", checksum=("1234")
    )
    file_remote = download_utils.RemoteFileMetadata(
        filename="remote.txt", url="a", checksum=("1234")
    )
    index = core.Index("asdf.json")
    # Zip only
    download_utils.downloader("a", index=index, remotes={"b": zip_remote})
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mocker.resetall()
    # tar only
    download_utils.downloader("a", index=index, remotes={"b": tar_remote})
    mock_tar.assert_called_once_with(tar_remote, "a", False, False, False)
    mocker.resetall()
    # file only
    download_utils.downloader("a", index=index, remotes={"b": file_remote})
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mocker.resetall()
    # zip and tar
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": tar_remote}
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_tar.assert_called_once_with(tar_remote, "a", False, False, False)
    mocker.resetall()
    # zip and file
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": file_remote}
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mocker.resetall()
    # tar and file
    download_utils.downloader(
        "a", index=index, remotes={"b": tar_remote, "c": file_remote}
    )
    mock_tar.assert_called_once_with(tar_remote, "a", False, False, False)
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mocker.resetall()
    # zip and tar and file
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": tar_remote, "d": file_remote}
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mock_tar.assert_called_once_with(tar_remote, "a", False, False, False)
    mocker.resetall()
    # test partial download: only the listed keys ("b", "d") are fetched
    download_utils.downloader(
        "a",
        index=index,
        remotes={"b": zip_remote, "c": tar_remote, "d": file_remote},
        partial_download=["b", "d"],
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mocker.resetall()
    # test bad type partial download: a bare string (not a list) must be rejected
    with pytest.raises(ValueError):
        download_utils.downloader(
            "a",
            index=index,
            remotes={"b": zip_remote, "c": tar_remote, "d": file_remote},
            partial_download="b",
        )
    # a key not present in remotes ("e") must also be rejected
    with pytest.raises(ValueError):
        download_utils.downloader(
            "a",
            index=index,
            remotes={"b": zip_remote, "c": tar_remote, "d": file_remote},
            partial_download=["d", "e"],
        )
    # test info message
    download_utils.downloader("a", index=index, info_message="I am a message!")
    mocker.resetall()
    # test download twice - defaults
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": tar_remote, "d": file_remote}
    )
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": tar_remote, "d": file_remote}
    )
    # test download twice - cleanup=True
    download_utils.downloader(
        "a",
        index=index,
        remotes={"b": zip_remote, "c": tar_remote, "d": file_remote},
        cleanup=True,
    )
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": tar_remote, "d": file_remote}
    )
def test_download_index_cases(mocker, mock_path):
    """Check how ``downloader()`` combines remotes with index-level settings.

    Covers: no remotes and no remote index (nothing downloaded); an index
    carrying its own ``partial_download`` default; an explicit
    ``partial_download`` argument overriding the index default; and a
    remote index that must itself be fetched, with and without additional
    remotes.  Order matters because mocks are reset between scenarios.
    """
    mock_zip = mocker.patch.object(download_utils, "download_zip_file")
    mock_download_from_remote = mocker.patch.object(
        download_utils, "download_from_remote"
    )
    zip_remote = download_utils.RemoteFileMetadata(
        filename="remote.zip", url="a", checksum=("1234")
    )
    file_remote = download_utils.RemoteFileMetadata(
        filename="remote.txt", url="a", checksum=("1234")
    )
    # Three index flavors: plain local, local with a baked-in partial_download
    # default, and one that lives at a remote URL.
    index = core.Index("asdf.json")
    index_partial = core.Index("asdf.json", partial_download=["b"])
    remote_index = core.Index("asdf.json", url="b", checksum="4567")
    # no remotes, no remote index
    download_utils.downloader("a", index=index, remotes=None, partial_download=None)
    mock_zip.assert_not_called()
    mock_download_from_remote.assert_not_called()
    mocker.resetall()
    # remotes, partial download index
    download_utils.downloader(
        "a",
        index=index_partial,
        remotes={"b": zip_remote, "d": file_remote},
        partial_download=None,
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_download_from_remote.assert_not_called()
    mocker.resetall()
    # remotes, partial download index overwrite
    download_utils.downloader(
        "a",
        index=index_partial,
        remotes={"b": zip_remote, "d": file_remote},
        partial_download=["d"],
    )
    mock_zip.assert_not_called()
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mocker.resetall()
    # no remotes, remote index
    download_utils.downloader(
        "a", index=remote_index, remotes=None, partial_download=None
    )
    mock_zip.assert_not_called()
    mock_download_from_remote.assert_called_once_with(
        remote_index.remote, "a", False, False
    )
    mocker.resetall()
    # remotes, remote index
    download_utils.downloader(
        "a", index=remote_index, remotes={"b": zip_remote}, partial_download=None
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_download_from_remote.assert_called_once_with(
        remote_index.remote, "a", False, False
    )
    mocker.resetall()
def _clean(fpath):
if os.path.exists(fpath):
shutil.rmtree(fpath)
def test_downloader_with_server_file(httpserver):
    """End-to-end download of a plain file from a local test HTTP server.

    First downloads ``remote.wav`` with a valid checksum (repeating the
    download under default, ``cleanup=True`` and ``force_overwrite=True``
    settings), then rebinds ``TEST_REMOTE`` to a bad checksum and checks
    that the mismatch raises ``OSError`` unless
    ``allow_invalid_checksum=True``, in which case only a ``UserWarning``
    is emitted.
    """
    index = core.Index("asdf.json")
    httpserver.serve_content(open("tests/resources/remote.wav").read())
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.wav",
        url=httpserver.url,
        checksum=("3f77d0d69dc41b3696f074ad6bf2852f"),
    )
    save_dir = "tests/resources/tmp_download_test"
    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    _clean(save_dir)
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, cleanup=True
    )
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, force_overwrite=True
    )
    _clean(save_dir)
    # test with wrong checksum: raises error
    # NOTE: TEST_REMOTE is rebound here; everything below uses the bad checksum.
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.wav", url=httpserver.url, checksum=("wrongchecksum")
    )
    with pytest.raises(OSError):
        _clean(save_dir)
        download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    with pytest.raises(OSError):
        _clean(save_dir)
        download_utils.downloader(
            save_dir, index=index, remotes={"b": TEST_REMOTE}, cleanup=True
        )
    with pytest.raises(OSError):
        _clean(save_dir)
        download_utils.downloader(
            save_dir, index=index, remotes={"b": TEST_REMOTE}, force_overwrite=True
        )
    # test with wrong checksum: ignore error
    with pytest.warns(UserWarning):
        _clean(save_dir)
        download_utils.downloader(
            save_dir,
            index=index,
            remotes={"b": TEST_REMOTE},
            allow_invalid_checksum=True,
        )
    with pytest.warns(UserWarning):
        _clean(save_dir)
        download_utils.downloader(
            save_dir,
            index=index,
            remotes={"b": TEST_REMOTE},
            cleanup=True,
            allow_invalid_checksum=True,
        )
    with pytest.warns(UserWarning):
        _clean(save_dir)
        download_utils.downloader(
            save_dir,
            index=index,
            remotes={"b": TEST_REMOTE},
            force_overwrite=True,
            allow_invalid_checksum=True,
        )
def test_downloader_with_server_zip(httpserver):
    """End-to-end downloader run against a local HTTP server serving a zip archive."""
    index = core.Index("asdf.json")
    httpserver.serve_content(open("tests/resources/remote.zip", "rb").read())
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.zip",
        url=httpserver.url,
        checksum=("7a31ccfa28bfa3fb112d16c96e9d9a89"),
    )
    save_dir = "tests/resources/_tmp_test_download_utils"

    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})

    _clean(save_dir)
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, cleanup=True
    )
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})

    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, force_overwrite=True
    )

    _clean(save_dir)
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, cleanup=True
    )
    # test downloading twice
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, force_overwrite=True
    )
    _clean(save_dir)
def test_downloader_with_server_tar(httpserver):
    """End-to-end downloader run against a local HTTP server serving a tar.gz archive."""
    index = core.Index("asdf.json")
    httpserver.serve_content(open("tests/resources/remote.tar.gz", "rb").read())
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.tar.gz",
        url=httpserver.url,
        checksum=("9042f5eebdcd0b94aa7a3c9bf12dc51d"),
    )
    save_dir = "tests/resources/_tmp_test_download_utils"

    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})

    _clean(save_dir)
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, cleanup=True
    )
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})

    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, force_overwrite=True
    )
    _clean(save_dir)
def test_download_from_remote(httpserver, tmpdir):
    """Download a file from a mock server and verify the returned path.

    Fix: the original assigned ``download_path`` without asserting anything,
    so a wrong return value could never fail the test.
    """
    httpserver.serve_content(open("tests/resources/remote.wav").read())
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.wav",
        url=httpserver.url,
        checksum=("3f77d0d69dc41b3696f074ad6bf2852f"),
    )
    download_path = download_utils.download_from_remote(
        TEST_REMOTE, str(tmpdir), False, False
    )
    # With no destination_dir the file should land directly in the save dir
    # (mirrors the assertion style of test_download_from_remote_destdir).
    assert download_path == os.path.join(str(tmpdir), "remote.wav")
def test_download_from_remote_destdir(httpserver, tmpdir):
    """A remote with destination_dir should be saved inside that subfolder."""
    httpserver.serve_content(open("tests/resources/remote.wav").read())
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.wav",
        url=httpserver.url,
        checksum=("3f77d0d69dc41b3696f074ad6bf2852f"),
        destination_dir="subfolder",
    )
    download_path = download_utils.download_from_remote(
        TEST_REMOTE, str(tmpdir), False, False
    )
    expected_download_path = os.path.join(str(tmpdir), "subfolder", "remote.wav")
    assert expected_download_path == download_path
def test_download_from_remote_raises_IOError(httpserver, tmpdir):
    """A 404 response from the server should surface as an IOError."""
    httpserver.serve_content("File not found!", 404)
    missing_remote = download_utils.RemoteFileMetadata(
        filename="remote.wav", url=httpserver.url, checksum=("1234")
    )
    with pytest.raises(IOError):
        download_utils.download_from_remote(missing_remote, str(tmpdir), False, False)
def test_unzip():
    """Extract a fixture zip without deleting the archive, then tidy up."""
    download_utils.unzip("tests/resources/file.zip", cleanup=False)
    extracted = os.path.join("tests", "resources", "file.txt")
    assert os.path.exists(extracted)
    os.remove(extracted)
def test_untar():
    """Untar a fixture archive with cleanup disabled and verify the extracted file.

    Fix: also remove the extracted ``file`` directory, which the original left
    behind in tests/resources and could pollute later runs.
    """
    download_utils.untar("tests/resources/file.tar.gz", cleanup=False)
    expected_file_location = os.path.join("tests", "resources", "file", "file.txt")
    assert os.path.exists(expected_file_location)
    os.remove(expected_file_location)
    # The tarball extracts into its own directory; remove it too so the
    # resources folder is left exactly as before the test.
    shutil.rmtree(os.path.join("tests", "resources", "file"), ignore_errors=True)
def test_download_zip_file(mocker, mock_download_from_remote, mock_unzip):
    """download_zip_file should download, then unzip with cleanup disabled."""
    mock_download_from_remote.return_value = "foo"
    download_utils.download_zip_file("a", "b", False, False, False)
    mock_download_from_remote.assert_called_once_with("a", "b", False, False)
    mock_unzip.assert_called_once_with("foo", cleanup=False)
    _clean("a")
def test_download_zip_file_ignorechecksum(
    mocker, mock_download_from_remote, mock_unzip
):
    """The allow-invalid-checksum flag should be forwarded to the download call."""
    mock_download_from_remote.return_value = "foo"
    download_utils.download_zip_file("a", "b", False, False, True)
    mock_download_from_remote.assert_called_once_with("a", "b", False, True)
    mock_unzip.assert_called_once_with("foo", cleanup=False)
    _clean("a")
def test_download_tar_file(mocker, mock_download_from_remote, mock_untar):
    """download_tar_file should download, then untar with cleanup disabled."""
    mock_download_from_remote.return_value = "foo"
    download_utils.download_tar_file("a", "b", False, False, False)
    mock_download_from_remote.assert_called_once_with("a", "b", False, False)
    mock_untar.assert_called_once_with("foo", cleanup=False)
    _clean("a")
def test_download_tar_file_ignorechecksum(
    mocker, mock_download_from_remote, mock_untar
):
    """The allow-invalid-checksum flag should be forwarded to the download call."""
    mock_download_from_remote.return_value = "foo"
    download_utils.download_tar_file("a", "b", False, False, True)
    mock_download_from_remote.assert_called_once_with("a", "b", False, True)
    mock_untar.assert_called_once_with("foo", cleanup=False)
    _clean("a")
def test_extractall_unicode(mocker, mock_download_from_remote, mock_unzip):
    """extractall_unicode should restore non-ASCII member names from both
    mac- and windows-produced zips."""
    zip_files = ("tests/resources/utfissue.zip", "tests/resources/utfissuewin.zip")
    expected_files_all = (
        ["pic👨👩👧👦🎂.jpg", "Benoît.txt", "Icon"],
        ["pic👨👩👧👦🎂.jpg", "BenoŒt.txt", "Icon"],
    )
    for zipf, expected_files in zip(zip_files, expected_files_all):
        zfile = zipfile.ZipFile(zipf, "r")
        download_utils.extractall_unicode(zfile, os.path.dirname("tests/resources/"))
        zfile.close()
        for expected_file in expected_files:
            expected_file_location = os.path.join(
                "tests", "resources", "utfissue", expected_file
            )
            assert os.path.exists(expected_file_location)
            os.remove(expected_file_location)
def test_extractall_cp437(mocker, mock_download_from_remote, mock_unzip):
    """Plain ZipFile.extractall decodes names as cp437, so the correct
    unicode filenames should NOT appear on disk."""
    zfile = zipfile.ZipFile("tests/resources/utfissue.zip", "r")
    zfile.extractall(os.path.dirname("tests/resources/"))
    zfile.close()
    expected_files = ["pic👨👩👧👦🎂.jpg", "Benoît.txt", "Icon"]
    for expected_file in expected_files:
        expected_file_location = os.path.join("tests", "resources", expected_file)
        assert not os.path.exists(expected_file_location)
    shutil.rmtree(os.path.join("tests", "resources", "__MACOSX"))
    shutil.rmtree(os.path.join("tests", "resources", "utfissue"))
import os
from pathlib import Path
import shutil
import zipfile
import re
from mirdata import download_utils, core
import pytest
@pytest.fixture
def mock_download_from_remote(mocker):
    # Patch download_utils.download_from_remote; yields the MagicMock for call assertions.
    return mocker.patch.object(download_utils, "download_from_remote")
@pytest.fixture
def mock_downloader(mocker):
    # Patch the whole downloader entry point.
    return mocker.patch.object(download_utils, "downloader")
@pytest.fixture
def mock_untar(mocker):
    # Patch download_utils.untar so no real archives are extracted.
    return mocker.patch.object(download_utils, "untar")
@pytest.fixture
def mock_unzip(mocker):
    # Patch download_utils.unzip so no real archives are extracted.
    return mocker.patch.object(download_utils, "unzip")
@pytest.fixture
def mock_path(mocker, mock_download_from_remote):
    # Patch Path.mkdir so downloader tests don't create real directories;
    # depends on mock_download_from_remote so no real network calls happen either.
    return mocker.patch.object(Path, "mkdir")
def test_downloader(mocker, mock_path):
    """Check downloader() dispatches each remote type to the right handler
    (zip -> download_zip_file, tar -> download_tar_file, other -> download_from_remote)."""
    mock_zip = mocker.patch.object(download_utils, "download_zip_file")
    mock_tar = mocker.patch.object(download_utils, "download_tar_file")
    mock_download_from_remote = mocker.patch.object(
        download_utils, "download_from_remote"
    )

    zip_remote = download_utils.RemoteFileMetadata(
        filename="remote.zip", url="a", checksum=("1234")
    )
    tar_remote = download_utils.RemoteFileMetadata(
        filename="remote.tar.gz", url="a", checksum=("1234")
    )
    file_remote = download_utils.RemoteFileMetadata(
        filename="remote.txt", url="a", checksum=("1234")
    )
    index = core.Index("asdf.json")

    # Zip only
    download_utils.downloader("a", index=index, remotes={"b": zip_remote})
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mocker.resetall()

    # tar only
    download_utils.downloader("a", index=index, remotes={"b": tar_remote})
    mock_tar.assert_called_once_with(tar_remote, "a", False, False, False)
    mocker.resetall()

    # file only
    download_utils.downloader("a", index=index, remotes={"b": file_remote})
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mocker.resetall()

    # zip and tar
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": tar_remote}
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_tar.assert_called_once_with(tar_remote, "a", False, False, False)
    mocker.resetall()

    # zip and file
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": file_remote}
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mocker.resetall()

    # tar and file
    download_utils.downloader(
        "a", index=index, remotes={"b": tar_remote, "c": file_remote}
    )
    mock_tar.assert_called_once_with(tar_remote, "a", False, False, False)
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mocker.resetall()

    # zip and tar and file
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": tar_remote, "d": file_remote}
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mock_tar.assert_called_once_with(tar_remote, "a", False, False, False)
    mocker.resetall()

    # test partial download
    download_utils.downloader(
        "a",
        index=index,
        remotes={"b": zip_remote, "c": tar_remote, "d": file_remote},
        partial_download=["b", "d"],
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mocker.resetall()

    # test bad type partial download
    with pytest.raises(ValueError):
        download_utils.downloader(
            "a",
            index=index,
            remotes={"b": zip_remote, "c": tar_remote, "d": file_remote},
            partial_download="b",
        )

    with pytest.raises(ValueError):
        download_utils.downloader(
            "a",
            index=index,
            remotes={"b": zip_remote, "c": tar_remote, "d": file_remote},
            partial_download=["d", "e"],
        )

    # test info message
    download_utils.downloader("a", index=index, info_message="I am a message!")
    mocker.resetall()

    # test download twice - defaults
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": tar_remote, "d": file_remote}
    )
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": tar_remote, "d": file_remote}
    )

    # test download twice - cleanup=True
    download_utils.downloader(
        "a",
        index=index,
        remotes={"b": zip_remote, "c": tar_remote, "d": file_remote},
        cleanup=True,
    )
    download_utils.downloader(
        "a", index=index, remotes={"b": zip_remote, "c": tar_remote, "d": file_remote}
    )
def test_download_index_cases(mocker, mock_path):
    """Cover the interaction between `remotes`, `partial_download`, and Index
    objects that carry their own partial-download list or remote file."""
    mock_zip = mocker.patch.object(download_utils, "download_zip_file")
    mock_download_from_remote = mocker.patch.object(
        download_utils, "download_from_remote"
    )
    zip_remote = download_utils.RemoteFileMetadata(
        filename="remote.zip", url="a", checksum=("1234")
    )
    file_remote = download_utils.RemoteFileMetadata(
        filename="remote.txt", url="a", checksum=("1234")
    )
    index = core.Index("asdf.json")
    index_partial = core.Index("asdf.json", partial_download=["b"])
    remote_index = core.Index("asdf.json", url="b", checksum="4567")

    # no remotes, no remote index
    download_utils.downloader("a", index=index, remotes=None, partial_download=None)
    mock_zip.assert_not_called()
    mock_download_from_remote.assert_not_called()
    mocker.resetall()

    # remotes, partial download index
    download_utils.downloader(
        "a",
        index=index_partial,
        remotes={"b": zip_remote, "d": file_remote},
        partial_download=None,
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_download_from_remote.assert_not_called()
    mocker.resetall()

    # remotes, partial download index overwrite
    download_utils.downloader(
        "a",
        index=index_partial,
        remotes={"b": zip_remote, "d": file_remote},
        partial_download=["d"],
    )
    mock_zip.assert_not_called()
    mock_download_from_remote.assert_called_once_with(file_remote, "a", False, False)
    mocker.resetall()

    # no remotes, remote index
    download_utils.downloader(
        "a", index=remote_index, remotes=None, partial_download=None
    )
    mock_zip.assert_not_called()
    mock_download_from_remote.assert_called_once_with(
        remote_index.remote, "a", False, False
    )
    mocker.resetall()

    # remotes, remote index
    download_utils.downloader(
        "a", index=remote_index, remotes={"b": zip_remote}, partial_download=None
    )
    mock_zip.assert_called_once_with(zip_remote, "a", False, False, False)
    mock_download_from_remote.assert_called_once_with(
        remote_index.remote, "a", False, False
    )
    mocker.resetall()
def _clean(fpath):
if os.path.exists(fpath):
shutil.rmtree(fpath)
def test_downloader_with_server_file(httpserver):
    """End-to-end downloader run for a plain file, including wrong-checksum
    behavior with and without allow_invalid_checksum."""
    index = core.Index("asdf.json")
    httpserver.serve_content(open("tests/resources/remote.wav").read())
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.wav",
        url=httpserver.url,
        checksum=("3f77d0d69dc41b3696f074ad6bf2852f"),
    )
    save_dir = "tests/resources/tmp_download_test"

    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})

    _clean(save_dir)
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, cleanup=True
    )
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})

    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, force_overwrite=True
    )
    _clean(save_dir)

    # test with wrong checksum: raises error
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.wav", url=httpserver.url, checksum=("wrongchecksum")
    )
    with pytest.raises(OSError):
        _clean(save_dir)
        download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    with pytest.raises(OSError):
        _clean(save_dir)
        download_utils.downloader(
            save_dir, index=index, remotes={"b": TEST_REMOTE}, cleanup=True
        )
    with pytest.raises(OSError):
        _clean(save_dir)
        download_utils.downloader(
            save_dir, index=index, remotes={"b": TEST_REMOTE}, force_overwrite=True
        )

    # test with wrong checksum: ignore error
    with pytest.warns(UserWarning):
        _clean(save_dir)
        download_utils.downloader(
            save_dir,
            index=index,
            remotes={"b": TEST_REMOTE},
            allow_invalid_checksum=True,
        )
    with pytest.warns(UserWarning):
        _clean(save_dir)
        download_utils.downloader(
            save_dir,
            index=index,
            remotes={"b": TEST_REMOTE},
            cleanup=True,
            allow_invalid_checksum=True,
        )
    with pytest.warns(UserWarning):
        _clean(save_dir)
        download_utils.downloader(
            save_dir,
            index=index,
            remotes={"b": TEST_REMOTE},
            force_overwrite=True,
            allow_invalid_checksum=True,
        )
def test_downloader_with_server_zip(httpserver):
    """End-to-end downloader run against a local HTTP server serving a zip archive."""
    index = core.Index("asdf.json")
    httpserver.serve_content(open("tests/resources/remote.zip", "rb").read())
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.zip",
        url=httpserver.url,
        checksum=("7a31ccfa28bfa3fb112d16c96e9d9a89"),
    )
    save_dir = "tests/resources/_tmp_test_download_utils"

    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})

    _clean(save_dir)
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, cleanup=True
    )
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})

    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, force_overwrite=True
    )

    _clean(save_dir)
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, cleanup=True
    )
    # test downloading twice
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, force_overwrite=True
    )
    _clean(save_dir)
def test_downloader_with_server_tar(httpserver):
    """End-to-end downloader run against a local HTTP server serving a tar.gz archive."""
    index = core.Index("asdf.json")
    httpserver.serve_content(open("tests/resources/remote.tar.gz", "rb").read())
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.tar.gz",
        url=httpserver.url,
        checksum=("9042f5eebdcd0b94aa7a3c9bf12dc51d"),
    )
    save_dir = "tests/resources/_tmp_test_download_utils"

    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})

    _clean(save_dir)
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, cleanup=True
    )
    # test downloading twice
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})

    _clean(save_dir)
    download_utils.downloader(save_dir, index=index, remotes={"b": TEST_REMOTE})
    # test downloading twice
    download_utils.downloader(
        save_dir, index=index, remotes={"b": TEST_REMOTE}, force_overwrite=True
    )
    _clean(save_dir)
def test_download_from_remote(httpserver, tmpdir):
    """Download a file from a mock server and verify the returned path.

    Fix: the original assigned ``download_path`` without asserting anything,
    so a wrong return value could never fail the test.
    """
    httpserver.serve_content(open("tests/resources/remote.wav").read())
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.wav",
        url=httpserver.url,
        checksum=("3f77d0d69dc41b3696f074ad6bf2852f"),
    )
    download_path = download_utils.download_from_remote(
        TEST_REMOTE, str(tmpdir), False, False
    )
    # With no destination_dir the file should land directly in the save dir
    # (mirrors the assertion style of test_download_from_remote_destdir).
    assert download_path == os.path.join(str(tmpdir), "remote.wav")
def test_download_from_remote_destdir(httpserver, tmpdir):
    """A remote with destination_dir should be saved inside that subfolder."""
    httpserver.serve_content(open("tests/resources/remote.wav").read())
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.wav",
        url=httpserver.url,
        checksum=("3f77d0d69dc41b3696f074ad6bf2852f"),
        destination_dir="subfolder",
    )
    download_path = download_utils.download_from_remote(
        TEST_REMOTE, str(tmpdir), False, False
    )
    expected_download_path = os.path.join(str(tmpdir), "subfolder", "remote.wav")
    assert expected_download_path == download_path
def test_download_from_remote_raises_IOError(httpserver, tmpdir):
    """A 404 response from the server should surface as an IOError."""
    httpserver.serve_content("File not found!", 404)
    TEST_REMOTE = download_utils.RemoteFileMetadata(
        filename="remote.wav", url=httpserver.url, checksum=("1234")
    )
    with pytest.raises(IOError):
        download_utils.download_from_remote(TEST_REMOTE, str(tmpdir), False, False)
def test_unzip():
    """Extract a fixture zip without deleting the archive, then tidy up."""
    download_utils.unzip("tests/resources/file.zip", cleanup=False)
    expected_file_location = os.path.join("tests", "resources", "file.txt")
    assert os.path.exists(expected_file_location)
    os.remove(expected_file_location)
def test_untar():
    """Untar a fixture archive with cleanup disabled and verify the extracted file.

    Fix: also remove the extracted ``file`` directory, which the original left
    behind in tests/resources and could pollute later runs.
    """
    download_utils.untar("tests/resources/file.tar.gz", cleanup=False)
    expected_file_location = os.path.join("tests", "resources", "file", "file.txt")
    assert os.path.exists(expected_file_location)
    os.remove(expected_file_location)
    # The tarball extracts into its own directory; remove it too so the
    # resources folder is left exactly as before the test.
    shutil.rmtree(os.path.join("tests", "resources", "file"), ignore_errors=True)
def test_download_zip_file(mocker, mock_download_from_remote, mock_unzip):
    """download_zip_file should download, then unzip with cleanup disabled."""
    mock_download_from_remote.return_value = "foo"
    download_utils.download_zip_file("a", "b", False, False, False)
    mock_download_from_remote.assert_called_once_with("a", "b", False, False)
    mock_unzip.assert_called_once_with("foo", cleanup=False)
    _clean("a")
def test_download_zip_file_ignorechecksum(
    mocker, mock_download_from_remote, mock_unzip
):
    """The allow-invalid-checksum flag should be forwarded to the download call."""
    mock_download_from_remote.return_value = "foo"
    download_utils.download_zip_file("a", "b", False, False, True)
    mock_download_from_remote.assert_called_once_with("a", "b", False, True)
    mock_unzip.assert_called_once_with("foo", cleanup=False)
    _clean("a")
def test_download_tar_file(mocker, mock_download_from_remote, mock_untar):
    """download_tar_file should download, then untar with cleanup disabled."""
    mock_download_from_remote.return_value = "foo"
    download_utils.download_tar_file("a", "b", False, False, False)
    mock_download_from_remote.assert_called_once_with("a", "b", False, False)
    mock_untar.assert_called_once_with("foo", cleanup=False)
    _clean("a")
def test_download_tar_file_ignorechecksum(
    mocker, mock_download_from_remote, mock_untar
):
    """The allow-invalid-checksum flag should be forwarded to the download call."""
    mock_download_from_remote.return_value = "foo"
    download_utils.download_tar_file("a", "b", False, False, True)
    mock_download_from_remote.assert_called_once_with("a", "b", False, True)
    mock_untar.assert_called_once_with("foo", cleanup=False)
    _clean("a")
def test_extractall_unicode(mocker, mock_download_from_remote, mock_unzip):
    """extractall_unicode should restore non-ASCII member names from both
    mac- and windows-produced zips."""
    zip_files = ("tests/resources/utfissue.zip", "tests/resources/utfissuewin.zip")
    expected_files_all = (
        ["pic👨👩👧👦🎂.jpg", "Benoît.txt", "Icon"],
        ["pic👨👩👧👦🎂.jpg", "BenoŒt.txt", "Icon"],
    )
    for zipf, expected_files in zip(zip_files, expected_files_all):
        zfile = zipfile.ZipFile(zipf, "r")
        download_utils.extractall_unicode(zfile, os.path.dirname("tests/resources/"))
        zfile.close()
        for expected_file in expected_files:
            expected_file_location = os.path.join(
                "tests", "resources", "utfissue", expected_file
            )
            assert os.path.exists(expected_file_location)
            os.remove(expected_file_location)
def test_extractall_cp437(mocker, mock_download_from_remote, mock_unzip):
    """Plain ZipFile.extractall decodes names as cp437, so the correct
    unicode filenames should NOT appear on disk."""
    zfile = zipfile.ZipFile("tests/resources/utfissue.zip", "r")
    zfile.extractall(os.path.dirname("tests/resources/"))
    zfile.close()
    expected_files = ["pic👨👩👧👦🎂.jpg", "Benoît.txt", "Icon"]
    for expected_file in expected_files:
        expected_file_location = os.path.join("tests", "resources", expected_file)
        assert not os.path.exists(expected_file_location)
    shutil.rmtree(os.path.join("tests", "resources", "__MACOSX"))
    shutil.rmtree(os.path.join("tests", "resources", "utfissue"))
import argparse
import logging
import operator
import os
import re
import six
import subprocess
import sys
import yaml
this_dir = os.path.realpath(os.path.dirname(__file__))
sys.path.append(os.path.realpath(os.path.join(os.pardir, os.pardir)))
from geodata.address_formatting.formatter import AddressFormatter
from geodata.coordinates.conversion import latlon_to_decimal
from geodata.encoding import safe_decode
from geodata.file_utils import ensure_dir, download_file
from geodata.i18n.unicode_properties import get_chars_by_script
from geodata.i18n.word_breaks import ideographic_scripts
from geodata.names.deduping import NameDeduper
from geodata.osm.admin_boundaries import OSMNeighborhoodPolygonReader
from geodata.osm.components import osm_address_components
from geodata.osm.definitions import osm_definitions
from geodata.osm.extract import parse_osm, osm_type_and_id, NODE, WAY, RELATION, OSM_NAME_TAGS
from geodata.polygons.index import *
from geodata.polygons.reverse_geocode import QuattroshapesReverseGeocoder, OSMCountryReverseGeocoder, OSMReverseGeocoder
from geodata.statistics.tf_idf import IDFIndex
class NeighborhoodDeduper(NameDeduper):
    """Name deduper tuned for neighborhood names.

    Defines lossless token replacements, tokens that make otherwise similar
    names distinct (numerals, directionals, qualifiers), and stopwords
    ignored when comparing names.
    """
    # Lossless conversions only
    replacements = {
        u'saint': u'st',
        u'and': u'&',
        u'〇': u'0',
        u'一': u'1',
        u'二': u'2',
        u'三': u'3',
        u'四': u'4',
        u'五': u'5',
        u'六': u'6',
        u'七': u'7',
        u'八': u'8',
        u'九': u'9',
        u'十': u'10',
    }

    # Tokens that, when different, indicate two distinct places.
    discriminative_words = set([
        # Han numbers
        u'〇', u'一',
        u'二', u'三',
        u'四', u'五',
        u'六', u'七',
        u'八', u'九',
        u'十', u'百',
        u'千', u'万',
        u'億', u'兆',
        u'京', u'第',

        # Roman numerals
        u'i', u'ii',
        u'iii', u'iv',
        u'v', u'vi',
        u'vii', u'viii',
        u'ix', u'x',
        u'xi', u'xii',
        u'xiii', u'xiv',
        u'xv', u'xvi',
        u'xvii', u'xviii',
        u'xix', u'xx',

        # English directionals
        u'north', u'south',
        u'east', u'west',
        u'northeast', u'northwest',
        u'southeast', u'southwest',

        # Spanish, Portguese and Italian directionals
        u'norte', u'nord', u'sur', u'sul', u'sud',
        u'est', u'este', u'leste', u'oeste', u'ovest',

        # New in various languages
        u'new',
        u'nova',
        u'novo',
        u'nuevo',
        u'nueva',
        u'nuovo',
        u'nuova',

        # Qualifiers
        u'heights',
        u'hills',
        u'upper', u'lower',
        u'little', u'great',
        u'park',
        u'parque',
        u'village',
    ])

    # Common filler tokens ignored during comparison.
    stopwords = set([
        u'cp',
        u'de',
        u'la',
        u'urbanizacion',
        u'do',
        u'da',
        u'dos',
        u'del',
        u'community',
        u'bairro',
        u'barrio',
        u'le',
        u'el',
        u'mah',
        u'раион',
        u'vila',
        u'villa',
        u'kampung',
        u'ahupua`a',
    ])
class ClickThatHoodReverseGeocoder(GeohashPolygonIndex):
    """Polygon index built from the ClickThatHood neighborhood GeoJSON files.

    Clones the codeforamerica/click_that_hood repo and indexes every file
    listed in resources/neighborhoods/click_that_hood.yaml.
    """
    persistent_polygons = False
    cache_size = 0

    SCRATCH_DIR = '/tmp'

    # Contains accurate boundaries for neighborhoods sans weird GeoPlanet names like "Adelphi" or "Crown Heights South"
    NEIGHBORHOODS_REPO = 'https://github.com/codeforamerica/click_that_hood'

    config_path = os.path.join(this_dir, os.pardir, os.pardir, os.pardir,
                               'resources', 'neighborhoods', 'click_that_hood.yaml')

    # Fix: yaml.load without an explicit Loader is unsafe/deprecated; use
    # safe_load, and close the config file instead of leaking the handle.
    with open(config_path) as _config_file:
        config = yaml.safe_load(_config_file)
    del _config_file

    @classmethod
    def clone_repo(cls, path):
        """Fresh-clone the ClickThatHood repo into ``path``, removing any old copy."""
        subprocess.check_call(['rm', '-rf', path])
        subprocess.check_call(['git', 'clone', cls.NEIGHBORHOODS_REPO, path])

    @classmethod
    def create_neighborhoods_index(cls):
        """Clone the repo and build a polygon index of all configured GeoJSON files."""
        scratch_dir = cls.SCRATCH_DIR
        repo_path = os.path.join(scratch_dir, 'click_that_hood')
        cls.clone_repo(repo_path)
        data_path = os.path.join(repo_path, 'public', 'data')

        neighborhoods_dir = os.path.join(scratch_dir, 'neighborhoods')
        ensure_dir(neighborhoods_dir)

        index = cls(save_dir=neighborhoods_dir)

        for c in cls.config['files']:
            filename = c['filename']
            component = c['component']
            path = os.path.join(data_path, filename)
            # Close the GeoJSON file after parsing (the original leaked it).
            with open(path) as geojson_file:
                features = json.load(geojson_file)['features']
            # Tag every feature with its address component (suburb, etc.).
            for f in features:
                f['properties']['component'] = component
            try:
                index.add_geojson_like_file(features)
            except ValueError:
                continue

        return index
class OSMNeighborhoodReverseGeocoder(OSMReverseGeocoder):
    """Reverse geocoder over OSM neighborhood polygons.

    Fix: the original assigned ``cache_size`` twice (10000, then 0); the first
    assignment was dead code, so only the effective value of 0 is kept.
    """
    persistent_polygons = False
    simplify_polygons = False
    polygon_reader = OSMNeighborhoodPolygonReader
    include_property_patterns = OSMReverseGeocoder.include_property_patterns | set(['postal_code'])

    # Effective value in the original (second of two duplicate assignments).
    cache_size = 0

    SCRATCH_DIR = '/tmp'

    @classmethod
    def create_neighborhoods_index(cls, osm_neighborhoods_file):
        """Build and save an index from a pre-filtered OSM neighborhoods file."""
        scratch_dir = cls.SCRATCH_DIR

        neighborhoods_dir = os.path.join(scratch_dir, 'neighborhoods', 'index')
        ensure_dir(neighborhoods_dir)

        return cls.create_from_osm_file(osm_neighborhoods_file, output_dir=neighborhoods_dir)
class NeighborhoodReverseGeocoder(RTreePolygonIndex):
'''
Neighborhoods are very important in cities like NYC, SF, Chicago, London
and many others. We want the address parser to be trained with addresses
that sufficiently capture variations in address patterns, including
neighborhoods. Quattroshapes neighborhood data (in the US at least)
is not great in terms of names, mostly becasue GeoPlanet has so many
incorrect names. The neighborhoods project, also known as ClickThatHood
has very accurate polygons with correct names, but only for a handful
of cities. OSM usually lists neighborhoods and some other local admin
areas like boroughs as points rather than polygons.
This index merges all of the above data sets in prioritized order
(ClickThatHood > OSM > Quattroshapes) to provide unified point-in-polygon
tests for neighborhoods. The properties vary by source but each has
source has least a "name" key which in practice is what we care about.
'''
PRIORITIES_FILENAME = 'priorities.json'
DUPE_THRESHOLD = 0.9
persistent_polygons = True
cache_size = 100000
source_priorities = {
'osm': 0, # Best names/polygons, same coordinate system
'osm_cth': 1, # Prefer the OSM names if possible
'clickthathood': 2, # Better names/polygons than Quattroshapes
'osm_quattro': 3, # Prefer OSM names matched with Quattroshapes polygon
'quattroshapes': 4, # Good results in some countries/areas
}
level_priorities = {
'neighborhood': 0,
'local_admin': 1,
}
regex_replacements = [
# Paris arrondissements, listed like "PARIS-1ER-ARRONDISSEMENT" in Quqttroshapes
(re.compile('^paris-(?=[\d])', re.I), ''),
(re.compile('^prague(?= [\d]+$)', re.I), 'Praha'),
]
quattroshapes_city_district_patterns = [
six.u('Praha [\d]+'),
]
quattroshapes_city_district_regex = re.compile('|'.join([six.u('^\s*{}\s*$').format(p) for p in quattroshapes_city_district_patterns]), re.I | re.U)
@classmethod
def count_words(cls, s):
doc = defaultdict(int)
for t, c in NeighborhoodDeduper.content_tokens(s):
doc[t] += 1
return doc
@classmethod
def create_from_osm_and_quattroshapes(cls, filename, quattroshapes_dir, country_rtree_dir, osm_rtree_dir, osm_neighborhood_borders_file, output_dir):
'''
Given an OSM file (planet or some other bounds) containing neighborhoods
as points (some suburbs have boundaries)
and their dependencies, create an R-tree index for coarse-grained
reverse geocoding.
Note: the input file is expected to have been created using
osmfilter. Use fetch_osm_address_data.sh for planet or copy the
admin borders commands if using other geometries.
'''
index = cls(save_dir=output_dir)
logger = logging.getLogger('neighborhoods')
qs_scratch_dir = os.path.join(quattroshapes_dir, 'qs_neighborhoods')
ensure_dir(qs_scratch_dir)
logger.info('Creating ClickThatHood neighborhoods')
cth = ClickThatHoodReverseGeocoder.create_neighborhoods_index()
logger.info('Creating OSM neighborhoods')
osmn = OSMNeighborhoodReverseGeocoder.create_neighborhoods_index(osm_neighborhood_borders_file)
logger.info('Creating Quattroshapes neighborhoods')
qs = QuattroshapesNeighborhoodsReverseGeocoder.create_neighborhoods_index(quattroshapes_dir, qs_scratch_dir)
country_rtree = OSMCountryReverseGeocoder.load(country_rtree_dir)
osm_admin_rtree = OSMReverseGeocoder.load(osm_rtree_dir)
osm_admin_rtree.cache_size = 1000
logger.info('Creating IDF index')
idf = IDFIndex()
char_scripts = get_chars_by_script()
for idx in (cth, qs, osmn):
for i in xrange(idx.i):
props = idx.get_properties(i)
name = props.get('name')
if name is not None:
doc = cls.count_words(name)
idf.update(doc)
for key, attrs, deps in parse_osm(filename):
for k, v in six.iteritems(attrs):
if any((k.startswith(name_key) for name_key in OSM_NAME_TAGS)):
doc = cls.count_words(v)
idf.update(doc)
for i in six.moves.xrange(osmn.i):
props = osmn.get_properties(i)
poly = osmn.get_polygon(i)
props['source'] = 'osm'
props['component'] = AddressFormatter.SUBURB
props['polygon_type'] = 'neighborhood'
index.index_polygon(poly.context)
index.add_polygon(poly.context, props)
qs.matched = [False] * qs.i
cth.matched = [False] * cth.i
logger.info('Matching OSM points to neighborhood polygons')
# Parse OSM and match neighborhood/suburb points to Quattroshapes/ClickThatHood polygons
num_polys = 0
for element_id, attrs, deps in parse_osm(filename):
try:
lat, lon = latlon_to_decimal(attrs['lat'], attrs['lon'])
except ValueError:
continue
osm_name = attrs.get('name')
if not osm_name:
continue
id_type, element_id = element_id.split(':')
element_id = long(element_id)
props['type'] = id_type
props['id'] = element_id
possible_neighborhood = osm_definitions.meets_definition(attrs, osm_definitions.EXTENDED_NEIGHBORHOOD)
is_neighborhood = osm_definitions.meets_definition(attrs, osm_definitions.NEIGHBORHOOD)
country, candidate_languages = country_rtree.country_and_languages(lat, lon)
component_name = None
component_name = osm_address_components.component_from_properties(country, attrs)
ranks = []
osm_names = []
for key in OSM_NAME_TAGS:
name = attrs.get(key)
if name:
osm_names.append(name)
for name_key in OSM_NAME_TAGS:
osm_names.extend([v for k, v in six.iteritems(attrs) if k.startswith('{}:'.format(name_key))])
for idx in (cth, qs):
candidates = idx.get_candidate_polygons(lat, lon, return_all=True)
if candidates:
max_sim = 0.0
arg_max = None
normalized_qs_names = {}
for osm_name in osm_names:
contains_ideographs = any(((char_scripts[ord(c)] or '').lower() in ideographic_scripts
for c in safe_decode(osm_name)))
for i in candidates:
props = idx.get_properties(i)
name = normalized_qs_names.get(i)
if not name:
name = props.get('name')
if not name:
continue
for pattern, repl in cls.regex_replacements:
name = pattern.sub(repl, name)
normalized_qs_names[i] = name
if is_neighborhood and idx is qs and props.get(QuattroshapesReverseGeocoder.LEVEL) != 'neighborhood':
continue
if not contains_ideographs:
sim = NeighborhoodDeduper.compare(osm_name, name, idf)
else:
# Many Han/Hangul characters are common, shouldn't use IDF
sim = NeighborhoodDeduper.compare_ideographs(osm_name, name)
if sim > max_sim:
max_sim = sim
poly = idx.get_polygon(i)
arg_max = (max_sim, props, poly.context, idx, i)
if arg_max:
ranks.append(arg_max)
ranks.sort(key=operator.itemgetter(0), reverse=True)
if ranks and ranks[0][0] >= cls.DUPE_THRESHOLD:
score, props, poly, idx, i = ranks[0]
existing_osm_boundaries = osm_admin_rtree.point_in_poly(lat, lon, return_all=True)
existing_neighborhood_boundaries = osmn.point_in_poly(lat, lon, return_all=True)
skip_node = False
for boundaries in (existing_osm_boundaries, existing_neighborhood_boundaries):
for poly_index, osm_props in enumerate(boundaries):
containing_component = None
name = osm_props.get('name')
# Only exact name matches here since we're comparins OSM to OSM
if name and name.lower() != attrs.get('name', '').lower():
continue
if boundaries is existing_neighborhood_boundaries:
containing_component = AddressFormatter.SUBURB
skip_node = True
break
else:
containing_ids = [(boundary['type'], boundary['id']) for boundary in existing_osm_boundaries[poly_index + 1:]]
containing_component = osm_address_components.component_from_properties(country, osm_props, containing=containing_ids)
if containing_component and containing_component != component_name and AddressFormatter.component_order[containing_component] <= AddressFormatter.component_order[AddressFormatter.CITY]:
skip_node = True
break
if skip_node:
break
# Skip this element
if skip_node:
continue
if idx is cth:
if props['component'] == AddressFormatter.SUBURB:
attrs['polygon_type'] = 'neighborhood'
elif props['component'] == AddressFormatter.CITY_DISTRICT:
attrs['polygon_type'] = 'local_admin'
else:
continue
source = 'osm_cth'
else:
level = props.get(QuattroshapesReverseGeocoder.LEVEL, None)
source = 'osm_quattro'
if level == 'neighborhood':
attrs['polygon_type'] = 'neighborhood'
else:
attrs['polygon_type'] = 'local_admin'
containing_ids = [(boundary['type'], boundary['id']) for boundary in existing_osm_boundaries]
component = osm_address_components.component_from_properties(country, attrs, containing=containing_ids)
attrs['component'] = component
attrs['source'] = source
index.index_polygon(poly)
index.add_polygon(poly, attrs)
idx.matched[i] = True
num_polys += 1
if num_polys % 1000 == 0 and num_polys > 0:
logger.info('did {} neighborhoods'.format(num_polys))
for idx, source in ((cth, 'clickthathood'), (qs, 'quattroshapes')):
for i in xrange(idx.i):
props = idx.get_properties(i)
poly = idx.get_polygon(i)
if idx.matched[i]:
continue
props['source'] = source
if idx is cth:
component = props['component']
if component == AddressFormatter.SUBURB:
props['polygon_type'] = 'neighborhood'
elif component == AddressFormatter.CITY_DISTRICT:
props['polygon_type'] = 'local_admin'
else:
continue
elif props.get(QuattroshapesReverseGeocoder.LEVEL, None) == 'neighborhood':
component = AddressFormatter.SUBURB
name = props.get('name')
if not name:
continue
for pattern, repl in cls.regex_replacements:
name = pattern.sub(repl, name)
props['name'] = name
if cls.quattroshapes_city_district_regex.match(name):
component = AddressFormatter.CITY_DISTRICT
props['component'] = component
props['polygon_type'] = 'neighborhood'
else:
# We don't actually care about local admin polygons unless they match OSM
continue
index.index_polygon(poly.context)
index.add_polygon(poly.context, props)
return index
def setup(self):
# Called when the index is constructed: start with an empty priorities list,
# kept parallel (by position) to the polygons added to the index.
self.priorities = []
def index_polygon_properties(self, properties):
# Record a (level_priority, source_priority) tuple for this polygon using the
# class-level level_priorities/source_priorities tables; lower tuples rank first
# when candidate polygons are sorted.
self.priorities.append((self.level_priorities[properties['polygon_type']], self.source_priorities[properties['source']]))
def load_polygon_properties(self, d):
    """Load the per-polygon priority tuples saved in directory *d*.

    Reads PRIORITIES_FILENAME (JSON list of [level, source] pairs) and
    restores self.priorities as a list of tuples.

    Fix: the file handle was previously opened without ever being closed;
    a with-statement now guarantees deterministic cleanup.
    """
    with open(os.path.join(d, self.PRIORITIES_FILENAME)) as f:
        self.priorities = [tuple(p) for p in json.load(f)]
def save_polygon_properties(self, d):
    """Persist self.priorities as JSON under directory *d*.

    Counterpart of load_polygon_properties; writes PRIORITIES_FILENAME.

    Fix: the file handle was previously opened without ever being closed,
    so buffered output could be lost; a with-statement flushes and closes it.
    """
    with open(os.path.join(d, self.PRIORITIES_FILENAME), 'w') as f:
        json.dump(self.priorities, f)
def priority(self, i):
# Sort key for polygon i: its (level_priority, source_priority) tuple.
return self.priorities[i]
def get_candidate_polygons(self, lat, lon):
# Fetch candidates from the base R-tree index, then order them best-first
# by their recorded (level, source) priority tuple (lower sorts first).
candidates = super(NeighborhoodReverseGeocoder, self).get_candidate_polygons(lat, lon)
return sorted(candidates, key=self.priority)
class QuattroshapesNeighborhoodsReverseGeocoder(GeohashPolygonIndex, QuattroshapesReverseGeocoder):
# Geohash-bucketed reverse geocoder over the Quattroshapes local-admin and
# neighborhoods shapefiles.
persistent_polygons = False
cache_size = None
@classmethod
def create_neighborhoods_index(cls, quattroshapes_dir,
output_dir,
index_filename=None,
polys_filename=DEFAULT_POLYS_FILENAME):
# Build the index from the two shapefiles under quattroshapes_dir; the
# actual file names come from cls.LOCAL_ADMIN_FILENAME and
# cls.NEIGHBORHOODS_FILENAME (declared on a base class, not visible here).
local_admin_filename = os.path.join(quattroshapes_dir, cls.LOCAL_ADMIN_FILENAME)
neighborhoods_filename = os.path.join(quattroshapes_dir, cls.NEIGHBORHOODS_FILENAME)
return cls.create_from_shapefiles([local_admin_filename, neighborhoods_filename],
output_dir, index_filename=index_filename,
polys_filename=polys_filename)
if __name__ == '__main__':
# Handle argument parsing here
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--quattroshapes-dir',
help='Path to quattroshapes dir')
parser.add_argument('-a', '--osm-admin-rtree-dir',
help='Path to OSM admin rtree dir')
parser.add_argument('-c', '--country-rtree-dir',
help='Path to country rtree dir')
parser.add_argument('-b', '--osm-neighborhood-borders-file',
help='Path to OSM neighborhood borders file (with dependencies, .osm format)')
parser.add_argument('-n', '--osm-neighborhoods-file',
help='Path to OSM neighborhoods file (no dependencies, .osm format)')
parser.add_argument('-o', '--out-dir',
default=os.getcwd(),
help='Output directory')
logging.basicConfig(level=logging.INFO)
args = parser.parse_args()
if args.osm_neighborhoods_file and args.quattroshapes_dir and args.osm_admin_rtree_dir and args.country_rtree_dir and args.osm_neighborhood_borders_file:
index = NeighborhoodReverseGeocoder.create_from_osm_and_quattroshapes(
args.osm_neighborhoods_file,
args.quattroshapes_dir,
args.country_rtree_dir,
args.osm_admin_rtree_dir,
args.osm_neighborhood_borders_file,
args.out_dir
)
else:
parser.error('Must specify quattroshapes dir or osm admin borders file')
index.save() | scripts/geodata/neighborhoods/reverse_geocode.py | import argparse
import logging
import operator
import os
import re
import six
import subprocess
import sys
import yaml
this_dir = os.path.realpath(os.path.dirname(__file__))
sys.path.append(os.path.realpath(os.path.join(os.pardir, os.pardir)))
from geodata.address_formatting.formatter import AddressFormatter
from geodata.coordinates.conversion import latlon_to_decimal
from geodata.encoding import safe_decode
from geodata.file_utils import ensure_dir, download_file
from geodata.i18n.unicode_properties import get_chars_by_script
from geodata.i18n.word_breaks import ideographic_scripts
from geodata.names.deduping import NameDeduper
from geodata.osm.admin_boundaries import OSMNeighborhoodPolygonReader
from geodata.osm.components import osm_address_components
from geodata.osm.definitions import osm_definitions
from geodata.osm.extract import parse_osm, osm_type_and_id, NODE, WAY, RELATION, OSM_NAME_TAGS
from geodata.polygons.index import *
from geodata.polygons.reverse_geocode import QuattroshapesReverseGeocoder, OSMCountryReverseGeocoder, OSMReverseGeocoder
from geodata.statistics.tf_idf import IDFIndex
class NeighborhoodDeduper(NameDeduper):
# Lossless conversions only
replacements = {
u'saint': u'st',
u'and': u'&',
u'〇': u'0',
u'一': u'1',
u'二': u'2',
u'三': u'3',
u'四': u'4',
u'五': u'5',
u'六': u'6',
u'七': u'7',
u'八': u'8',
u'九': u'9',
u'十': u'10',
}
discriminative_words = set([
# Han numbers
u'〇', u'一',
u'二', u'三',
u'四', u'五',
u'六', u'七',
u'八', u'九',
u'十', u'百',
u'千', u'万',
u'億', u'兆',
u'京', u'第',
# Roman numerals
u'i', u'ii',
u'iii', u'iv',
u'v', u'vi',
u'vii', u'viii',
u'ix', u'x',
u'xi', u'xii',
u'xiii', u'xiv',
u'xv', u'xvi',
u'xvii', u'xviii',
u'xix', u'xx',
# English directionals
u'north', u'south',
u'east', u'west',
u'northeast', u'northwest',
u'southeast', u'southwest',
# Spanish, Portguese and Italian directionals
u'norte', u'nord', u'sur', u'sul', u'sud',
u'est', u'este', u'leste', u'oeste', u'ovest',
# New in various languages
u'new',
u'nova',
u'novo',
u'nuevo',
u'nueva',
u'nuovo',
u'nuova',
# Qualifiers
u'heights',
u'hills',
u'upper', u'lower',
u'little', u'great',
u'park',
u'parque',
u'village',
])
stopwords = set([
u'cp',
u'de',
u'la',
u'urbanizacion',
u'do',
u'da',
u'dos',
u'del',
u'community',
u'bairro',
u'barrio',
u'le',
u'el',
u'mah',
u'раион',
u'vila',
u'villa',
u'kampung',
u'ahupua`a',
])
class ClickThatHoodReverseGeocoder(GeohashPolygonIndex):
    """Reverse geocoder built from the ClickThatHood neighborhood polygons.

    Clones the codeforamerica/click_that_hood repo and indexes every GeoJSON
    file listed in resources/neighborhoods/click_that_hood.yaml, tagging each
    feature with the address component configured for its file.
    """
    persistent_polygons = False
    cache_size = 0

    SCRATCH_DIR = '/tmp'

    # Contains accurate boundaries for neighborhoods sans weird GeoPlanet names like "Adelphi" or "Crown Heights South"
    NEIGHBORHOODS_REPO = 'https://github.com/codeforamerica/click_that_hood'

    config_path = os.path.join(this_dir, os.pardir, os.pardir, os.pardir,
                               'resources', 'neighborhoods', 'click_that_hood.yaml')
    # Fix: use yaml.safe_load (the config is plain data; yaml.load without a
    # Loader allows arbitrary object construction) and close the handle
    # deterministically instead of leaking it.
    with open(config_path) as _config_file:
        config = yaml.safe_load(_config_file)
    del _config_file

    @classmethod
    def clone_repo(cls, path):
        """Remove any existing checkout at *path* and clone a fresh copy of the repo."""
        subprocess.check_call(['rm', '-rf', path])
        subprocess.check_call(['git', 'clone', cls.NEIGHBORHOODS_REPO, path])

    @classmethod
    def create_neighborhoods_index(cls):
        """Clone the ClickThatHood repo and build a polygon index of every
        configured GeoJSON file, stamping each feature with its component."""
        scratch_dir = cls.SCRATCH_DIR
        repo_path = os.path.join(scratch_dir, 'click_that_hood')
        cls.clone_repo(repo_path)
        data_path = os.path.join(repo_path, 'public', 'data')
        neighborhoods_dir = os.path.join(scratch_dir, 'neighborhoods')
        ensure_dir(neighborhoods_dir)
        index = cls(save_dir=neighborhoods_dir)
        for c in cls.config['files']:
            filename = c['filename']
            component = c['component']
            path = os.path.join(data_path, filename)
            # Fix: close the GeoJSON file handle (was open(...) with no close)
            with open(path) as geojson_file:
                features = json.load(geojson_file)['features']
            for feature in features:
                feature['properties']['component'] = component
            try:
                index.add_geojson_like_file(features)
            except ValueError:
                # Skip files the index cannot ingest, as before
                continue
        return index
class OSMNeighborhoodReverseGeocoder(OSMReverseGeocoder):
    """Reverse geocoder over OSM neighborhood polygons (suburbs that do have boundaries)."""
    persistent_polygons = False
    # Fix: cache_size was assigned twice in the original class body (10000,
    # then 0); only the last assignment ever took effect, so the dead first
    # assignment is removed and the effective value of 0 is kept.
    cache_size = 0
    simplify_polygons = False
    polygon_reader = OSMNeighborhoodPolygonReader
    # Neighborhood polygons may also carry postal codes
    include_property_patterns = OSMReverseGeocoder.include_property_patterns | set(['postal_code'])

    SCRATCH_DIR = '/tmp'

    @classmethod
    def create_neighborhoods_index(cls, osm_neighborhoods_file):
        """Build an index from an .osm neighborhoods file, persisted under SCRATCH_DIR."""
        scratch_dir = cls.SCRATCH_DIR
        neighborhoods_dir = os.path.join(scratch_dir, 'neighborhoods', 'index')
        ensure_dir(neighborhoods_dir)
        return cls.create_from_osm_file(osm_neighborhoods_file, output_dir=neighborhoods_dir)
class NeighborhoodReverseGeocoder(RTreePolygonIndex):
'''
Neighborhoods are very important in cities like NYC, SF, Chicago, London
and many others. We want the address parser to be trained with addresses
that sufficiently capture variations in address patterns, including
neighborhoods. Quattroshapes neighborhood data (in the US at least)
is not great in terms of names, mostly becasue GeoPlanet has so many
incorrect names. The neighborhoods project, also known as ClickThatHood
has very accurate polygons with correct names, but only for a handful
of cities. OSM usually lists neighborhoods and some other local admin
areas like boroughs as points rather than polygons.
This index merges all of the above data sets in prioritized order
(ClickThatHood > OSM > Quattroshapes) to provide unified point-in-polygon
tests for neighborhoods. The properties vary by source but each has
source has least a "name" key which in practice is what we care about.
'''
PRIORITIES_FILENAME = 'priorities.json'
DUPE_THRESHOLD = 0.9
persistent_polygons = True
cache_size = 100000
source_priorities = {
'osm': 0, # Best names/polygons, same coordinate system
'osm_cth': 1, # Prefer the OSM names if possible
'clickthathood': 2, # Better names/polygons than Quattroshapes
'osm_quattro': 3, # Prefer OSM names matched with Quattroshapes polygon
'quattroshapes': 4, # Good results in some countries/areas
}
level_priorities = {
'neighborhood': 0,
'local_admin': 1,
}
regex_replacements = [
# Paris arrondissements, listed like "PARIS-1ER-ARRONDISSEMENT" in Quqttroshapes
(re.compile('^paris-(?=[\d])', re.I), ''),
(re.compile('^prague(?= [\d]+$)', re.I), 'Praha'),
]
quattroshapes_city_district_patterns = [
six.u('Praha [\d]+'),
]
quattroshapes_city_district_regex = re.compile('|'.join([six.u('^\s*{}\s*$').format(p) for p in quattroshapes_city_district_patterns]), re.I | re.U)
@classmethod
def count_words(cls, s):
doc = defaultdict(int)
for t, c in NeighborhoodDeduper.content_tokens(s):
doc[t] += 1
return doc
@classmethod
def create_from_osm_and_quattroshapes(cls, filename, quattroshapes_dir, country_rtree_dir, osm_rtree_dir, osm_neighborhood_borders_file, output_dir):
'''
Given an OSM file (planet or some other bounds) containing neighborhoods
as points (some suburbs have boundaries)
and their dependencies, create an R-tree index for coarse-grained
reverse geocoding.
Note: the input file is expected to have been created using
osmfilter. Use fetch_osm_address_data.sh for planet or copy the
admin borders commands if using other geometries.
'''
index = cls(save_dir=output_dir)
logger = logging.getLogger('neighborhoods')
qs_scratch_dir = os.path.join(quattroshapes_dir, 'qs_neighborhoods')
ensure_dir(qs_scratch_dir)
logger.info('Creating ClickThatHood neighborhoods')
cth = ClickThatHoodReverseGeocoder.create_neighborhoods_index()
logger.info('Creating OSM neighborhoods')
osmn = OSMNeighborhoodReverseGeocoder.create_neighborhoods_index(osm_neighborhood_borders_file)
logger.info('Creating Quattroshapes neighborhoods')
qs = QuattroshapesNeighborhoodsReverseGeocoder.create_neighborhoods_index(quattroshapes_dir, qs_scratch_dir)
country_rtree = OSMCountryReverseGeocoder.load(country_rtree_dir)
osm_admin_rtree = OSMReverseGeocoder.load(osm_rtree_dir)
osm_admin_rtree.cache_size = 1000
logger.info('Creating IDF index')
idf = IDFIndex()
char_scripts = get_chars_by_script()
for idx in (cth, qs, osmn):
for i in xrange(idx.i):
props = idx.get_properties(i)
name = props.get('name')
if name is not None:
doc = cls.count_words(name)
idf.update(doc)
for key, attrs, deps in parse_osm(filename):
for k, v in six.iteritems(attrs):
if any((k.startswith(name_key) for name_key in OSM_NAME_TAGS)):
doc = cls.count_words(v)
idf.update(doc)
for i in six.moves.xrange(osmn.i):
props = osmn.get_properties(i)
poly = osmn.get_polygon(i)
props['source'] = 'osm'
props['component'] = AddressFormatter.SUBURB
props['polygon_type'] = 'neighborhood'
index.index_polygon(poly.context)
index.add_polygon(poly.context, props)
qs.matched = [False] * qs.i
cth.matched = [False] * cth.i
logger.info('Matching OSM points to neighborhood polygons')
# Parse OSM and match neighborhood/suburb points to Quattroshapes/ClickThatHood polygons
num_polys = 0
for element_id, attrs, deps in parse_osm(filename):
try:
lat, lon = latlon_to_decimal(attrs['lat'], attrs['lon'])
except ValueError:
continue
osm_name = attrs.get('name')
if not osm_name:
continue
id_type, element_id = element_id.split(':')
element_id = long(element_id)
props['type'] = id_type
props['id'] = element_id
possible_neighborhood = osm_definitions.meets_definition(attrs, osm_definitions.EXTENDED_NEIGHBORHOOD)
is_neighborhood = osm_definitions.meets_definition(attrs, osm_definitions.NEIGHBORHOOD)
country, candidate_languages = country_rtree.country_and_languages(lat, lon)
component_name = None
component_name = osm_address_components.component_from_properties(country, attrs)
ranks = []
osm_names = []
for key in OSM_NAME_TAGS:
name = attrs.get(key)
if name:
osm_names.append(name)
for name_key in OSM_NAME_TAGS:
osm_names.extend([v for k, v in six.iteritems(attrs) if k.startswith('{}:'.format(name_key))])
for idx in (cth, qs):
candidates = idx.get_candidate_polygons(lat, lon, return_all=True)
if candidates:
max_sim = 0.0
arg_max = None
normalized_qs_names = {}
for osm_name in osm_names:
contains_ideographs = any(((char_scripts[ord(c)] or '').lower() in ideographic_scripts
for c in safe_decode(osm_name)))
for i in candidates:
props = idx.get_properties(i)
name = normalized_qs_names.get(i)
if not name:
name = props.get('name')
if not name:
continue
for pattern, repl in cls.regex_replacements:
name = pattern.sub(repl, name)
normalized_qs_names[i] = name
if is_neighborhood and idx is qs and props.get(QuattroshapesReverseGeocoder.LEVEL) != 'neighborhood':
continue
if not contains_ideographs:
sim = NeighborhoodDeduper.compare(osm_name, name, idf)
else:
# Many Han/Hangul characters are common, shouldn't use IDF
sim = NeighborhoodDeduper.compare_ideographs(osm_name, name)
if sim > max_sim:
max_sim = sim
poly = idx.get_polygon(i)
arg_max = (max_sim, props, poly.context, idx, i)
if arg_max:
ranks.append(arg_max)
ranks.sort(key=operator.itemgetter(0), reverse=True)
if ranks and ranks[0][0] >= cls.DUPE_THRESHOLD:
score, props, poly, idx, i = ranks[0]
existing_osm_boundaries = osm_admin_rtree.point_in_poly(lat, lon, return_all=True)
existing_neighborhood_boundaries = osmn.point_in_poly(lat, lon, return_all=True)
skip_node = False
for boundaries in (existing_osm_boundaries, existing_neighborhood_boundaries):
for poly_index, osm_props in enumerate(boundaries):
containing_component = None
name = osm_props.get('name')
# Only exact name matches here since we're comparins OSM to OSM
if name and name.lower() != attrs.get('name', '').lower():
continue
if boundaries is existing_neighborhood_boundaries:
containing_component = AddressFormatter.SUBURB
skip_node = True
break
else:
containing_ids = [(boundary['type'], boundary['id']) for boundary in existing_osm_boundaries[poly_index + 1:]]
containing_component = osm_address_components.component_from_properties(country, osm_props, containing=containing_ids)
if containing_component and containing_component != component_name and AddressFormatter.component_order[containing_component] <= AddressFormatter.component_order[AddressFormatter.CITY]:
skip_node = True
break
if skip_node:
break
# Skip this element
if skip_node:
continue
if idx is cth:
if props['component'] == AddressFormatter.SUBURB:
attrs['polygon_type'] = 'neighborhood'
elif props['component'] == AddressFormatter.CITY_DISTRICT:
attrs['polygon_type'] = 'local_admin'
else:
continue
source = 'osm_cth'
else:
level = props.get(QuattroshapesReverseGeocoder.LEVEL, None)
source = 'osm_quattro'
if level == 'neighborhood':
attrs['polygon_type'] = 'neighborhood'
else:
attrs['polygon_type'] = 'local_admin'
containing_ids = [(boundary['type'], boundary['id']) for boundary in existing_osm_boundaries]
component = osm_address_components.component_from_properties(country, attrs, containing=containing_ids)
attrs['component'] = component
attrs['source'] = source
index.index_polygon(poly)
index.add_polygon(poly, attrs)
idx.matched[i] = True
num_polys += 1
if num_polys % 1000 == 0 and num_polys > 0:
logger.info('did {} neighborhoods'.format(num_polys))
for idx, source in ((cth, 'clickthathood'), (qs, 'quattroshapes')):
for i in xrange(idx.i):
props = idx.get_properties(i)
poly = idx.get_polygon(i)
if idx.matched[i]:
continue
props['source'] = source
if idx is cth:
component = props['component']
if component == AddressFormatter.SUBURB:
props['polygon_type'] = 'neighborhood'
elif component == AddressFormatter.CITY_DISTRICT:
props['polygon_type'] = 'local_admin'
else:
continue
elif props.get(QuattroshapesReverseGeocoder.LEVEL, None) == 'neighborhood':
component = AddressFormatter.SUBURB
name = props.get('name')
if not name:
continue
for pattern, repl in cls.regex_replacements:
name = pattern.sub(repl, name)
props['name'] = name
if cls.quattroshapes_city_district_regex.match(name):
component = AddressFormatter.CITY_DISTRICT
props['component'] = component
props['polygon_type'] = 'neighborhood'
else:
# We don't actually care about local admin polygons unless they match OSM
continue
index.index_polygon(poly.context)
index.add_polygon(poly.context, props)
return index
def setup(self):
self.priorities = []
def index_polygon_properties(self, properties):
self.priorities.append((self.level_priorities[properties['polygon_type']], self.source_priorities[properties['source']]))
def load_polygon_properties(self, d):
self.priorities = [tuple(p) for p in json.load(open(os.path.join(d, self.PRIORITIES_FILENAME)))]
def save_polygon_properties(self, d):
json.dump(self.priorities, open(os.path.join(d, self.PRIORITIES_FILENAME), 'w'))
def priority(self, i):
return self.priorities[i]
def get_candidate_polygons(self, lat, lon):
candidates = super(NeighborhoodReverseGeocoder, self).get_candidate_polygons(lat, lon)
return sorted(candidates, key=self.priority)
class QuattroshapesNeighborhoodsReverseGeocoder(GeohashPolygonIndex, QuattroshapesReverseGeocoder):
# Geohash-bucketed reverse geocoder over the Quattroshapes local-admin and
# neighborhoods shapefiles.
persistent_polygons = False
cache_size = None
@classmethod
def create_neighborhoods_index(cls, quattroshapes_dir,
output_dir,
index_filename=None,
polys_filename=DEFAULT_POLYS_FILENAME):
# Build the index from the two shapefiles under quattroshapes_dir; the
# actual file names come from cls.LOCAL_ADMIN_FILENAME and
# cls.NEIGHBORHOODS_FILENAME (declared on a base class, not visible here).
local_admin_filename = os.path.join(quattroshapes_dir, cls.LOCAL_ADMIN_FILENAME)
neighborhoods_filename = os.path.join(quattroshapes_dir, cls.NEIGHBORHOODS_FILENAME)
return cls.create_from_shapefiles([local_admin_filename, neighborhoods_filename],
output_dir, index_filename=index_filename,
polys_filename=polys_filename)
if __name__ == '__main__':
    # Command-line entry point: build the merged neighborhoods index from
    # ClickThatHood, OSM and Quattroshapes data.
    parser = argparse.ArgumentParser()
    parser.add_argument('-q', '--quattroshapes-dir',
                        help='Path to quattroshapes dir')
    parser.add_argument('-a', '--osm-admin-rtree-dir',
                        help='Path to OSM admin rtree dir')
    parser.add_argument('-c', '--country-rtree-dir',
                        help='Path to country rtree dir')
    parser.add_argument('-b', '--osm-neighborhood-borders-file',
                        help='Path to OSM neighborhood borders file (with dependencies, .osm format)')
    parser.add_argument('-n', '--osm-neighborhoods-file',
                        help='Path to OSM neighborhoods file (no dependencies, .osm format)')
    parser.add_argument('-o', '--out-dir',
                        default=os.getcwd(),
                        help='Output directory')

    logging.basicConfig(level=logging.INFO)
    args = parser.parse_args()
    # All five inputs are required to build the merged index
    if args.osm_neighborhoods_file and args.quattroshapes_dir and args.osm_admin_rtree_dir and args.country_rtree_dir and args.osm_neighborhood_borders_file:
        index = NeighborhoodReverseGeocoder.create_from_osm_and_quattroshapes(
            args.osm_neighborhoods_file,
            args.quattroshapes_dir,
            args.country_rtree_dir,
            args.osm_admin_rtree_dir,
            args.osm_neighborhood_borders_file,
            args.out_dir
        )
    else:
        # Fix: the previous message ("Must specify quattroshapes dir or osm
        # admin borders file") suggested the inputs were alternatives, but the
        # branch above requires every one of them.
        parser.error('Must specify --quattroshapes-dir, --osm-admin-rtree-dir, '
                     '--country-rtree-dir, --osm-neighborhood-borders-file '
                     'and --osm-neighborhoods-file')
index.save() | 0.396769 | 0.147617 |
import os
from scielo_v3_manager.v3_gen import generates
def add_pids_to_xml(xml_sps, document, xml_file_path, pid_v2, v3_manager):
"""
Ensure the PID v3 is present in the XML and registered in the SPF,
either via the registered Article (document) or via the v3_manager.

Parameters
----------
xml_sps : dsm.data.sps_package.SPS_Package
    XML package data (XML + PDFs + images); mutated in place
document : opac_schema.v1.models.Article
    registered document; may supply PIDs for already-published articles
xml_file_path : str
    path of the XML file (used when registering with the v3_manager)
pid_v2 : str
    PID v2 to use when the XML does not already carry one
v3_manager : object
    PID v3 manager; consulted only when the XML still lacks a PID v3
"""
# add scielo_pid_v2, if applicable
_add_scielo_pid_v2_to_xml(xml_sps, pid_v2)
_add_document_pids_to_xml(xml_sps, document)
if not xml_sps.scielo_pid_v3:
_add_v3_manager_pids_to_xml(v3_manager, xml_sps, xml_file_path)
def _add_scielo_pid_v2_to_xml(xml_sps, pid_v2):
"""
Add scielo_pid_v2 to xml_sps
Parameters
----------
xml_sps : dsm.data.sps_package.SPS_Package
dados do pacote XML + PDFs + imagens
pid_v2 : str
pid v2, required if absent in XML
"""
xml_sps.scielo_pid_v2 = xml_sps.scielo_pid_v2 or pid_v2
def _add_document_pids_to_xml(xml_sps, document):
"""
Completa os PIDs do XML com os PIDs do documento registrado
Parameters
----------
xml_sps : dsm.data.sps_package.SPS_Package
objeto para manipular o XML
document : opac_schema.v1.models.Article
documento registrado
"""
if document and document._id:
# completa XML com os PIDs de artigos já publicados no site
xml_sps.scielo_pid_v3 = xml_sps.scielo_pid_v3 or document._id
if xml_sps.aop_pid or document.aop_pid:
xml_sps.aop_pid = xml_sps.aop_pid or document.aop_pid
if document.pid != xml_sps.scielo_pid_v2:
xml_sps.aop_pid = document.pid
def _add_v3_manager_pids_to_xml(v3_manager, xml_sps, xml_file_path):
"""
Completa os PIDs do XML com os PIDs do registro do v3_manager
Verifica se o documento tem ou não pid v3 registrado.
Se não tiver registrado, e nem estiver sugerido no XML,
o pid v3 é gerado.
Registra pid v3, se não tiver registrado.
Parameters
----------
xml_sps : dsm.data.sps_package.SPS_Package
objeto para manipular o XML
xml_file_path : str
nome do arquivo XML
Returns
-------
dict
Se encontrado `erro`, retorna
{"error": "mensagem de erro"}
Se registrado, retorna os dados do registro de v3_manager
{
"v3": record.v3,
"v2": record.v2,
"aop": record.aop,
"doi": record.doi,
"status": record.status,
"filename": record.filename,
"created": record.created,
"updated": record.updated,
}
"""
if not v3_manager:
return {"error": "v3_manager is not instanciated"}
# completa XML com os PIDs de artigos registrados no v3_manager
if not xml_sps.scielo_pid_v2:
return {"error": "Required PID v2"}
result = v3_manager.manage(
v2=xml_sps.scielo_pid_v2,
v3=xml_sps.scielo_pid_v3,
aop=xml_sps.aop_pid,
filename=os.path.basename(xml_file_path),
doi=xml_sps.doi,
status="active",
generate_v3=generates,
)
if result.get("error"):
return result
record = result.get("saved") or result.get("registered")
xml_sps.scielo_pid_v2 = xml_sps.scielo_pid_v2 or record.get("v2")
xml_sps.scielo_pid_v3 = xml_sps.scielo_pid_v3 or record.get("v3")
xml_sps.aop_pid = xml_sps.aop_pid or record.get("aop")
xml_sps.doi = xml_sps.doi or record.get("doi")
return record | dsm/extdeps/doc_ids_manager.py | import os
from scielo_v3_manager.v3_gen import generates
def add_pids_to_xml(xml_sps, document, xml_file_path, pid_v2, v3_manager):
"""
Garante que o PID v3 esteja no XML e que ele esteja registrado no SPF,
seja no Article ou no v3_manager
"""
# add scielo_pid_v2, se aplicável
_add_scielo_pid_v2_to_xml(xml_sps, pid_v2)
_add_document_pids_to_xml(xml_sps, document)
if not xml_sps.scielo_pid_v3:
_add_v3_manager_pids_to_xml(v3_manager, xml_sps, xml_file_path)
def _add_scielo_pid_v2_to_xml(xml_sps, pid_v2):
"""
Add scielo_pid_v2 to xml_sps
Parameters
----------
xml_sps : dsm.data.sps_package.SPS_Package
dados do pacote XML + PDFs + imagens
pid_v2 : str
pid v2, required if absent in XML
"""
xml_sps.scielo_pid_v2 = xml_sps.scielo_pid_v2 or pid_v2
def _add_document_pids_to_xml(xml_sps, document):
"""
Completa os PIDs do XML com os PIDs do documento registrado
Parameters
----------
xml_sps : dsm.data.sps_package.SPS_Package
objeto para manipular o XML
document : opac_schema.v1.models.Article
documento registrado
"""
if document and document._id:
# completa XML com os PIDs de artigos já publicados no site
xml_sps.scielo_pid_v3 = xml_sps.scielo_pid_v3 or document._id
if xml_sps.aop_pid or document.aop_pid:
xml_sps.aop_pid = xml_sps.aop_pid or document.aop_pid
if document.pid != xml_sps.scielo_pid_v2:
xml_sps.aop_pid = document.pid
def _add_v3_manager_pids_to_xml(v3_manager, xml_sps, xml_file_path):
"""
Completa os PIDs do XML com os PIDs do registro do v3_manager
Verifica se o documento tem ou não pid v3 registrado.
Se não tiver registrado, e nem estiver sugerido no XML,
o pid v3 é gerado.
Registra pid v3, se não tiver registrado.
Parameters
----------
xml_sps : dsm.data.sps_package.SPS_Package
objeto para manipular o XML
xml_file_path : str
nome do arquivo XML
Returns
-------
dict
Se encontrado `erro`, retorna
{"error": "mensagem de erro"}
Se registrado, retorna os dados do registro de v3_manager
{
"v3": record.v3,
"v2": record.v2,
"aop": record.aop,
"doi": record.doi,
"status": record.status,
"filename": record.filename,
"created": record.created,
"updated": record.updated,
}
"""
if not v3_manager:
return {"error": "v3_manager is not instanciated"}
# completa XML com os PIDs de artigos registrados no v3_manager
if not xml_sps.scielo_pid_v2:
return {"error": "Required PID v2"}
result = v3_manager.manage(
v2=xml_sps.scielo_pid_v2,
v3=xml_sps.scielo_pid_v3,
aop=xml_sps.aop_pid,
filename=os.path.basename(xml_file_path),
doi=xml_sps.doi,
status="active",
generate_v3=generates,
)
if result.get("error"):
return result
record = result.get("saved") or result.get("registered")
xml_sps.scielo_pid_v2 = xml_sps.scielo_pid_v2 or record.get("v2")
xml_sps.scielo_pid_v3 = xml_sps.scielo_pid_v3 or record.get("v3")
xml_sps.aop_pid = xml_sps.aop_pid or record.get("aop")
xml_sps.doi = xml_sps.doi or record.get("doi")
return record | 0.518302 | 0.183887 |
import csv
import json
import os
import click
from os.path import isfile, isdir
from pathlib import Path
from policy_sentry.util.actions import get_service_from_action
from policy_sentry.analysis.analyze import analyze_by_access_level
from policy_sentry.shared.constants import DATABASE_FILE_PATH
from policy_sentry.shared.database import connect_db
from policy_sentry.util.file import list_files_in_directory
from policy_sentry.command.initialize import initialize
from common.awsinfo import is_aws_managed_policy
HOME = str(Path.home())
# Make sure the policy_sentry IAM database exists before connecting to it.
# NOTE(review): this block runs at import time and may download/initialize the
# database as a side effect; consider moving it into the command entry point.
if isfile(HOME + '/.policy_sentry/aws.sqlite3'):
print("Policy Sentry database found. Continuing...")
else:
print("NOTE: Policy Sentry database not found. Initializing...")
initialize()
# Shared DB session used by the access-level analysis helpers in this module.
db_session = connect_db(DATABASE_FILE_PATH)
@click.command(
short_help='analyze the JSON formatted results of the find_public_instances_with_roles script.'
)
@click.option(
'--input-file',
type=click.Path(exists=True),
help='Path to the JSON file you want to analyze, or a directory of those files. '
'Defaults to the directory "./reports/accounts/"',
default=os.getcwd() + '/reports/accounts/'
)
@click.option(
'--output',
# required=False,
type=click.Path(exists=True),
default=os.getcwd() + '/reports/',
help='Directory to store the reports. Defaults to "/reports/"'
)
def analyze_public_instances_results(input_file, output):
# Entry point: summarize the per-account JSON reports of public instances
# with IAM roles, writing results.json and results.csv to the output location.
all_results = []
if isfile(input_file):
# Single-file mode: print the analysis to stdout.
# NOTE(review): this branch never appends to all_results, so the report
# files written below will contain an empty list — confirm intended.
account_alias, account_id, results = analyze_file(input_file)
print(json.dumps(results, indent=2))
elif isdir(input_file):
# Directory mode: analyze every .json report the directory contains.
file_list = list_files_in_directory(input_file)
for file in file_list:
if file.endswith(".json"):
account_alias, account_id, results = analyze_file(input_file + file)
this_result = {
'account_id': account_id,
'account_alias': account_alias,
'results': results
}
all_results.append(this_result)
if isfile(output):
# NOTE(review): when output is an existing file, 'results.csv' is appended
# to the file path itself, producing e.g. "report.jsonresults.csv" — verify.
with open(output, 'w') as results_file:
json.dump(all_results, results_file, indent=4)
write_csv_report(all_results, output + 'results.csv')
elif isdir(output):
# Output is a directory: write results.json and results.csv inside it.
with open(output + 'results.json', 'w') as results_file:
json.dump(all_results, results_file, indent=4)
write_csv_report(all_results, output + 'results.csv')
def analyze_file(input):
    """Load one account report (JSON) and summarize its publicly exposed roles.

    The account alias is derived from the file name without its extension and
    is also passed to get_list_of_roles_in_account_with_public_access as the
    initial account id.

    Returns a (account_alias, account_id, roles_with_public_access) tuple.
    """
    with open(input, 'r') as json_file:
        datastore = json.load(json_file)
    alias = os.path.splitext(os.path.basename(input))[0]
    account_id, roles_with_public_access = get_list_of_roles_in_account_with_public_access(
        alias, datastore)
    return alias, account_id, roles_with_public_access
def get_list_of_roles_in_account_with_public_access(account_id, json_data):
"""
Summarize, for every role found in the report data, its public exposure and
the access levels its attached policies grant.

Example results:
[
{
"role_name": "SomeRole",
"public_ips": 3,
"policies_count": 3,
"aws_managed_policies_count": 3,
"aws_managed_policies": [
"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
"arn:aws:iam::aws:policy/AmazonS3FullAccess"
],
"custom_policies": [
"arn:aws:iam::012345678901:policy/Tupac-Is",
"arn:aws:iam::012345678901:policy/StillAlive"
]
},
{
"role_name": "SomeRole2",
"public_ips": 4,
"policies_count": 1,
"aws_managed_policies_count": 1,
"aws_managed_policies": [
"arn:aws:iam::aws:policy/AmazonS3FullAccess"
],
"customer_managed_policies": [
"arn:aws:iam::0123456789012:policy/JEpstein",
"arn:aws:iam::012345678901:policy/DidntHangHimself"
]
}
]
"""
roles = []
# NOTE(review): these two lists are created once per call and every role dict
# appended below references the SAME list objects, so later roles see policy
# ARNs accumulated from earlier roles/regions — confirm this is intended.
aws_managed_policies = []
customer_managed_policies = []
list_of_account_ids = json_data.keys() # should only be one item; lazy messy quick fix
for account_id in list_of_account_ids:
for region in json_data[account_id]:
for role in json_data[account_id][region]:
services_role_can_modify = []
aws_managed_policies_count = 0
role_permissions_management_abilities = []
role_write_abilities = []
for policy in role['policies']:
# AWS-managed policy not seen before: count it and collect its abilities
if is_aws_managed_policy(policy['policy_arn']) and policy['policy_arn'] not in aws_managed_policies:
aws_managed_policies_count += 1
aws_managed_policies.append(policy['policy_arn'])
permissions_management_abilities = has_permissions_management_access(policy['policy_document'])
if permissions_management_abilities:
role_permissions_management_abilities.extend(permissions_management_abilities)
write_abilities = has_write_access(policy['policy_document'])
if write_abilities:
role_write_abilities.extend(write_abilities)
# NOTE(review): `=` overwrites the result from earlier policies in
# this role instead of extending it — verify intended.
services_role_can_modify = get_service_prefixes_role_can_modify(policy['policy_document'])
# Customer-managed policy not seen before: same collection, no count
elif not is_aws_managed_policy(policy['policy_arn']) and policy['policy_arn'] not in \
customer_managed_policies:
customer_managed_policies.append(policy['policy_arn'])
permissions_management_abilities = has_permissions_management_access(policy['policy_document'])
if permissions_management_abilities:
role_permissions_management_abilities.extend(permissions_management_abilities)
write_abilities = has_write_access(policy['policy_document'])
if write_abilities:
role_write_abilities.extend(write_abilities)
services_role_can_modify = get_service_prefixes_role_can_modify(policy['policy_document'])
# else:
# pass
# Sort everything for stable, readable output
aws_managed_policies.sort()
customer_managed_policies.sort()
services_role_can_modify.sort()
role_permissions_management_abilities.sort()
role_write_abilities.sort()
this_role = {
'role_name': role['role_name'],
'public_ips': len(role['instances']), # The number of public IP addresses with that role
'policies_count': len(role['policies']),
'aws_managed_policies_count': aws_managed_policies_count,
'aws_managed_policies': aws_managed_policies,
'customer_managed_policies': customer_managed_policies,
'services_role_can_modify': services_role_can_modify,
'write_abilities': role_write_abilities,
'permissions_management_abilities': role_permissions_management_abilities
}
roles.append(this_role)
# account_id here is the last key iterated above (rebinding the parameter)
return account_id, roles
def write_csv_report(data, results_summary_file):
    """Write a per-role CSV summary of the analyzed accounts.

    Parameters
    ----------
    data : list of dict
        Entries with 'account_id', 'account_alias' and a 'results' list of
        per-role dictionaries, as produced by analyze_file.
    results_summary_file : str
        Path of the CSV file to create (overwritten if it exists).
    """
    fieldnames = ['account_id', 'account_alias', 'role_name', 'aws_managed_policies_count', 'public_ip_count',
                  'policies_count', 'permissions_management_abilities_count', 'write_abilities_count',
                  'services_role_can_modify_count', 'aws_managed_policies', 'customer_managed_policies',
                  'services_role_can_modify', 'write_abilities', 'permissions_management_abilities']
    # Multi-valued columns are flattened into a single semicolon-delimited cell
    delim = '; '
    with open(results_summary_file, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect='excel')
        writer.writeheader()
        for account in data:
            account_id = account['account_id']
            for role in account['results']:
                writer.writerow({
                    'account_id': account_id,
                    'account_alias': account['account_alias'],
                    'role_name': role['role_name'],
                    'aws_managed_policies_count': role['aws_managed_policies_count'],
                    'public_ip_count': role['public_ips'],
                    'policies_count': role['policies_count'],
                    'permissions_management_abilities_count': str(len(role['permissions_management_abilities'])),
                    'write_abilities_count': str(len(role['write_abilities'])),
                    'services_role_can_modify_count': str(len(role['services_role_can_modify'])),
                    'aws_managed_policies': delim.join(role['aws_managed_policies']),
                    'customer_managed_policies': delim.join(role['customer_managed_policies']),
                    'services_role_can_modify': delim.join(role['services_role_can_modify']),
                    'write_abilities': delim.join(role['write_abilities']),
                    'permissions_management_abilities': delim.join(role['permissions_management_abilities'])
                })
            print(f"Finished writing data for account {account_id}")
def has_permissions_management_access(policy):
    """
    Given a policy as a dictionary, determine if the policy grants Permissions management access.

    :param policy: IAM policy document as a dictionary.
    :return: list of IAM Actions at the permissions-management access level;
        empty when the policy grants none.
    """
    # analyze_by_access_level already returns a sized (possibly empty) list —
    # the original len() check and explicit empty-list branch were redundant.
    return analyze_by_access_level(policy, db_session, "permissions-management")
def has_write_access(policy):
    """
    Given a policy as a dictionary, determine if the policy grants Write access.

    :param policy: IAM policy document as a dictionary.
    :return: list of IAM Actions at the write access level; empty when the
        policy grants none.
    """
    # Bug fix: this previously queried the "permissions-management" access
    # level (copy-paste from has_permissions_management_access), so write-only
    # permissions were never reported.
    return analyze_by_access_level(policy, db_session, "write")
def get_service_prefixes_role_can_modify(policy):
    """
    Given a policy as a dictionary, determine what AWS service prefixes the
    policy grants at the write or permissions-management access levels.

    :param policy: IAM policy document as a dictionary.
    :return: de-duplicated list of service prefixes, permissions-management
        services first, then write-level services (first-seen order preserved
        within each group).
    """
    # dict.fromkeys preserves insertion order while removing duplicates.
    write_services = list(dict.fromkeys(
        get_service_from_action(action) for action in has_write_access(policy)))
    permissions_management_services = list(dict.fromkeys(
        get_service_from_action(action) for action in has_permissions_management_access(policy)))
    return list(dict.fromkeys(permissions_management_services + write_services))
if __name__ == '__main__':
analyze_public_instances_results() | analyze_public_instances_results.py | import csv
import json
import os
import click
from os.path import isfile, isdir
from pathlib import Path
from policy_sentry.util.actions import get_service_from_action
from policy_sentry.analysis.analyze import analyze_by_access_level
from policy_sentry.shared.constants import DATABASE_FILE_PATH
from policy_sentry.shared.database import connect_db
from policy_sentry.util.file import list_files_in_directory
from policy_sentry.command.initialize import initialize
from common.awsinfo import is_aws_managed_policy
HOME = str(Path.home())
if isfile(HOME + '/.policy_sentry/aws.sqlite3'):
print("Policy Sentry database found. Continuing...")
else:
print("NOTE: Policy Sentry database not found. Initializing...")
initialize()
db_session = connect_db(DATABASE_FILE_PATH)
@click.command(
short_help='analyze the JSON formatted results of the find_public_instances_with_roles script.'
)
@click.option(
'--input-file',
type=click.Path(exists=True),
help='Path to the JSON file you want to analyze, or a directory of those files. '
'Defaults to the directory "./reports/accounts/"',
default=os.getcwd() + '/reports/accounts/'
)
@click.option(
'--output',
# required=False,
type=click.Path(exists=True),
default=os.getcwd() + '/reports/',
help='Directory to store the reports. Defaults to "/reports/"'
)
def analyze_public_instances_results(input_file, output):
    """CLI entry point: analyze find_public_instances_with_roles JSON output.

    Accepts either a single JSON report file or a directory of reports, then
    writes a combined JSON and CSV summary under the output location.
    """
    all_results = []
    if isfile(input_file):
        # Single-file mode: analyze and print to stdout; all_results stays empty.
        account_alias, account_id, results = analyze_file(input_file)
        print(json.dumps(results, indent=2))
    elif isdir(input_file):
        file_list = list_files_in_directory(input_file)
        for file in file_list:
            if file.endswith(".json"):
                # NOTE(review): plain string concatenation assumes input_file
                # ends with a path separator (the default does) — confirm.
                account_alias, account_id, results = analyze_file(input_file + file)
                this_result = {
                    'account_id': account_id,
                    'account_alias': account_alias,
                    'results': results
                }
                all_results.append(this_result)
    # NOTE(review): when `output` is an existing *file*, the CSV is still
    # written to output + 'results.csv', which only forms a valid path if
    # `output` ends with a separator — confirm intended usage.
    if isfile(output):
        with open(output, 'w') as results_file:
            json.dump(all_results, results_file, indent=4)
        write_csv_report(all_results, output + 'results.csv')
    elif isdir(output):
        with open(output + 'results.json', 'w') as results_file:
            json.dump(all_results, results_file, indent=4)
        write_csv_report(all_results, output + 'results.csv')
def analyze_file(input):
    """Load one per-account JSON report and analyze it.

    :param input: path to the JSON report file; its base name (without
        extension) doubles as the account alias.
    :return: tuple of (account_alias, account_id, roles_with_public_access)
    """
    with open(input, 'r') as json_file:
        datastore = json.load(json_file)
    # Report files are named after the account alias, e.g. "my-account.json".
    account_alias = os.path.splitext(os.path.basename(input))[0]
    account_id, roles_with_public_access = get_list_of_roles_in_account_with_public_access(
        account_alias, datastore)
    return account_alias, account_id, roles_with_public_access
def get_list_of_roles_in_account_with_public_access(account_id, json_data):
    """
    Build, for one account's report, a list of role summaries describing the
    attached policies and their write / permissions-management capabilities.

    :param account_id: fallback account id (rebound below from the JSON keys)
    :param json_data: parsed report shaped {account_id: {region: [role, ...]}}
    :return: (account_id, roles)

    Example results:
    [
        {
            "role_name": "SomeRole",
            "public_ips": 3,
            "policies_count": 3,
            "aws_managed_policies_count": 3,
            "aws_managed_policies": [
                "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
                "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
                "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
                "arn:aws:iam::aws:policy/AmazonS3FullAccess"
            ],
            "custom_policies": [
                "arn:aws:iam::012345678901:policy/Tupac-Is",
                "arn:aws:iam::012345678901:policy/StillAlive"
            ]
        },
        {
            "role_name": "SomeRole2",
            "public_ips": 4,
            "policies_count": 1,
            "aws_managed_policies_count": 1,
            "aws_managed_policies": [
                "arn:aws:iam::aws:policy/AmazonS3FullAccess"
            ],
            "customer_managed_policies": [
                "arn:aws:iam::0123456789012:policy/JEpstein",
                "arn:aws:iam::012345678901:policy/DidntHangHimself"
            ]
        }
    ]
    """
    roles = []
    # NOTE(review): these two lists live at function scope, so they accumulate
    # policy ARNs across *all* roles/regions processed below; each role's entry
    # therefore also references earlier roles' policies — confirm intended.
    aws_managed_policies = []
    customer_managed_policies = []
    list_of_account_ids = json_data.keys()  # should only be one item; lazy messy quick fix
    for account_id in list_of_account_ids:  # NOTE(review): rebinds the account_id parameter
        for region in json_data[account_id]:
            for role in json_data[account_id][region]:
                # Per-role accumulators
                services_role_can_modify = []
                aws_managed_policies_count = 0
                role_permissions_management_abilities = []
                role_write_abilities = []
                for policy in role['policies']:
                    # AWS-managed policy not already seen in this file
                    if is_aws_managed_policy(policy['policy_arn']) and policy['policy_arn'] not in aws_managed_policies:
                        aws_managed_policies_count += 1
                        aws_managed_policies.append(policy['policy_arn'])
                        permissions_management_abilities = has_permissions_management_access(policy['policy_document'])
                        if permissions_management_abilities:
                            role_permissions_management_abilities.extend(permissions_management_abilities)
                        write_abilities = has_write_access(policy['policy_document'])
                        if write_abilities:
                            role_write_abilities.extend(write_abilities)
                        # NOTE(review): reassigned on every matching policy, so only
                        # the *last* policy's services survive — likely should extend.
                        services_role_can_modify = get_service_prefixes_role_can_modify(policy['policy_document'])
                    # Customer-managed policy not already seen in this file
                    elif not is_aws_managed_policy(policy['policy_arn']) and policy['policy_arn'] not in \
                            customer_managed_policies:
                        customer_managed_policies.append(policy['policy_arn'])
                        permissions_management_abilities = has_permissions_management_access(policy['policy_document'])
                        if permissions_management_abilities:
                            role_permissions_management_abilities.extend(permissions_management_abilities)
                        write_abilities = has_write_access(policy['policy_document'])
                        if write_abilities:
                            role_write_abilities.extend(write_abilities)
                        services_role_can_modify = get_service_prefixes_role_can_modify(policy['policy_document'])
                    # else:
                    # pass
                aws_managed_policies.sort()
                customer_managed_policies.sort()
                services_role_can_modify.sort()
                role_permissions_management_abilities.sort()
                role_write_abilities.sort()
                this_role = {
                    'role_name': role['role_name'],
                    'public_ips': len(role['instances']),  # The number of public IP addresses with that role
                    'policies_count': len(role['policies']),
                    'aws_managed_policies_count': aws_managed_policies_count,
                    'aws_managed_policies': aws_managed_policies,
                    'customer_managed_policies': customer_managed_policies,
                    'services_role_can_modify': services_role_can_modify,
                    'write_abilities': role_write_abilities,
                    'permissions_management_abilities': role_permissions_management_abilities
                }
                roles.append(this_role)
    return account_id, roles
def write_csv_report(data, results_summary_file):
    """Write the per-role analysis results to a CSV summary file.

    :param data: list of account dicts, each holding 'account_id',
        'account_alias', and 'results' (role dicts as produced by
        get_list_of_roles_in_account_with_public_access).
    :param results_summary_file: path of the CSV file to (over)write.
    """
    fieldnames = ['account_id', 'account_alias', 'role_name', 'aws_managed_policies_count', 'public_ip_count',
                  'policies_count', 'permissions_management_abilities_count', 'write_abilities_count',
                  'services_role_can_modify_count', 'aws_managed_policies', 'customer_managed_policies',
                  'services_role_can_modify', 'write_abilities', 'permissions_management_abilities']
    separator = '; '
    # Bug fix: newline='' prevents the csv module from emitting blank rows on Windows.
    with open(results_summary_file, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect='excel')
        writer.writeheader()
        for account in data:
            for role in account['results']:
                writer.writerow({
                    'account_id': account['account_id'],
                    'account_alias': account['account_alias'],
                    'role_name': role['role_name'],
                    'aws_managed_policies_count': role['aws_managed_policies_count'],
                    'public_ip_count': role['public_ips'],
                    'policies_count': role['policies_count'],
                    'permissions_management_abilities_count': str(len(role['permissions_management_abilities'])),
                    'write_abilities_count': str(len(role['write_abilities'])),
                    'services_role_can_modify_count': str(len(role['services_role_can_modify'])),
                    'aws_managed_policies': separator.join(role['aws_managed_policies']),
                    'customer_managed_policies': separator.join(role['customer_managed_policies']),
                    'services_role_can_modify': separator.join(role['services_role_can_modify']),
                    'write_abilities': separator.join(role['write_abilities']),
                    'permissions_management_abilities': separator.join(role['permissions_management_abilities'])
                })
            print(f"Finished writing data for account {account['account_id']}")
def has_permissions_management_access(policy):
    """
    Given a policy as a dictionary, determine if the policy grants Permissions management access.

    :param policy: IAM policy document as a dictionary.
    :return: list of IAM Actions at the permissions-management access level;
        empty when the policy grants none.
    """
    # analyze_by_access_level already returns a sized (possibly empty) list —
    # the original len() check and explicit empty-list branch were redundant.
    return analyze_by_access_level(policy, db_session, "permissions-management")
def has_write_access(policy):
    """
    Given a policy as a dictionary, determine if the policy grants Write access.

    :param policy: IAM policy document as a dictionary.
    :return: list of IAM Actions at the write access level; empty when the
        policy grants none.
    """
    # Bug fix: this previously queried the "permissions-management" access
    # level (copy-paste from has_permissions_management_access), so write-only
    # permissions were never reported.
    return analyze_by_access_level(policy, db_session, "write")
def get_service_prefixes_role_can_modify(policy):
    """
    Given a policy as a dictionary, determine what AWS service prefixes the
    policy grants at the write or permissions-management access levels.

    :param policy: IAM policy document as a dictionary.
    :return: de-duplicated list of service prefixes, permissions-management
        services first, then write-level services (first-seen order preserved
        within each group).
    """
    # dict.fromkeys preserves insertion order while removing duplicates.
    write_services = list(dict.fromkeys(
        get_service_from_action(action) for action in has_write_access(policy)))
    permissions_management_services = list(dict.fromkeys(
        get_service_from_action(action) for action in has_permissions_management_access(policy)))
    return list(dict.fromkeys(permissions_management_services + write_services))
if __name__ == '__main__':
analyze_public_instances_results() | 0.194597 | 0.067547 |
import json
import requests
from Logs.log_configuration import configure_logger
from api_client.url_helpers.organization_group_url import get_organization_group_children_url, \
get_parent_organization_group_url, get_organization_group_details_url
from models.api_header_model import RequestHeader
log = configure_logger('default')
def get_child_organization_group_list(organization_group_id):
    """
    To get the list of child organization groups for a particular OG
    :param organization_group_id: organization group id
    :return: parsed response data on success, the raw response object on HTTP
        error, or the raised exception object on failure
    """
    api_url = get_organization_group_children_url(organization_group_id)
    headers = RequestHeader().header
    try:
        response = requests.get(api_url, headers=headers)
        if not response.ok:
            # Bug fix: log.error was previously passed the status code as the
            # format string plus stray args, which breaks logging's %-formatting.
            log.error('HTTP {} {}: {}'.format(response.status_code, response.reason,
                                              response.content))
            return response
        log.info(response.content)
        return json.loads(response.content)
    except Exception as e:
        log.error('Child Organization Group Search failed for organization group id {} with exception {}'
                  .format(organization_group_id, str(e)))
        return e
def get_parent_organization_group_uuid_list(organization_group_uuid):
    """
    To get the list of parent organization group uuids for a particular OG
    :param organization_group_uuid: organization group uuid
    :return: parsed response data on success, the raw response object on HTTP
        error, or the raised exception object on failure
    """
    api_url = get_parent_organization_group_url(organization_group_uuid)
    headers = RequestHeader().header
    try:
        response = requests.get(api_url, headers=headers)
        if not response.ok:
            # Bug fix: log.error was previously passed the status code as the
            # format string plus stray args, which breaks logging's %-formatting.
            log.error('HTTP {} {}: {}'.format(response.status_code, response.reason,
                                              response.content))
            return response
        log.info(response.content)
        return json.loads(response.content)
    except Exception as e:
        # Bug fix: the format string had three placeholders but only two
        # arguments, raising IndexError inside this handler and masking the
        # original error.
        log.error('Parent Organization Group Search failed for organization group uuid {} with exception {}'
                  .format(organization_group_uuid, str(e)))
        return e
def get_parent_organization_group_details(organization_group_uuid):
    """
    To get the details of a particular organization group.
    (The original summary said "list of parent organization group uuid", but
    the endpoint used here returns the group's details.)
    :param organization_group_uuid: organization group uuid
    :return: parsed organization group details on success, the raw response on
        HTTP error, or the raised exception object on failure
    """
    api_url = get_organization_group_details_url(organization_group_uuid)
    headers = RequestHeader().header
    try:
        response = requests.get(api_url, headers=headers)
        if not response.ok:
            # NOTE(review): log.error receives the status code as its format
            # string plus extra args — logging's %-formatting will fail on
            # this; should be a single pre-formatted message.
            log.error(response.status_code, response.reason,
                      response.content)  # HTTP
            return response
        else:
            log.info(response.content)
            response_data = json.loads(response.content)
            return response_data
    except Exception as e:
        log.error('Organization Group Details Search failed for organization group uuid {} with exception {}'
                  .format(organization_group_uuid, str(e)))
return e | UEM-Samples/ProductProvisioning/product_app_deployment_automation/api_client/organization_group.py | import json
import requests
from Logs.log_configuration import configure_logger
from api_client.url_helpers.organization_group_url import get_organization_group_children_url, \
get_parent_organization_group_url, get_organization_group_details_url
from models.api_header_model import RequestHeader
log = configure_logger('default')
def get_child_organization_group_list(organization_group_id):
"""
To get the list of child organization group for particular OG
:param organization_group_id: organization group id
:return: list of organization groups
"""
api_url = get_organization_group_children_url(organization_group_id)
headers = RequestHeader().header
try:
response = requests.get(api_url, headers=headers)
if not response.ok:
log.error(response.status_code, response.reason,
response.content) # HTTP
return response
else:
log.info(response.content)
response_data = json.loads(response.content)
return response_data
except Exception as e:
log.error('Child Organization Group Search failed for organization group id {} with exception {}'
.format(organization_group_id, str(e)))
return e
def get_parent_organization_group_uuid_list(organization_group_uuid):
    """
    To get the list of parent organization group uuids for a particular OG
    :param organization_group_uuid: organization group uuid
    :return: parsed response data on success, the raw response object on HTTP
        error, or the raised exception object on failure
    """
    api_url = get_parent_organization_group_url(organization_group_uuid)
    headers = RequestHeader().header
    try:
        response = requests.get(api_url, headers=headers)
        if not response.ok:
            # Bug fix: log.error was previously passed the status code as the
            # format string plus stray args, which breaks logging's %-formatting.
            log.error('HTTP {} {}: {}'.format(response.status_code, response.reason,
                                              response.content))
            return response
        log.info(response.content)
        return json.loads(response.content)
    except Exception as e:
        # Bug fix: the format string had three placeholders but only two
        # arguments, raising IndexError inside this handler and masking the
        # original error.
        log.error('Parent Organization Group Search failed for organization group uuid {} with exception {}'
                  .format(organization_group_uuid, str(e)))
        return e
def get_parent_organization_group_details(organization_group_uuid):
"""
To get the list of parent organization group uuid for particular OG
:param organization_group_uuid: organization group uuid
:return: list of parent organization group details
"""
api_url = get_organization_group_details_url(organization_group_uuid)
headers = RequestHeader().header
try:
response = requests.get(api_url, headers=headers)
if not response.ok:
log.error(response.status_code, response.reason,
response.content) # HTTP
return response
else:
log.info(response.content)
response_data = json.loads(response.content)
return response_data
except Exception as e:
log.error('Organization Group Details Search failed for organization group uuid {} with exception {}'
.format(organization_group_uuid, str(e)))
return e | 0.363873 | 0.066478 |
import json
from pprint import pformat
from celery import states
from celery.result import AsyncResult
from flask import abort
import config
from gcpdac.application_ci import create_application, delete_application
from gcpdac.celery_tasks import deploy_application_task, destroy_application_task
logger = config.logger
def create(applicationDetails):
    """Synchronously deploy an application.

    :param applicationDetails: dict describing the application to deploy.
    :return: (result, 201) on success; aborts with HTTP 500 on failure.
    """
    logger.debug(pformat(applicationDetails))
    outcome = create_application(applicationDetails)
    if outcome.get("tf_return_code") != 0:
        abort(500, "Failed to deploy your application")
    return outcome, 201
def delete(oid):
    """Synchronously delete an application by id.

    :param oid: id of the application to delete.
    :return: ({}, 200) on success; aborts with HTTP 500 on failure.
    """
    logger.debug("Id is {}".format(oid))
    outcome = delete_application({"id": oid})
    if outcome.get("tf_return_code") != 0:
        abort(500, "Failed to delete your application")
    return {}, 200
def create_async(applicationDetails):
    """Queue asynchronous creation of an application via Celery.

    :param applicationDetails: dict describing the application to deploy.
    :return: ({"taskid": <celery task id>}, 201)
    """
    logger.debug(pformat(applicationDetails))
    result = deploy_application_task.delay(applicationDetails=applicationDetails)
    logger.info("Task ID %s", result.task_id)
    # TODO handle celery failure — .delay() only enqueues, so there is no
    # failure signal to check here yet; the dead `success` flag and the
    # unreachable abort branch were removed.
    return {"taskid": result.task_id}, 201
def delete_async(oid):
    """Queue asynchronous deletion of an application via Celery.

    :param oid: id of the application to destroy.
    :return: ({"taskid": <celery task id>}, 201)
    """
    logger.debug("Id is {}".format(oid))
    applicationDetails = {"id": oid}
    result = destroy_application_task.delay(applicationDetails=applicationDetails)
    logger.info("Task ID %s", result.task_id)
    # TODO handle celery failure — .delay() only enqueues, so there is no
    # failure signal to check here yet; the dead `success` flag and the
    # unreachable abort branch were removed.
    return {"taskid": result.task_id}, 201
def create_application_result(taskid):
    """Poll the status of an asynchronous application-creation Celery task.

    While the task is still running only its status is returned; once it has
    finished, a non-zero Terraform return code maps the status to FAILURE.
    """
    logger.info("CREATE application RESULT %s", format(taskid))
    task = AsyncResult(taskid)
    status = task.status
    if status not in (states.SUCCESS, states.FAILURE):
        # Task still pending/running: nothing to fetch yet.
        return {'status': status}
    retval = task.get(timeout=1.0)
    if retval["return_code"] > 0:
        status = states.FAILURE
    # Payload is currently always empty; kept for API compatibility.
    payload = {}
    return {'status': status, "payload": json.dumps(payload)}
def delete_application_result(taskid):
    """Poll the status of an asynchronous application-deletion Celery task.

    :param taskid: Celery task id returned by delete_async.
    :return: {'status': ..., 'return_code': ...} once the task has finished
        (a non-zero Terraform return code maps the status to FAILURE), or
        {'status': ...} alone while the task is still pending/running.
    """
    logger.info("DELETE application RESULT %s", format(taskid))
    status = AsyncResult(taskid).status
    if status == states.SUCCESS or status == states.FAILURE:
        # Task finished: fetch its result (short timeout since it is ready).
        retval = AsyncResult(taskid).get(timeout=1.0)
        return_code = retval["return_code"]
        if return_code > 0:
            # Non-zero Terraform return code means the destroy failed.
            status = states.FAILURE
        return {'status': status, "return_code": return_code}
    else:
return {'status': status} | gcpdac/application.py | import json
from pprint import pformat
from celery import states
from celery.result import AsyncResult
from flask import abort
import config
from gcpdac.application_ci import create_application, delete_application
from gcpdac.celery_tasks import deploy_application_task, destroy_application_task
logger = config.logger
def create(applicationDetails):
logger.debug(pformat(applicationDetails))
result = create_application(applicationDetails)
if result.get("tf_return_code") == 0:
return result, 201
else:
abort(500, "Failed to deploy your application")
def delete(oid):
logger.debug("Id is {}".format(oid))
applicationDetails = {"id": oid}
result = delete_application(applicationDetails)
if result.get("tf_return_code") == 0:
return {}, 200
else:
abort(500, "Failed to delete your application")
def create_async(applicationDetails):
logger.debug(pformat(applicationDetails))
result = deploy_application_task.delay(applicationDetails=applicationDetails)
logger.info("Task ID %s", result.task_id)
context = {"taskid": result.task_id}
# TODO handle celery failure
success = True
if success == True:
return context, 201
else:
abort(500, "Failed to create your application")
def delete_async(oid):
logger.debug("Id is {}".format(oid))
applicationDetails = {"id": oid}
result = destroy_application_task.delay(applicationDetails=applicationDetails)
logger.info("Task ID %s", result.task_id)
context = {"taskid": result.task_id}
# TODO handle celery failure
success = True
if success == True:
return context, 201
else:
abort(500, "Failed to delete your application")
def create_application_result(taskid):
logger.info("CREATE application RESULT %s", format(taskid))
status = AsyncResult(taskid).status
if status == states.SUCCESS or status == states.FAILURE:
retval = AsyncResult(taskid).get(timeout=1.0)
return_code = retval["return_code"]
# tf_outputs = retval["tf_outputs"]
if return_code > 0:
status = states.FAILURE
payload = {}
else:
payload = {}
return {'status': status, "payload": json.dumps(payload)}
else:
return {'status': status}
def delete_application_result(taskid):
logger.info("DELETE application RESULT %s", format(taskid))
status = AsyncResult(taskid).status
if status == states.SUCCESS or status == states.FAILURE:
retval = AsyncResult(taskid).get(timeout=1.0)
return_code = retval["return_code"]
if return_code > 0:
status = states.FAILURE
return {'status': status, "return_code": return_code}
else:
return {'status': status} | 0.113211 | 0.143668 |
from decimal import Decimal
from django.conf import settings
from products.models import Products
class Favorite(object):
    """Session-backed favorites list.

    Stored in the Django session as {product_id: {"quantity", "retail_price_USD"}}.
    """

    def __init__(self, request):
        """
        Initialize the favorite list from the current request's session.
        """
        self.session = request.session
        favorite = self.session.get(settings.FAVORITE_SESSION_ID)
        if not favorite:
            # Save empty favorite list in session on first use
            favorite = self.session[settings.FAVORITE_SESSION_ID] = {}
        self.favorite = favorite

    def __iter__(self):
        """
        Iterate over the items in the favorite and get the products
        from the database.
        """
        product_ids = self.favorite.keys()
        # get the product objects and attach them to each entry
        products = Products.objects.filter(id__in=product_ids)
        # Work on a copy so the session data never holds model objects
        favorite = self.favorite.copy()
        for product in products:
            favorite[str(product.id)]['product'] = product
        for item in favorite.values():
            yield item

    def __len__(self):
        """
        Count all items (sum of quantities) in the favorite list.
        """
        return sum(item['quantity'] for item in self.favorite.values())

    def add(self, product, quantity=1, override_quantity=False):
        """
        Add a product to the favorite or update its quantity.

        :param override_quantity: if True, set the quantity instead of adding to it.
        """
        product_id = str(product.id)
        if product_id not in self.favorite:
            # Price is stored as a string because sessions are JSON-serialized
            self.favorite[product_id] = {"quantity": 0,
                                         "retail_price_USD": str(product.retail_price_USD)}
        if override_quantity:
            self.favorite[product_id]["quantity"] = quantity
        else:
            self.favorite[product_id]["quantity"] += quantity
        self.save()

    def remove(self, product):
        """
        Remove a product from the favorite list.
        """
        product_id = str(product.id)
        if product_id in self.favorite:
            del self.favorite[product_id]
            self.save()

    def get_total_retail_price_USD(self):
        """Return the total price: sum of price * quantity over all items."""
        return sum(Decimal(item['retail_price_USD']) * item['quantity'] for item in self.favorite.values())

    def clear(self):
        """Remove the favorite list from the session entirely."""
        # Bug fix: previously used settings.FAVORITER_SESSION_ID (typo), which
        # raised AttributeError; the key used everywhere else in this class is
        # FAVORITE_SESSION_ID.
        del self.session[settings.FAVORITE_SESSION_ID]
        self.save()

    def save(self):
        """Mark the session as modified so the framework persists the changes."""
        self.session.modified = True
from django.conf import settings
from products.models import Products
class Favorite(object):
def __init__(self, request):
"""
Initialize the favorite
"""
self.session = request.session
favorite = self.session.get(settings.FAVORITE_SESSION_ID)
if not favorite:
# Save empty favorite list in session
favorite = self.session[settings.FAVORITE_SESSION_ID] = {}
self.favorite = favorite
def __iter__(self):
"""
Iterate over the items in the favorite and get the products
from the database.
"""
product_ids = self.favorite.keys()
# get the product objects and add them to the cart
products = Products.objects.filter(id__in=product_ids)
favorite = self.favorite.copy()
for product in products:
favorite[str(product.id)]['product'] = product
for item in favorite.values():
yield item
def __len__(self):
"""
Count all items in the favorite list.
"""
return sum(item['quantity'] for item in self.favorite.values())
def add(self, product, quantity=1, override_quantity=False):
"""
Add a product to the favorite or update list quantity.
"""
product_id = str(product.id)
if product_id not in self.favorite:
self.favorite[product_id] = {"quantity": 0,
"retail_price_USD": str(product.retail_price_USD)}
if override_quantity:
self.favorite[product_id]["quantity"] = quantity
else:
self.favorite[product_id]["quantity"] += quantity
self.save()
def remove(self, product):
"""
Remove a product from the favorite list.
"""
product_id = str(product.id)
if product_id in self.favorite:
del self.favorite[product_id]
self.save()
def get_total_retail_price_USD(self):
return sum(Decimal(item['retail_price_USD']) * item['quantity'] for item in self.favorite.values())
def clear(self):
# remove favorite from session
del self.session[settings.FAVORITER_SESSION_ID]
self.save()
def save(self):
self.session.modified = True | 0.557604 | 0.131312 |
from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread
import time
from person import Person
# GLOBAL CONSTANTS (Sabitler)
HOST = "localhost"
PORT = 5500
ADDR = (HOST, PORT)
MAX_CONNECTIONS = 10
BUFSIZE = 512 # aktarılan data'nın byte sayısı
TIME = time.ctime(time.time())
# GLOBAL VARIABLES (Değişkenler)
persons = []
server = socket(AF_INET, SOCK_STREAM) # AF_INET -> IPv4 tipinde '192.168.3.11' gibi adresler. # SOCK_STREAM -> Tipi.
server.bind(ADDR) # server kurulur
def broadcast(msg, name):
    """Send a message to every connected client.

    :param msg: utf8-encoded message body (bytes)
    :param name: str sender prefix prepended to the message
    :return: None
    """
    for person in persons:
        try:
            person.client.send(bytes(f"{name}", "utf8") + msg)
        except Exception as e:
            # A single failing socket must not stop delivery to the others
            print("[HATA]", e)
def client_communication(person):
    """Handle the chat session of one connected client.

    The first message received is taken as the client's display name; after
    that every message is broadcast to all clients until "{quit}" arrives.

    :param person: Person
    :return: None
    """
    client = person.client
    # The very first message from a client is its display name
    name = client.recv(BUFSIZE).decode("utf8")
    person.set_name(name)
    broadcast(bytes(f"{name} katıldı!", "utf8"), "")  # announce the join
    while True:
        msg = client.recv(BUFSIZE)
        if msg == bytes("{quit}", "utf8"):
            # Client asked to leave: close the socket and announce departure
            client.close()
            persons.remove(person)
            broadcast(bytes(f"{name} sohbeti terk etti...", "utf8"), "")
            print(f"[TERK] {name} ayrıldı.")
            break
        # Relay the message to everyone, prefixed with the sender's name
        broadcast(msg, name + ": ")
        print(f"{name}:", msg.decode("utf8"))
def wait_for_connection():
    """Accept incoming connections and spawn one chat thread per client.

    :return: None
    """
    while True:
        try:
            conn, address = server.accept()  # blocks until a client connects
            newcomer = Person(address, conn)
            persons.append(newcomer)
            print(f"[BAĞLANTI] {newcomer.addr}, server'a bağlandı ({TIME}).")
            Thread(target=client_communication, args=(newcomer,)).start()
        except Exception as e:
            print("[HATA]", e)
            break
    print("SERVER HATASI")
if __name__ == '__main__':
server.listen(MAX_CONNECTIONS) # bağlantıları dinlemek için server'ı açar
print("Bağlantılar bekleniyor...")
server_thread = Thread(target=wait_for_connection)
server_thread.start()
server_thread.join() # thread terminate edilene kadar bekletilir
server.close() | server/server.py | from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread
import time
from person import Person
# GLOBAL CONSTANTS (Sabitler)
HOST = "localhost"
PORT = 5500
ADDR = (HOST, PORT)
MAX_CONNECTIONS = 10
BUFSIZE = 512 # aktarılan data'nın byte sayısı
TIME = time.ctime(time.time())
# GLOBAL VARIABLES (Değişkenler)
persons = []
server = socket(AF_INET, SOCK_STREAM) # AF_INET -> IPv4 tipinde '192.168.3.11' gibi adresler. # SOCK_STREAM -> Tipi.
server.bind(ADDR) # server kurulur
def broadcast(msg, name):
"""
Tüm kullanıcılara mesaj yollar.
:param msg: bytes["utf8"]
:param name: str
:return: None
"""
for person in persons:
client = person.client
try:
client.send(bytes(f"{name}", "utf8") + msg)
except Exception as e:
print("[HATA]", e)
def client_communication(person):
"""
Kullanıcılardan gelen mesajları tutar.
:param person: Person
:return: None
"""
client = person.client
# ilk gelen mesajının, kişinin ismi olmasını sağlıyor
name = client.recv(BUFSIZE).decode("utf8")
person.set_name(name)
msg = bytes(f"{name} katıldı!", "utf8")
broadcast(msg, "") # katıldı duyurusu | send() edildi
while True: # kullanıcıdan gelecek mesajı bekler
msg = client.recv(BUFSIZE) # msg -> bytes | client tarafından gelen mesajı tutar
if msg == bytes("{quit}", "utf8"): # mesaj 'quit' olduğunda, o kişi sohbet'ten çıkar
client.close()
persons.remove(person)
broadcast(bytes(f"{name} sohbeti terk etti...", "utf8"), "")
print(f"[TERK] {name} ayrıldı.")
break
else: # aksi takdirde, diğer kullanıcılarla sohbet'e devam eder
broadcast(msg, name+": ") # isim: mesaj (client) | send() edildi
print(f"{name}:", msg.decode("utf8")) # isim: mesaj (server) | msg -> str
def wait_for_connection():
"""
Yeni katılacak olan kullanıcıların bağlantılarını bekler.
:return: None
"""
while True: # herhangi bir bağlantıyı bekler
try:
client, addr = server.accept() # accept() -> (socket objesi, adres bilgisi)
person = Person(addr, client) # bağlantı için yeni bir kişi objesi oluşturur
persons.append(person)
print(f"[BAĞLANTI] {person.addr}, server'a bağlandı ({TIME}).")
Thread(target=client_communication, args=(person,)).start()
except Exception as e:
print("[HATA]", e)
break
print("SERVER HATASI")
if __name__ == '__main__':
server.listen(MAX_CONNECTIONS) # bağlantıları dinlemek için server'ı açar
print("Bağlantılar bekleniyor...")
server_thread = Thread(target=wait_for_connection)
server_thread.start()
server_thread.join() # thread terminate edilene kadar bekletilir
server.close() | 0.203668 | 0.088347 |
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
class AvailabilityZone(CloudFormationLintRule):
"""Check Availibility Zone parameter checks """
id = 'W2508'
shortdesc = 'Availability Zone Parameters are of correct type AWS::EC2::AvailabilityZone::Name'
description = 'Check if a parameter is being used in a resource for Security ' \
'Group. If it is make sure it is of type AWS::EC2::AvailabilityZone::Name'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'
tags = ['parameters', 'availabilityzone']
def __init__(self):
"""Init"""
super(AvailabilityZone, self).__init__()
self.multiple_resource_type_specs = [
'AWS::DAX::Cluster',
'AWS::AutoScaling::AutoScalingGroup',
'AWS::RDS::DBCluster',
'AWS::ElasticLoadBalancing::LoadBalancer',
]
self.singular_resource_type_specs = [
'AWS::OpsWorks::Instance',
'AWS::RDS::DBInstance',
'AWS::EC2::Host',
'AWS::EC2::Subnet',
'AWS::DMS::ReplicationInstance',
'AWS::EC2::Instance'
]
self.singular_property_type_specs = [
# Singular
'AWS::EC2::LaunchTemplate.Placement',
'AWS::EC2::SpotFleet.SpotPlacement',
'AWS::EMR::Cluster.PlacementType',
'AWS::Glue::Connection.PhysicalConnectionRequirements',
'AWS::ElasticLoadBalancingV2::TargetGroup.TargetDescription',
'AWS::EC2::SpotFleet.LaunchTemplateOverrides',
]
for resoruce_type_spec in self.singular_resource_type_specs:
self.resource_property_types.append(resoruce_type_spec)
for resoruce_type_spec in self.multiple_resource_type_specs:
self.resource_property_types.append(resoruce_type_spec)
for property_type_spec in self.singular_property_type_specs:
self.resource_sub_property_types.append(property_type_spec)
# pylint: disable=W0613
def check_az_ref(self, value, path, parameters, resources):
"""Check ref for VPC"""
matches = []
if 'AvailabilityZone' in path:
allowed_types = [
'AWS::SSM::Parameter::Value<AWS::EC2::AvailabilityZone::Name>',
'AWS::EC2::AvailabilityZone::Name'
]
elif isinstance(path[-2], int):
allowed_types = [
'AWS::SSM::Parameter::Value<AWS::EC2::AvailabilityZone::Name>',
'AWS::EC2::AvailabilityZone::Name'
]
else:
allowed_types = [
'AWS::SSM::Parameter::Value<List<AWS::EC2::AvailabilityZone::Name>>',
'List<AWS::EC2::AvailabilityZone::Name>'
]
if value in parameters:
parameter_properties = parameters.get(value)
parameter_type = parameter_properties.get('Type')
if parameter_type not in allowed_types:
path_error = ['Parameters', value, 'Type']
message = 'Availability Zone Parameter should be of type [{0}] for {1}'
matches.append(
RuleMatch(
path_error,
message.format(
', '.join(map(str, allowed_types)),
'/'.join(map(str, path_error)))))
return matches
def check(self, properties, resource_type, path, cfn):
"""Check itself"""
matches = []
matches.extend(
cfn.check_value(
properties, 'AvailabilityZone', path,
check_value=None, check_ref=self.check_az_ref,
check_find_in_map=None, check_split=None, check_join=None
)
)
matches.extend(
cfn.check_value(
properties, 'AvailabilityZones', path,
check_value=None, check_ref=self.check_az_ref,
check_find_in_map=None, check_split=None, check_join=None
)
)
return matches
def match_resource_sub_properties(self, properties, property_type, path, cfn):
"""Match for sub properties"""
matches = []
matches.extend(self.check(properties, property_type, path, cfn))
return matches
def match_resource_properties(self, properties, resource_type, path, cfn):
"""Check CloudFormation Properties"""
matches = []
matches.extend(self.check(properties, resource_type, path, cfn))
return matches | src/cfnlint/rules/parameters/AvailabilityZone.py | from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
class AvailabilityZone(CloudFormationLintRule):
"""Check Availibility Zone parameter checks """
id = 'W2508'
shortdesc = 'Availability Zone Parameters are of correct type AWS::EC2::AvailabilityZone::Name'
description = 'Check if a parameter is being used in a resource for Security ' \
'Group. If it is make sure it is of type AWS::EC2::AvailabilityZone::Name'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'
tags = ['parameters', 'availabilityzone']
def __init__(self):
"""Init"""
super(AvailabilityZone, self).__init__()
self.multiple_resource_type_specs = [
'AWS::DAX::Cluster',
'AWS::AutoScaling::AutoScalingGroup',
'AWS::RDS::DBCluster',
'AWS::ElasticLoadBalancing::LoadBalancer',
]
self.singular_resource_type_specs = [
'AWS::OpsWorks::Instance',
'AWS::RDS::DBInstance',
'AWS::EC2::Host',
'AWS::EC2::Subnet',
'AWS::DMS::ReplicationInstance',
'AWS::EC2::Instance'
]
self.singular_property_type_specs = [
# Singular
'AWS::EC2::LaunchTemplate.Placement',
'AWS::EC2::SpotFleet.SpotPlacement',
'AWS::EMR::Cluster.PlacementType',
'AWS::Glue::Connection.PhysicalConnectionRequirements',
'AWS::ElasticLoadBalancingV2::TargetGroup.TargetDescription',
'AWS::EC2::SpotFleet.LaunchTemplateOverrides',
]
for resoruce_type_spec in self.singular_resource_type_specs:
self.resource_property_types.append(resoruce_type_spec)
for resoruce_type_spec in self.multiple_resource_type_specs:
self.resource_property_types.append(resoruce_type_spec)
for property_type_spec in self.singular_property_type_specs:
self.resource_sub_property_types.append(property_type_spec)
# pylint: disable=W0613
def check_az_ref(self, value, path, parameters, resources):
"""Check ref for VPC"""
matches = []
if 'AvailabilityZone' in path:
allowed_types = [
'AWS::SSM::Parameter::Value<AWS::EC2::AvailabilityZone::Name>',
'AWS::EC2::AvailabilityZone::Name'
]
elif isinstance(path[-2], int):
allowed_types = [
'AWS::SSM::Parameter::Value<AWS::EC2::AvailabilityZone::Name>',
'AWS::EC2::AvailabilityZone::Name'
]
else:
allowed_types = [
'AWS::SSM::Parameter::Value<List<AWS::EC2::AvailabilityZone::Name>>',
'List<AWS::EC2::AvailabilityZone::Name>'
]
if value in parameters:
parameter_properties = parameters.get(value)
parameter_type = parameter_properties.get('Type')
if parameter_type not in allowed_types:
path_error = ['Parameters', value, 'Type']
message = 'Availability Zone Parameter should be of type [{0}] for {1}'
matches.append(
RuleMatch(
path_error,
message.format(
', '.join(map(str, allowed_types)),
'/'.join(map(str, path_error)))))
return matches
def check(self, properties, resource_type, path, cfn):
"""Check itself"""
matches = []
matches.extend(
cfn.check_value(
properties, 'AvailabilityZone', path,
check_value=None, check_ref=self.check_az_ref,
check_find_in_map=None, check_split=None, check_join=None
)
)
matches.extend(
cfn.check_value(
properties, 'AvailabilityZones', path,
check_value=None, check_ref=self.check_az_ref,
check_find_in_map=None, check_split=None, check_join=None
)
)
return matches
def match_resource_sub_properties(self, properties, property_type, path, cfn):
"""Match for sub properties"""
matches = []
matches.extend(self.check(properties, property_type, path, cfn))
return matches
def match_resource_properties(self, properties, resource_type, path, cfn):
"""Check CloudFormation Properties"""
matches = []
matches.extend(self.check(properties, resource_type, path, cfn))
return matches | 0.734881 | 0.141193 |
import logging
import torch
import argparse
import os
from sklearn import metrics
from timeit import default_timer as timer
from utils import getModel, resumeFromPath
from data.ImagenetDataset import get_zipped_dataloaders, REDUCED_SET_PATH, FULL_SET_PATH
from data.utils import getLabelToClassMapping
from typing import Tuple, List
from benchmark import storeReportToCSV
parser = argparse.ArgumentParser(description='Benchmark MSDNet variants.')
parser.add_argument('--batch_size', metavar='N', type=int, default=1, help='Batchsize for training or validation run.')
parser.add_argument('--bench_type', type=str, default=None, choices=['quality', 'speed', 'report'], help='Execute only the specfied benchmark type.')
parser.add_argument('--runs', metavar='N', type=int, default=30, help='Number of runs to collect data for each item to benchmark.')
parser.add_argument('--data_root', type=str, default=FULL_SET_PATH, help='Root path for a prepared zipped dataset.')
parser.add_argument('--report_path', type=str, default=os.path.join(os.getcwd(), 'reports'), help='Root path for storing reports.')
parser.add_argument('--state_path', type=str, default=os.path.join(os.getcwd(), 'state'), help='Absolute path to the directory containing checkpoints for the model.')
def runSpeedBench(args, arch: str, max_classifications: int) ->float:
    """Time one forward pass of the model identified by `arch`.

    A first untimed pass is run as a warm-up before measuring.

    :param args: parsed CLI arguments (unused here, kept for symmetry)
    :param arch: architecture name understood by getModel()
    :param max_classifications: number of classifier exits to evaluate
    :return: elapsed time of a single forward pass, in milliseconds
    """
    model = getModel(arch)
    model.setMaxClassifiers(max_classifications)
    tensor = torch.rand(1, 3, 224, 224)  # dummy ImageNet-sized batch of one
    # warmup
    temp_res = model(tensor)
    start = timer()
    temp_res = model(tensor)
    end = timer()
    return (end - start) * 1000  # seconds -> milliseconds
def executeSpeedBench(args, model_max: List[Tuple[str, int]]):
    """Run the speed benchmark for every (architecture, #classifiers) pair.

    Each classifier depth is timed `args.runs` times; one CSV report is
    written per architecture.

    :param args: parsed CLI arguments (runs, report_path, ...)
    :param model_max: list of (arch prefix, max classifier count) pairs
        (the original annotation claimed a single Tuple)
    """
    for arch, max_pred in model_max:
        # arch_name is invariant per architecture; hoisted out of the loops
        # so it is always defined when the report is written (the original
        # raised NameError for runs == 0 or max_pred == 0).
        arch_name = f'{arch}{max_pred}'
        measurements = {'classifier': [], 'arch': [], 'time': []}
        for max_classifications in range(1, max_pred + 1):
            for _ in range(args.runs):
                result = runSpeedBench(args, arch_name, max_classifications)
                measurements['classifier'].append(max_classifications)
                measurements['arch'].append(arch_name)
                measurements['time'].append(result)
        storeReportToCSV(
            os.path.join(os.getcwd(), args.report_path),
            f'speed-{arch_name}-none-run.csv',
            measurements)
def evaluateModel(args, model, loader, classes) -> (List[float], List[float]):
    """Run `model` over `loader` and collect ground-truth/predicted class names.

    :param args: parsed CLI arguments; only batch_size is read
    :param model: callable mapping an image batch to logits, or to a list of
        per-classifier logits (multi-exit nets), in which case the last is used
    :param loader: iterable of (images, labels) batches
    :param classes: sequence mapping a label index to its class name
    :return: (ground_truth, predictions) as parallel lists of class names
    """
    pred, grndT = [], []
    for images, labels in loader:  # iter() wrapper was redundant
        if torch.cuda.is_available():
            images = images.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)
        outputs = model(images)
        # Multi-exit models return one tensor per classifier; score the last.
        # isinstance against builtin list replaces the deprecated typing.List.
        if isinstance(outputs, list):
            outputs = outputs[-1]
        _, predicted = torch.max(outputs, 1)
        # The final batch may be smaller than batch_size.
        batch = min(args.batch_size, labels.shape[0])
        pred.extend(classes[predicted[k]] for k in range(batch))
        grndT.extend(classes[labels[j]] for j in range(batch))
    return grndT, pred
def getDataLoader(args):
    """Return the third (validation/test) loader for the configured dataset root."""
    _, _, loader = get_zipped_dataloaders(args.data_root, args.batch_size, use_valid=True)
    return loader
def getClassificationValues(predT, grndT) -> (float, float, float, float):
    """Compute (accuracy, macro precision, macro recall, macro F1).

    :param predT: predicted class names (passed to sklearn as y_pred)
    :param grndT: ground-truth class names (passed to sklearn as y_true)
    """
    return (
        metrics.accuracy_score(grndT, predT),
        metrics.precision_score(grndT, predT, average='macro'),
        metrics.recall_score(grndT, predT, average='macro'),
        metrics.f1_score(grndT, predT, average='macro')
    )
def runQualityBench(args, arch_name: str, max_classification: int, loader) -> (float, float, float, float):
    """Evaluate one checkpointed model variant and return its quality metrics.

    :param args: parsed CLI arguments (data_root, state_path, ...)
    :param arch_name: full architecture name used for checkpoint lookup
    :param max_classification: classifier depth to evaluate at
    :param loader: data loader supplied by the caller (the original rebuilt
        its own loader here and silently ignored this argument)
    :return: (accuracy, precision, recall, f1)
    """
    label_to_classes = getLabelToClassMapping(os.path.join(os.getcwd(), args.data_root))
    model = getModel(arch_name)
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
    model.setMaxClassifiers(max_classification)
    model, _, _, _ = resumeFromPath(os.path.join(args.state_path, f'{arch_name}_model_best.pth.tar'), model)
    model.eval()
    # evaluateModel returns (ground_truth, predictions); the original code
    # unpacked them in reverse, which swapped precision and recall.
    grndT, predT = evaluateModel(args, model, loader, label_to_classes)
    return getClassificationValues(predT, grndT)
def executeQualityBench(args, model_max: Tuple[str, int]):
    """Run the quality benchmark for every (architecture, #classifiers) pair.

    One CSV report per architecture, with one row per classifier depth.

    :param args: parsed CLI arguments
    :param model_max: iterable of (arch prefix, max classifier count) pairs
    """
    loader = getDataLoader(args)
    for arch, max_classifications in model_max:
        stats = {'classifier': [], 'arch': [], 'acc': [], 'prec': [], 'rec': [], 'f1': []}
        for max_cls in range(1, max_classifications + 1):
            arch_name = f'{arch}{max_classifications}'
            acc, prec, rec, f1 = runQualityBench(args, arch_name, max_cls, loader)
            stats['classifier'].append(max_cls)
            stats['arch'].append(arch_name)
            stats['acc'].append(acc)
            stats['prec'].append(prec)
            stats['rec'].append(rec)
            stats['f1'].append(f1)
        storeReportToCSV(args.report_path, f'quality-{arch_name}-run.csv', stats)
def executeClassificationReportBench(args, model_max: Tuple[str, int]):
    """Produce a detailed per-classifier report (metrics plus raw label lists).

    :param args: parsed CLI arguments
    :param model_max: a single (arch prefix, max classifier count) pair
    """
    loader = getDataLoader(args)
    arch_name = f'{model_max[0]}{model_max[1]}'
    label_to_classes = getLabelToClassMapping(os.path.join(os.getcwd(), args.data_root))
    model = getModel(arch_name)
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
    model, _, _, _ = resumeFromPath(os.path.join(args.state_path, f'{arch_name}_model_best.pth.tar'), model)
    stats = {
        'classifier': [],
        'arch': [],
        'acc': [],
        'prec': [],
        'rec': [],
        'f1': [],
        'ground_truth': [],
        'prediction': []
    }
    for max_classifications in range(1, model_max[1] + 1):
        logging.info(f'Running with Classification on Layer {max_classifications}')
        model.setMaxClassifiers(max_classifications)
        model.eval()
        # evaluateModel returns (ground_truth, predictions); the original
        # unpacking was reversed, so the stored label lists and the
        # precision/recall columns were swapped.
        grndT, pred = evaluateModel(args, model, loader, label_to_classes)
        acc, prec, rec, f1 = getClassificationValues(pred, grndT)
        stats['classifier'].append(max_classifications)
        stats['arch'].append(arch_name)
        stats['acc'].append(acc)
        stats['prec'].append(prec)
        stats['rec'].append(rec)
        stats['f1'].append(f1)
        stats['ground_truth'].append(grndT)
        stats['prediction'].append(pred)
    storeReportToCSV(args.report_path, f'report-{arch_name}-run.csv', stats)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    args = parser.parse_args()
    # (architecture prefix, number of classifier exits) pairs to benchmark.
    model_and_max = [('msdnet', 4), ('msdnet', 5), ('msdnet', 10)]
    print(args)
    print(model_and_max)
    # bench_type None means "run everything except the report benchmark".
    if args.bench_type is None or args.bench_type == 'speed':
        executeSpeedBench(args, model_and_max)
    if args.bench_type is None or args.bench_type == 'quality':
        executeQualityBench(args, model_and_max)
    if args.bench_type is not None and args.bench_type == 'report':
executeClassificationReportBench(args, model_and_max[1]) | benchmarkMSD.py | import logging
import torch
import argparse
import os
from sklearn import metrics
from timeit import default_timer as timer
from utils import getModel, resumeFromPath
from data.ImagenetDataset import get_zipped_dataloaders, REDUCED_SET_PATH, FULL_SET_PATH
from data.utils import getLabelToClassMapping
from typing import Tuple, List
from benchmark import storeReportToCSV
parser = argparse.ArgumentParser(description='Benchmark MSDNet variants.')
parser.add_argument('--batch_size', metavar='N', type=int, default=1, help='Batchsize for training or validation run.')
parser.add_argument('--bench_type', type=str, default=None, choices=['quality', 'speed', 'report'], help='Execute only the specfied benchmark type.')
parser.add_argument('--runs', metavar='N', type=int, default=30, help='Number of runs to collect data for each item to benchmark.')
parser.add_argument('--data_root', type=str, default=FULL_SET_PATH, help='Root path for a prepared zipped dataset.')
parser.add_argument('--report_path', type=str, default=os.path.join(os.getcwd(), 'reports'), help='Root path for storing reports.')
parser.add_argument('--state_path', type=str, default=os.path.join(os.getcwd(), 'state'), help='Absolute path to the directory containing checkpoints for the model.')
def runSpeedBench(args, arch: str, max_classifications: int) ->float:
model = getModel(arch)
model.setMaxClassifiers(max_classifications)
tensor = torch.rand(1, 3, 224, 224)
# warmup
temp_res = model(tensor)
start = timer()
temp_res = model(tensor)
end = timer()
return (end - start) * 1000
def executeSpeedBench(args, model_max: Tuple[str, int]):
for arch, max_pred in model_max:
measurements = {'classifier': [], 'arch': [], 'time': []}
for max_classifications in range(1, max_pred + 1):
for _ in range(args.runs):
arch_name = f'{arch}{max_pred}'
result = runSpeedBench(args, arch_name, max_classifications)
measurements['classifier'].append(max_classifications)
measurements['arch'].append(arch_name)
measurements['time'].append(result)
storeReportToCSV(
os.path.join(os.getcwd(), args.report_path),
f'speed-{arch_name}-none-run.csv',
measurements)
def evaluateModel(args, model, loader, classes) -> (List[float], List[float]):
pred, grndT = [], []
for (images, labels) in iter(loader):
if torch.cuda.is_available():
images = images.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
outputs = model(images)
if isinstance(outputs, List):
outputs = outputs[-1]
_, predicted = torch.max(outputs, 1)
pred = pred + [classes[predicted[k]] for k in range(min(args.batch_size, labels.shape[0]))]
grndT = grndT + [classes[labels[j]] for j in range(min(args.batch_size, labels.shape[0]))]
return grndT, pred
def getDataLoader(args):
_, _, loader = get_zipped_dataloaders(args.data_root, args.batch_size, use_valid=True)
return loader
def getClassificationValues(predT, grndT) -> (float, float, float, float):
return (
metrics.accuracy_score(grndT, predT),
metrics.precision_score(grndT, predT, average='macro'),
metrics.recall_score(grndT, predT, average='macro'),
metrics.f1_score(grndT, predT, average='macro')
)
def runQualityBench(args, arch_name: str, max_classification: int, loader) -> (float, float, float, float):
loader = getDataLoader(args)
label_to_classes = getLabelToClassMapping(os.path.join(os.getcwd(), args.data_root))
model = getModel(arch_name)
if torch.cuda.is_available():
model = torch.nn.DataParallel(model).cuda()
model.setMaxClassifiers(max_classification)
model, _ , _, _ = resumeFromPath(os.path.join(args.state_path, f'{arch_name}_model_best.pth.tar'), model)
model.eval()
predT, grndT = evaluateModel(args, model, loader, label_to_classes)
return getClassificationValues(predT, grndT)
def executeQualityBench(args, model_max: Tuple[str, int]):
loader = getDataLoader(args)
for arch, max_classifications in model_max:
stats = {'classifier': [], 'arch': [], 'acc': [], 'prec': [], 'rec': [], 'f1': []}
for max_cls in range(1, max_classifications + 1):
arch_name = f'{arch}{max_classifications}'
acc, prec, rec, f1 = runQualityBench(args, arch_name, max_cls, loader)
stats['classifier'].append(max_cls)
stats['arch'].append(arch_name)
stats['acc'].append(acc)
stats['prec'].append(prec)
stats['rec'].append(rec)
stats['f1'].append(f1)
storeReportToCSV(args.report_path, f'quality-{arch_name}-run.csv', stats)
def executeClassificationReportBench(args, model_max: Tuple[str, int]):
loader = getDataLoader(args)
arch_name = f'{model_max[0]}{model_max[1]}'
label_to_classes = getLabelToClassMapping(os.path.join(os.getcwd(), args.data_root))
model = getModel(arch_name)
if torch.cuda.is_available():
model = torch.nn.DataParallel(model).cuda()
model, _, _, _ = resumeFromPath(os.path.join(args.state_path, f'{arch_name}_model_best.pth.tar'), model)
stats = {
'classifier': [],
'arch': [],
'acc': [],
'prec': [],
'rec': [],
'f1': [],
'ground_truth': [],
'prediction': []
}
for max_classifications in range(1, model_max[1] + 1):
logging.info(f'Running with Classification on Layer {max_classifications}')
model.setMaxClassifiers(max_classifications)
model.eval()
pred, grndT = evaluateModel(args, model, loader, label_to_classes)
acc, prec, rec, f1 = getClassificationValues(pred, grndT)
stats['classifier'].append(max_classifications)
stats['arch'].append(arch_name)
stats['acc'].append(acc)
stats['prec'].append(prec)
stats['rec'].append(rec)
stats['f1'].append(f1)
stats['ground_truth'].append(grndT)
stats['prediction'].append(pred)
storeReportToCSV(args.report_path, f'report-{arch_name}-run.csv', stats)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
args = parser.parse_args()
model_and_max = [('msdnet', 4), ('msdnet', 5), ('msdnet', 10)]
print(args)
print(model_and_max)
if args.bench_type is None or args.bench_type == 'speed':
executeSpeedBench(args, model_and_max)
if args.bench_type is None or args.bench_type == 'quality':
executeQualityBench(args, model_and_max)
if args.bench_type is not None and args.bench_type == 'report':
executeClassificationReportBench(args, model_and_max[1]) | 0.695545 | 0.19758 |
CREATE_TABLES = """
DROP TABLE IF EXISTS staging_yellow_trips;
CREATE TABLE IF NOT EXISTS staging_yellow_trips (
VendorID INT,
tpep_pickup_datetime TIMESTAMP,
tpep_dropoff_datetime TIMESTAMP,
passenger_count INT,
trip_distance FLOAT,
RatecodeID INT,
store_and_fwd_flag TEXT,
PULocationID INT,
DOLocationID INT,
payment_type FLOAT,
fare_amount FLOAT,
extra FLOAT,
mta_tax FLOAT,
tip_amount FLOAT,
tolls_amount FLOAT,
improvement_surcharge FLOAT,
total_amount FLOAT,
congestion_surcharge FLOAT
);
DROP TABLE IF EXISTS trips_lookup;
CREATE TABLE IF NOT EXISTS trips_lookup (
LocationID INT PRIMARY KEY,
Borough TEXT,
Zone TEXT,
service_zone TEXT
);
DROP TABLE IF EXISTS trips;
CREATE TABLE IF NOT EXISTS trips (
ID SERIAL PRIMARY KEY,
Date TEXT,
PULocationID INT,
DOLocationID INT,
passenger_count INT
);
DROP TABLE IF EXISTS pop_destination_passengers_month;
CREATE TABLE IF NOT EXISTS pop_destination_passengers_month(
month TEXT,
pick_up TEXT,
drop_off TEXT,
total_passengers INT,
ranking INT);
DROP TABLE IF EXISTS pop_destination_rides_month;
CREATE TABLE IF NOT EXISTS pop_destination_rides_month(
month TEXT,
pick_up TEXT,
drop_off TEXT,
total_rides TEXT,
ranking INT);
DROP TABLE IF EXISTS popular_rides_full;
CREATE TABLE IF NOT EXISTS popular_rides_full (
month TEXT,
pick_up TEXT,
drop_off TEXT,
ranking INT
);
DROP TABLE IF EXISTS cur_popular_dest;
CREATE TABLE IF NOT EXISTS cur_popular_dest (
pick_up TEXT,
drop_off TEXT,
ranking INT
);
"""
LOAD_TRIPS = """
DELETE FROM trips;
INSERT INTO trips (Date, PULocationID, DOLocationID, passenger_count)
SELECT
to_char(tpep_pickup_datetime, 'YYYY-MM'),
PULocationID,
DOLocationID,
passenger_count
FROM staging_yellow_trips;
"""
COPY_SQL = """
COPY {}
FROM '{}'
WITH DELIMITER ','
HEADER
"""
COPY_STAGING_YELLOW = COPY_SQL.format(
"staging_yellow_trips",
"./data/yellow*"
)
CALC_pop_destination_passengers_month = """
--DELETE FROM pop_destination_passengers_month;
WITH total_passengers AS (
SELECT
t.Date as month,
p.zone as pick_up,
d.zone as drop_off,
sum(t.passenger_count) total_passengers
FROM trips t
LEFT JOIN trips_lookup p
ON p.locationid = t.PULocationID
LEFT JOIN trips_lookup d
ON d.locationid = t.DOLocationID
WHERE t.Date = '{}'
GROUP BY t.Date, p.zone, d.zone
),
ranked_total_passengers AS (
SELECT
*,
rank() OVER (PARTITION BY pick_up ORDER BY total_passengers DESC) as ranking
FROM total_passengers
)
INSERT INTO pop_destination_passengers_month
SELECT
*
FROM ranked_total_passengers
WHERE ranking <= 5;
"""
CALC_pop_destination_rides_month = """
DELETE FROM pop_destination_rides_month;
WITH total_rides AS (
SELECT
t.Date as month,
p.Borough as pick_up,
d.Borough as drop_off,
count(t.ID) total_rides
FROM trips t
LEFT JOIN trips_lookup p
ON p.locationid = t.PULocationID
LEFT JOIN trips_lookup d
ON d.locationid = t.DOLocationID
WHERE t.Date = '2019-03'
GROUP BY t.Date, p.Borough, d.Borough
),
ranked_borough_destination AS (
SELECT
*,
rank() OVER (PARTITION BY pick_up ORDER BY total_rides DESC) as ranking
FROM total_rides
)
INSERT INTO pop_destination_rides_month
SELECT
*
FROM ranked_borough_destination;
"""
Load_popular_rides_full = """
WITH total_rides AS (
SELECT
t.Date as month,
p.Borough as pick_up,
d.Borough as drop_off,
count(t.ID) total_rides
FROM trips t
LEFT JOIN trips_lookup p
ON p.locationid = t.PULocationID
LEFT JOIN trips_lookup d
ON d.locationid = t.DOLocationID
WHERE t.Date = '{}'
GROUP BY t.Date, p.Borough, d.Borough
),
ranked_borough_destination AS (
SELECT
month,
pick_up,
drop_off,
rank() OVER (PARTITION BY pick_up ORDER BY total_rides DESC) as ranking
FROM total_rides
),
prev_rank AS (
SELECT
*
FROM popular_rides_full
WHERE month = '{}'
)
INSERT INTO popular_rides_full
SELECT
current.*
FROM ranked_borough_destination current
LEFT JOIN prev_rank
ON prev_rank.pick_up = current.pick_up
AND prev_rank.drop_off = current.drop_off
AND prev_rank.ranking = current.ranking
WHERE prev_rank.ranking IS NULL
AND current.ranking <= 10
;
"""
CALC_current_pop_dest = """
DELETE FROM cur_popular_dest;
WITH total_rides AS (
SELECT
t.Date as month,
p.Borough as pick_up,
d.Borough as drop_off,
count(t.ID) total_rides
FROM trips t
LEFT JOIN trips_lookup p
ON p.locationid = t.PULocationID
LEFT JOIN trips_lookup d
ON d.locationid = t.DOLocationID
WHERE t.Date = '{}'
GROUP BY t.Date, p.Borough, d.Borough
),
ranked_borough_destination AS (
SELECT
pick_up,
drop_off,
rank() OVER (PARTITION BY pick_up ORDER BY total_rides DESC) as ranking
FROM total_rides
)
INSERT INTO cur_popular_dest
SELECT
*
FROM ranked_borough_destination
WHERE ranking <= 10;
""" | scripts/sql_queries.py | CREATE_TABLES = """
DROP TABLE IF EXISTS staging_yellow_trips;
CREATE TABLE IF NOT EXISTS staging_yellow_trips (
VendorID INT,
tpep_pickup_datetime TIMESTAMP,
tpep_dropoff_datetime TIMESTAMP,
passenger_count INT,
trip_distance FLOAT,
RatecodeID INT,
store_and_fwd_flag TEXT,
PULocationID INT,
DOLocationID INT,
payment_type FLOAT,
fare_amount FLOAT,
extra FLOAT,
mta_tax FLOAT,
tip_amount FLOAT,
tolls_amount FLOAT,
improvement_surcharge FLOAT,
total_amount FLOAT,
congestion_surcharge FLOAT
);
DROP TABLE IF EXISTS trips_lookup;
CREATE TABLE IF NOT EXISTS trips_lookup (
LocationID INT PRIMARY KEY,
Borough TEXT,
Zone TEXT,
service_zone TEXT
);
DROP TABLE IF EXISTS trips;
CREATE TABLE IF NOT EXISTS trips (
ID SERIAL PRIMARY KEY,
Date TEXT,
PULocationID INT,
DOLocationID INT,
passenger_count INT
);
DROP TABLE IF EXISTS pop_destination_passengers_month;
CREATE TABLE IF NOT EXISTS pop_destination_passengers_month(
month TEXT,
pick_up TEXT,
drop_off TEXT,
total_passengers INT,
ranking INT);
DROP TABLE IF EXISTS pop_destination_rides_month;
CREATE TABLE IF NOT EXISTS pop_destination_rides_month(
month TEXT,
pick_up TEXT,
drop_off TEXT,
total_rides TEXT,
ranking INT);
DROP TABLE IF EXISTS popular_rides_full;
CREATE TABLE IF NOT EXISTS popular_rides_full (
month TEXT,
pick_up TEXT,
drop_off TEXT,
ranking INT
);
DROP TABLE IF EXISTS cur_popular_dest;
CREATE TABLE IF NOT EXISTS cur_popular_dest (
pick_up TEXT,
drop_off TEXT,
ranking INT
);
"""
LOAD_TRIPS = """
DELETE FROM trips;
INSERT INTO trips (Date, PULocationID, DOLocationID, passenger_count)
SELECT
to_char(tpep_pickup_datetime, 'YYYY-MM'),
PULocationID,
DOLocationID,
passenger_count
FROM staging_yellow_trips;
"""
COPY_SQL = """
COPY {}
FROM '{}'
WITH DELIMITER ','
HEADER
"""
COPY_STAGING_YELLOW = COPY_SQL.format(
"staging_yellow_trips",
"./data/yellow*"
)
CALC_pop_destination_passengers_month = """
--DELETE FROM pop_destination_passengers_month;
WITH total_passengers AS (
SELECT
t.Date as month,
p.zone as pick_up,
d.zone as drop_off,
sum(t.passenger_count) total_passengers
FROM trips t
LEFT JOIN trips_lookup p
ON p.locationid = t.PULocationID
LEFT JOIN trips_lookup d
ON d.locationid = t.DOLocationID
WHERE t.Date = '{}'
GROUP BY t.Date, p.zone, d.zone
),
ranked_total_passengers AS (
SELECT
*,
rank() OVER (PARTITION BY pick_up ORDER BY total_passengers DESC) as ranking
FROM total_passengers
)
INSERT INTO pop_destination_passengers_month
SELECT
*
FROM ranked_total_passengers
WHERE ranking <= 5;
"""
CALC_pop_destination_rides_month = """
DELETE FROM pop_destination_rides_month;
WITH total_rides AS (
SELECT
t.Date as month,
p.Borough as pick_up,
d.Borough as drop_off,
count(t.ID) total_rides
FROM trips t
LEFT JOIN trips_lookup p
ON p.locationid = t.PULocationID
LEFT JOIN trips_lookup d
ON d.locationid = t.DOLocationID
WHERE t.Date = '2019-03'
GROUP BY t.Date, p.Borough, d.Borough
),
ranked_borough_destination AS (
SELECT
*,
rank() OVER (PARTITION BY pick_up ORDER BY total_rides DESC) as ranking
FROM total_rides
)
INSERT INTO pop_destination_rides_month
SELECT
*
FROM ranked_borough_destination;
"""
Load_popular_rides_full = """
WITH total_rides AS (
SELECT
t.Date as month,
p.Borough as pick_up,
d.Borough as drop_off,
count(t.ID) total_rides
FROM trips t
LEFT JOIN trips_lookup p
ON p.locationid = t.PULocationID
LEFT JOIN trips_lookup d
ON d.locationid = t.DOLocationID
WHERE t.Date = '{}'
GROUP BY t.Date, p.Borough, d.Borough
),
ranked_borough_destination AS (
SELECT
month,
pick_up,
drop_off,
rank() OVER (PARTITION BY pick_up ORDER BY total_rides DESC) as ranking
FROM total_rides
),
prev_rank AS (
SELECT
*
FROM popular_rides_full
WHERE month = '{}'
)
INSERT INTO popular_rides_full
SELECT
current.*
FROM ranked_borough_destination current
LEFT JOIN prev_rank
ON prev_rank.pick_up = current.pick_up
AND prev_rank.drop_off = current.drop_off
AND prev_rank.ranking = current.ranking
WHERE prev_rank.ranking IS NULL
AND current.ranking <= 10
;
"""
CALC_current_pop_dest = """
DELETE FROM cur_popular_dest;
WITH total_rides AS (
SELECT
t.Date as month,
p.Borough as pick_up,
d.Borough as drop_off,
count(t.ID) total_rides
FROM trips t
LEFT JOIN trips_lookup p
ON p.locationid = t.PULocationID
LEFT JOIN trips_lookup d
ON d.locationid = t.DOLocationID
WHERE t.Date = '{}'
GROUP BY t.Date, p.Borough, d.Borough
),
ranked_borough_destination AS (
SELECT
pick_up,
drop_off,
rank() OVER (PARTITION BY pick_up ORDER BY total_rides DESC) as ranking
FROM total_rides
)
INSERT INTO cur_popular_dest
SELECT
*
FROM ranked_borough_destination
WHERE ranking <= 10;
""" | 0.1139 | 0.0566 |
import time
from prometheus_client import CONTENT_TYPE_LATEST, Summary, Counter, generate_latest
from werkzeug.routing import Map, Rule
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from werkzeug.exceptions import InternalServerError
from app.collector import collect_sensors
class AirfilterExporterApplication(object):
    """
    Airfilter prometheus collector HTTP handler.

    A werkzeug WSGI application exposing three routes:
      /         -- usage/index page
      /metrics  -- dump of the default prometheus registry
      /sensors  -- run a sensor collection and return its output
    """

    def __init__(self, duration, errors):
        """
        :param duration: Summary metric observed with each /sensors duration
        :param errors: Counter metric incremented on handler exceptions
        """
        self._duration = duration
        self._errors = errors
        # URL routing table: path -> endpoint name.
        self._url_map = Map([
            Rule('/', endpoint='index'),
            Rule('/metrics', endpoint='metrics'),
            Rule('/sensors', endpoint='sensors'),
        ])
        # Query-string parameters each endpoint accepts; others are ignored.
        self._args = {
            'sensors': ['sds011', 'sleep', 'ccs811']
        }
        # Endpoint name -> bound handler method.
        self._views = {
            'index': self.on_index,
            'metrics': self.on_metrics,
            'sensors': self.on_sensors,
        }

    def on_sensors(self, sds011='/dev/ttyUSB0', sleep=15, ccs811='false'):
        """
        Request handler for /sensors route.

        Runs a sensor collection and returns its output, observing the
        elapsed time on the duration Summary.

        :param sds011: serial device path of the SDS011 sensor
        :param sleep: sampling delay forwarded to collect_sensors
            (NOTE(review): arrives as a str when supplied via the query
            string -- confirm collect_sensors tolerates that)
        :param ccs811: 'true'/'false' flag for the CCS811 sensor
        """
        start = time.time()
        output = collect_sensors(sds011, sleep, ccs811)
        response = Response(output)
        response.headers['content-type'] = CONTENT_TYPE_LATEST
        self._duration.observe(time.time() - start)
        return response

    def on_metrics(self):
        """
        Request handler for /metrics route.

        Returns everything registered in the default prometheus registry.
        """
        response = Response(generate_latest())
        response.headers['content-type'] = CONTENT_TYPE_LATEST
        return response

    def on_index(self):
        """
        Request handler for index route (/).
        """
        response = Response(
            """<html>
<head><title>Airfilter Exporter</title></head>
<body>
<h1>Airfilter Exporter</h1>
<p>Visit <code>/sensors?sds011="/dev/ttyUSB0"&sleep="15"</code> to use.</p>
</body>
</html>"""
        )
        response.headers['content-type'] = 'text/html'
        return response

    def view(self, endpoint, values, args):
        """
        Werkzeug views mapping method.

        Merges path values with the endpoint's whitelisted query-string
        arguments and dispatches to the handler; any handler exception is
        counted on the error metric and re-raised as an HTTP 500.
        """
        params = dict(values)
        if endpoint in self._args:
            params.update({key: args[key] for key in self._args[endpoint] if key in args})
        try:
            return self._views[endpoint](**params)
        except Exception as error:  # boundary: count, then convert to 500
            self._errors.inc()
            raise InternalServerError(error)

    @Request.application
    def __call__(self, request):
        # WSGI entry point: route the request and dispatch to view().
        urls = self._url_map.bind_to_environ(request.environ)
        view_func = lambda endpoint, values: self.view(endpoint, values, request.args)
        return urls.dispatch(view_func, catch_http_exceptions=True)
def start_http_server(port, address=''):
    """
    Start a HTTP API server for airfilter prometheus collector.

    :param port: TCP port to listen on
    :param address: interface to bind; '' binds all interfaces
    """
    duration = Summary(
        'airfilter_collection_duration_seconds',
        'Duration of collections by the airfilter exporter',
    )
    errors = Counter(
        'airfilter_request_errors_total',
        'Errors in requests to airfilter exporter',
    )
    # Summary/Counter register themselves on construction; the original
    # bare `errors` / `duration` expression statements were no-ops and
    # have been removed.
    app = AirfilterExporterApplication(duration, errors)
run_simple(address, port, app, threaded=True, use_debugger=True) | airfilter-exporter/app/http.py | import time
from prometheus_client import CONTENT_TYPE_LATEST, Summary, Counter, generate_latest
from werkzeug.routing import Map, Rule
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from werkzeug.exceptions import InternalServerError
from app.collector import collect_sensors
class AirfilterExporterApplication(object):
    """
    Airfilter prometheus collector HTTP handler.

    WSGI application exposing three routes:
      /         - human-readable landing page
      /metrics  - prometheus exposition of the process registry
      /sensors  - triggers a sensor collection and returns its output
    """
    def __init__(self, duration, errors):
        # Summary metric observed on every /sensors collection.
        self._duration = duration
        # Counter incremented whenever a view raises.
        self._errors = errors
        self._url_map = Map([
            Rule('/', endpoint='index'),
            Rule('/metrics', endpoint='metrics'),
            Rule('/sensors', endpoint='sensors'),
        ])
        # Whitelist of query-string parameters accepted per endpoint; anything
        # else in the query string is ignored by view().
        self._args = {
            'sensors': ['sds011', 'sleep', 'ccs811']
        }
        # endpoint name -> bound view method.
        self._views = {
            'index': self.on_index,
            'metrics': self.on_metrics,
            'sensors': self.on_sensors,
        }
    def on_sensors(self, sds011='/dev/ttyUSB0', sleep=15, ccs811='false'):
        """
        Request handler for /sensors route.

        NOTE(review): values coming from the query string arrive as strings,
        so `sleep` may be passed as '15' rather than 15 - presumably
        collect_sensors tolerates that; verify against its implementation.
        """
        start = time.time()
        output = collect_sensors(sds011, sleep, ccs811)
        response = Response(output)
        response.headers['content-type'] = CONTENT_TYPE_LATEST
        # Record how long the sensor collection took.
        self._duration.observe(time.time() - start)
        return response
    def on_metrics(self):
        """
        Request handler for /metrics route (prometheus exposition format).
        """
        response = Response(generate_latest())
        response.headers['content-type'] = CONTENT_TYPE_LATEST
        return response
    def on_index(self):
        """
        Request handler for index route (/).
        """
        response = Response(
            """<html>
            <head><title>Airfilter Exporter</title></head>
            <body>
            <h1>Airfilter Exporter</h1>
            <p>Visit <code>/sensors?sds011="/dev/ttyUSB0"&sleep="15"</code> to use.</p>
            </body>
            </html>"""
        )
        response.headers['content-type'] = 'text/html'
        return response
    def view(self, endpoint, values, args):
        """
        Werkzeug views mapping method.

        Merges URL values with whitelisted query args, invokes the view;
        any exception is counted and re-raised as a 500.
        """
        params = dict(values)
        if endpoint in self._args:
            # Only pass through query parameters the endpoint declared.
            params.update({key: args[key] for key in self._args[endpoint] if key in args})
        try:
            return self._views[endpoint](**params)
        except Exception as error:
            self._errors.inc()
            raise InternalServerError(error)
    @Request.application
    def __call__(self, request):
        """
        WSGI entry point: dispatch the request URL to the matching view.
        """
        urls = self._url_map.bind_to_environ(request.environ)
        view_func = lambda endpoint, values: self.view(endpoint, values, request.args)
        return urls.dispatch(view_func, catch_http_exceptions=True)
def start_http_server(port, address=''):
    """
    Start a HTTP API server for the airfilter prometheus collector.

    Parameters
    ----------
    port: int
        TCP port to listen on.
    address: str
        Interface to bind; '' binds all interfaces.
    """
    # Constructing the metrics registers them with the default prometheus
    # registry; the previous bare `errors` / `duration` statements were no-ops.
    duration = Summary(
        'airfilter_collection_duration_seconds',
        'Duration of collections by the airfilter exporter',
    )
    errors = Counter(
        'airfilter_request_errors_total',
        'Errors in requests to airfilter exporter',
    )
    app = AirfilterExporterApplication(duration, errors)
    # use_debugger must stay off: the werkzeug debugger allows arbitrary code
    # execution and must never be enabled on a network-facing service.
    run_simple(address, port, app, threaded=True, use_debugger=False)
from django.conf.urls import include, url
from django.contrib.auth import views as auth_views
from django.contrib import admin
from account.views import *
from community.views import *
from base.views import *
from django.conf.urls.static import static
from django.conf import settings
from rest_framework import routers
from api import views as api_views
from rest_framework.authtoken import views as authtoken_views
# router = routers.DefaultRouter()
# router.register(r'api/users', api_views.UserViewSet)
# router.register(r'api/communities', api_views.CommunityViewSet)
# NOTE: character classes previously read '[a-z||A-Z||0-9]'; inside a regex
# class '|' is a literal character (not alternation), so those patterns also
# matched pipes. Collapsed to the intended '[a-zA-Z0-9]'.
urlpatterns = [
    url(r'^$', login_page),
    url(r'^logout/$', logout_page),
    url(r'^accounts/login/$', login_page),  # Unauthenticated users are redirected here
    url(r'^admin/', admin.site.urls),
    url(r'^register/$', register),
    url(r'^register/success/$', register_success),
    url(r'^home/$', HomeView.as_view()),
    url(r'^community/(?P<community_tag>[a-zA-Z0-9]+)$', CommunityView.as_view(), name='community'),
    url(r'^search/$', SearchView.as_view()),
    url(r'^profile/(?P<profile_id>[a-zA-Z0-9\-]+)$', ProfileView.as_view()),
    url(r'^profile/$', profile_page),
]
# API routes:
urlpatterns += [
    url(r'^', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^api/communities/(?P<community_tag>[a-zA-Z0-9]+)$', api_views.APICommunityView.as_view()),
    url(r'^api/communities/(?P<community_tag>[a-zA-Z0-9]+)/users$', api_views.APICommunityUsersView.as_view()),
    url(r'^api/communities/(?P<community_tag>[a-zA-Z0-9]+)/badges/(?P<user_id>[a-zA-Z0-9]+)$',
        api_views.APISingleUserBadgeView.as_view()),
    url(r'^api/communities/(?P<community_tag>[a-zA-Z0-9]+)/badges$', api_views.APICommunityBadgesView.as_view()),
    url(r'^api/communities/(?P<community_tag>[a-zA-Z0-9]+)/leaderboard$', api_views.APILeaderboardView.as_view()),
    url(r'^api/replace-token/', api_views.APIReplaceTokenView.as_view()),
    url(r'^api/auth-token/', authtoken_views.obtain_auth_token),
    url(r'^api/$', api_views.api_root),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | work/urls.py | from django.conf.urls import include, url
from django.contrib.auth import views as auth_views
from django.contrib import admin
from account.views import *
from community.views import *
from base.views import *
from django.conf.urls.static import static
from django.conf import settings
from rest_framework import routers
from api import views as api_views
from rest_framework.authtoken import views as authtoken_views
# router = routers.DefaultRouter()
# router.register(r'api/users', api_views.UserViewSet)
# router.register(r'api/communities', api_views.CommunityViewSet)
# NOTE: character classes previously read '[a-z||A-Z||0-9]'; inside a regex
# class '|' is a literal character (not alternation), so those patterns also
# matched pipes. Collapsed to the intended '[a-zA-Z0-9]'.
urlpatterns = [
    url(r'^$', login_page),
    url(r'^logout/$', logout_page),
    url(r'^accounts/login/$', login_page),  # Unauthenticated users are redirected here
    url(r'^admin/', admin.site.urls),
    url(r'^register/$', register),
    url(r'^register/success/$', register_success),
    url(r'^home/$', HomeView.as_view()),
    url(r'^community/(?P<community_tag>[a-zA-Z0-9]+)$', CommunityView.as_view(), name='community'),
    url(r'^search/$', SearchView.as_view()),
    url(r'^profile/(?P<profile_id>[a-zA-Z0-9\-]+)$', ProfileView.as_view()),
    url(r'^profile/$', profile_page),
]
# API routes:
urlpatterns += [
    url(r'^', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^api/communities/(?P<community_tag>[a-zA-Z0-9]+)$', api_views.APICommunityView.as_view()),
    url(r'^api/communities/(?P<community_tag>[a-zA-Z0-9]+)/users$', api_views.APICommunityUsersView.as_view()),
    url(r'^api/communities/(?P<community_tag>[a-zA-Z0-9]+)/badges/(?P<user_id>[a-zA-Z0-9]+)$',
        api_views.APISingleUserBadgeView.as_view()),
    url(r'^api/communities/(?P<community_tag>[a-zA-Z0-9]+)/badges$', api_views.APICommunityBadgesView.as_view()),
    url(r'^api/communities/(?P<community_tag>[a-zA-Z0-9]+)/leaderboard$', api_views.APILeaderboardView.as_view()),
    url(r'^api/replace-token/', api_views.APIReplaceTokenView.as_view()),
    url(r'^api/auth-token/', authtoken_views.obtain_auth_token),
    url(r'^api/$', api_views.api_root),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 0.281603 | 0.065785 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from neurolab import params as P
from neurolab import utils
from neurolab.model import Model
class Net(Model):
    """
    Convolutional variational autoencoder (VAE).

    Encoder: four conv layers (two followed by 2x2 max pooling) -> FC ->
    latent mean/log-variance heads. Decoder mirrors the encoder with FC
    layers and transpose convolutions, re-using the encoder's pooling
    indices for unpooling. forward() returns a dict of every intermediate
    activation, keyed by the symbolic layer names below.
    """
    # Layer names (keys of the dict returned by get_conv_output/forward)
    CONV1 = 'conv1'
    RELU1 = 'relu1'
    POOL1 = 'pool1'
    BN1 = 'bn1'
    CONV2 = 'conv2'
    RELU2 = 'relu2'
    BN2 = 'bn2'
    CONV3 = 'conv3'
    RELU3 = 'relu3'
    POOL3 = 'pool3'
    BN3 = 'bn3'
    CONV4 = 'conv4'
    RELU4 = 'relu4'
    BN4 = 'bn4'
    CONV_OUTPUT = BN4  # Symbolic name for the last convolutional layer providing extracted features
    FLAT = 'flat'
    FC5 = 'fc5'
    RELU5 = 'relu5'
    BN5 = 'bn5'
    VAE_OUTPUT = 'vae_output'  # Symbolic name for the vae output consisting of reconstruction and latent variables statistics
    POOL_INDICES = 'pool_indices'  # Name of the dictionary entry containing indices resulting from max pooling
    def __init__(self, config, input_shape=None):
        """
        Build the encoder, latent heads and decoder.

        Args:
            config: neurolab config; CONFIG_OPTIONS may provide a dropout
                probability (NOTE(review): DROPOUT_P is stored but never
                applied in forward() - confirm whether intentional).
            input_shape: optional input shape forwarded to the base Model.
        """
        super(Net, self).__init__(config, input_shape)
        # NOTE(review): NUM_CLASSES is read from global params but unused in
        # this block - presumably kept for interface parity; verify.
        self.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][P.KEY_DS_NUM_CLASSES]
        self.DROPOUT_P = config.CONFIG_OPTIONS.get(P.KEY_DROPOUT_P, 0.5)
        self.NUM_LATENT_VARS = 256 #2048
        # Here we define the layers of our network
        # First convolutional layer
        self.conv1 = nn.Conv2d(3, 96, 5) # 3 input channels, 96 output channels, 5x5 convolutions
        self.bn1 = nn.BatchNorm2d(96) # Batch Norm layer
        # Second convolutional layer
        self.conv2 = nn.Conv2d(96, 128, 3) # 96 input channels, 128 output channels, 3x3 convolutions
        self.bn2 = nn.BatchNorm2d(128) # Batch Norm layer
        # Third convolutional layer
        self.conv3 = nn.Conv2d(128, 192, 3) # 128 input channels, 192 output channels, 3x3 convolutions
        self.bn3 = nn.BatchNorm2d(192) # Batch Norm layer
        # Fourth convolutional layer
        self.conv4 = nn.Conv2d(192, 256, 3) # 192 input channels, 256 output channels, 3x3 convolutions
        self.bn4 = nn.BatchNorm2d(256) # Batch Norm layer
        # Shape/size of the encoder output, used to size the FC layers.
        self.CONV_OUTPUT_SHAPE = self.get_output_fmap_shape(self.CONV_OUTPUT)
        self.CONV_OUTPUT_SIZE = utils.shape2size(self.CONV_OUTPUT_SHAPE)
        # FC Layers
        self.fc5 = nn.Linear(self.CONV_OUTPUT_SIZE, 4096) # conv_output_size-dimensional input, 4096-dimensional output
        self.bn5 = nn.BatchNorm1d(4096) # Batch Norm layer
        # Latent heads producing mean and log-variance of q(z|x).
        self.fc_mu = nn.Linear(4096, self.NUM_LATENT_VARS) # 4096-dimensional input, NUM_LATENT_VARS-dimensional output
        self.fc_var = nn.Linear(4096, self.NUM_LATENT_VARS) # 4096-dimensional input, NUM_LATENT_VARS-dimensional output
        # Decoding Layers (mirror of the encoder)
        self.dec_fc0 = nn.Linear(self.NUM_LATENT_VARS, 4096) # NUM_LATENT_VARS-dimensional input, 4096-dimensional output
        self.dec_bn0 = nn.BatchNorm1d(4096) # Batch Norm layer
        self.dec_fc1 = nn.Linear(4096, self.CONV_OUTPUT_SIZE) # 4096-dimensional input, CONV_OUTPUT_SIZE-dimensional output
        self.dec_bn1 = nn.BatchNorm1d(self.CONV_OUTPUT_SIZE) # Batch Norm layer
        self.dec_conv2 = nn.ConvTranspose2d(256, 192, 3) # 256 input channels, 192 output channels, 3x3 transpose convolutions
        self.dec_bn2 = nn.BatchNorm2d(192) # Batch Norm layer
        self.dec_conv3 = nn.ConvTranspose2d(192, 128, 3) # 192 input channels, 128 output channels, 3x3 transpose convolutions
        self.dec_bn3 = nn.BatchNorm2d(128) # Batch Norm layer
        self.dec_conv4 = nn.ConvTranspose2d(128, 96, 3) # 128 input channels, 96 output channels, 3x3 transpose convolutions
        self.dec_bn4 = nn.BatchNorm2d(96) # Batch Norm layer
        self.dec_conv5 = nn.ConvTranspose2d(96, 3, 5) # 96 input channels, 3 output channels, 5x5 transpose convolutions
        self.dec_bn5 = nn.BatchNorm2d(3) # Batch Norm layer
    def get_conv_output(self, x):
        """
        Run the encoder stack on input batch x.

        Returns:
            dict mapping layer names to activations; POOL_INDICES holds the
            max-pooling indices needed by the decoder's unpooling layers.
        """
        # Layer 1: Convolutional + ReLU activations + 2x2 Max Pooling + Batch Norm
        conv1_out = self.conv1(x)
        relu1_out = F.relu(conv1_out)
        pool1_out, pool1_indices = F.max_pool2d(relu1_out, 2, return_indices=True)
        bn1_out = self.bn1(pool1_out)
        # Layer 2: Convolutional + ReLU activations + Batch Norm
        conv2_out = self.conv2(bn1_out)
        relu2_out = F.relu(conv2_out)
        bn2_out = self.bn2(relu2_out)
        # Layer 3: Convolutional + ReLU activations + 2x2 Max Pooling + Batch Norm
        conv3_out = self.conv3(bn2_out)
        relu3_out = F.relu(conv3_out)
        pool3_out, pool3_indices = F.max_pool2d(relu3_out, 2, return_indices=True)
        bn3_out = self.bn3(pool3_out)
        # Layer 4: Convolutional + ReLU activations + Batch Norm
        conv4_out = self.conv4(bn3_out)
        relu4_out = F.relu(conv4_out)
        bn4_out = self.bn4(relu4_out)
        # Build dictionary containing outputs of each layer
        conv_out = {
            self.CONV1: conv1_out,
            self.RELU1: relu1_out,
            self.POOL1: pool1_out,
            self.BN1: bn1_out,
            self.CONV2: conv2_out,
            self.RELU2: relu2_out,
            self.BN2: bn2_out,
            self.CONV3: conv3_out,
            self.RELU3: relu3_out,
            self.POOL3: pool3_out,
            self.BN3: bn3_out,
            self.CONV4: conv4_out,
            self.RELU4: relu4_out,
            self.BN4: bn4_out,
            self.POOL_INDICES: {
                self.POOL1: pool1_indices,
                self.POOL3: pool3_indices
            }
        }
        return conv_out
    # Here we define the flow of information through the network
    def forward(self, x):
        """
        Full VAE pass: encode x, sample z with the reparameterization trick,
        decode back to image space.

        Returns:
            dict of all activations; VAE_OUTPUT holds the reconstruction and
            the latent mean/log-variance.
        """
        # Compute the output feature map from the convolutional layers
        out = self.get_conv_output(x)
        pool_indices = out[self.POOL_INDICES]
        # Stretch out the feature map before feeding it to the FC layers
        flat = out[self.CONV_OUTPUT].view(-1, self.CONV_OUTPUT_SIZE)
        # Fifth Layer: FC with ReLU activations + Batch Norm
        fc5_out = self.fc5(flat)
        relu5_out = F.relu(fc5_out)
        bn5_out = self.bn5(relu5_out)
        # Reparameterization trick: z = mu + eps * sigma with eps ~ N(0, I)
        mu = self.fc_mu(bn5_out)
        log_var = self.fc_var(bn5_out)
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        z = eps * std + mu
        # Decoding layers: double FC + transpose convolutions + Batch Norm
        dec_fc0_out = self.dec_fc0(z)
        dec_relu0_out = F.relu(dec_fc0_out)
        dec_bn0_out = self.dec_bn0(dec_relu0_out)
        dec_fc1_out = self.dec_fc1(dec_bn0_out)
        dec_relu1_out = F.relu(dec_fc1_out)
        dec_bn1_out = self.dec_bn1(dec_relu1_out)
        dec_conv2_out = self.dec_conv2(dec_bn1_out.view(-1, *self.CONV_OUTPUT_SHAPE))
        dec_relu2_out = F.relu(dec_conv2_out)
        # NOTE(review): unpooling re-uses the encoder's pooling indices and
        # assumes the decoder maps match the encoder's pre-pool spatial
        # sizes - confirm for the configured input shape.
        dec_pool2_out = F.max_unpool2d(dec_relu2_out, pool_indices[self.POOL3], 2)
        dec_bn2_out = self.dec_bn2(dec_pool2_out)
        dec_conv3_out = self.dec_conv3(dec_bn2_out)
        dec_relu3_out = F.relu(dec_conv3_out)
        dec_bn3_out = self.dec_bn3(dec_relu3_out)
        dec_conv4_out = self.dec_conv4(dec_bn3_out)
        dec_relu4_out = F.relu(dec_conv4_out)
        dec_pool4_out = F.max_unpool2d(dec_relu4_out, pool_indices[self.POOL1], 2)
        dec_bn4_out = self.dec_bn4(dec_pool4_out)
        dec_conv5_out = self.dec_conv5(dec_bn4_out)
        dec_bn5_out = self.dec_bn5(dec_conv5_out)
        # Build dictionary containing outputs from convolutional and FC layers
        out[self.FLAT] = flat
        out[self.FC5] = fc5_out
        out[self.RELU5] = relu5_out
        out[self.BN5] = bn5_out
        out[self.VAE_OUTPUT] = {
            P.KEY_AUTOENC_RECONSTR: dec_bn5_out,
            P.KEY_VAE_MU: mu,
            P.KEY_VAE_LOG_VAR: log_var}
        return out
import torch
import torch.nn as nn
import torch.nn.functional as F
from neurolab import params as P
from neurolab import utils
from neurolab.model import Model
class Net(Model):
    """
    Convolutional variational autoencoder (VAE).

    Encoder: four conv layers (two followed by 2x2 max pooling) -> FC ->
    latent mean/log-variance heads. Decoder mirrors the encoder with FC
    layers and transpose convolutions, re-using the encoder's pooling
    indices for unpooling. forward() returns a dict of every intermediate
    activation, keyed by the symbolic layer names below.
    """
    # Layer names (keys of the dict returned by get_conv_output/forward)
    CONV1 = 'conv1'
    RELU1 = 'relu1'
    POOL1 = 'pool1'
    BN1 = 'bn1'
    CONV2 = 'conv2'
    RELU2 = 'relu2'
    BN2 = 'bn2'
    CONV3 = 'conv3'
    RELU3 = 'relu3'
    POOL3 = 'pool3'
    BN3 = 'bn3'
    CONV4 = 'conv4'
    RELU4 = 'relu4'
    BN4 = 'bn4'
    CONV_OUTPUT = BN4  # Symbolic name for the last convolutional layer providing extracted features
    FLAT = 'flat'
    FC5 = 'fc5'
    RELU5 = 'relu5'
    BN5 = 'bn5'
    VAE_OUTPUT = 'vae_output'  # Symbolic name for the vae output consisting of reconstruction and latent variables statistics
    POOL_INDICES = 'pool_indices'  # Name of the dictionary entry containing indices resulting from max pooling
    def __init__(self, config, input_shape=None):
        """
        Build the encoder, latent heads and decoder.

        Args:
            config: neurolab config; CONFIG_OPTIONS may provide a dropout
                probability (NOTE(review): DROPOUT_P is stored but never
                applied in forward() - confirm whether intentional).
            input_shape: optional input shape forwarded to the base Model.
        """
        super(Net, self).__init__(config, input_shape)
        # NOTE(review): NUM_CLASSES is read from global params but unused in
        # this block - presumably kept for interface parity; verify.
        self.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][P.KEY_DS_NUM_CLASSES]
        self.DROPOUT_P = config.CONFIG_OPTIONS.get(P.KEY_DROPOUT_P, 0.5)
        self.NUM_LATENT_VARS = 256 #2048
        # Here we define the layers of our network
        # First convolutional layer
        self.conv1 = nn.Conv2d(3, 96, 5) # 3 input channels, 96 output channels, 5x5 convolutions
        self.bn1 = nn.BatchNorm2d(96) # Batch Norm layer
        # Second convolutional layer
        self.conv2 = nn.Conv2d(96, 128, 3) # 96 input channels, 128 output channels, 3x3 convolutions
        self.bn2 = nn.BatchNorm2d(128) # Batch Norm layer
        # Third convolutional layer
        self.conv3 = nn.Conv2d(128, 192, 3) # 128 input channels, 192 output channels, 3x3 convolutions
        self.bn3 = nn.BatchNorm2d(192) # Batch Norm layer
        # Fourth convolutional layer
        self.conv4 = nn.Conv2d(192, 256, 3) # 192 input channels, 256 output channels, 3x3 convolutions
        self.bn4 = nn.BatchNorm2d(256) # Batch Norm layer
        # Shape/size of the encoder output, used to size the FC layers.
        self.CONV_OUTPUT_SHAPE = self.get_output_fmap_shape(self.CONV_OUTPUT)
        self.CONV_OUTPUT_SIZE = utils.shape2size(self.CONV_OUTPUT_SHAPE)
        # FC Layers
        self.fc5 = nn.Linear(self.CONV_OUTPUT_SIZE, 4096) # conv_output_size-dimensional input, 4096-dimensional output
        self.bn5 = nn.BatchNorm1d(4096) # Batch Norm layer
        # Latent heads producing mean and log-variance of q(z|x).
        self.fc_mu = nn.Linear(4096, self.NUM_LATENT_VARS) # 4096-dimensional input, NUM_LATENT_VARS-dimensional output
        self.fc_var = nn.Linear(4096, self.NUM_LATENT_VARS) # 4096-dimensional input, NUM_LATENT_VARS-dimensional output
        # Decoding Layers (mirror of the encoder)
        self.dec_fc0 = nn.Linear(self.NUM_LATENT_VARS, 4096) # NUM_LATENT_VARS-dimensional input, 4096-dimensional output
        self.dec_bn0 = nn.BatchNorm1d(4096) # Batch Norm layer
        self.dec_fc1 = nn.Linear(4096, self.CONV_OUTPUT_SIZE) # 4096-dimensional input, CONV_OUTPUT_SIZE-dimensional output
        self.dec_bn1 = nn.BatchNorm1d(self.CONV_OUTPUT_SIZE) # Batch Norm layer
        self.dec_conv2 = nn.ConvTranspose2d(256, 192, 3) # 256 input channels, 192 output channels, 3x3 transpose convolutions
        self.dec_bn2 = nn.BatchNorm2d(192) # Batch Norm layer
        self.dec_conv3 = nn.ConvTranspose2d(192, 128, 3) # 192 input channels, 128 output channels, 3x3 transpose convolutions
        self.dec_bn3 = nn.BatchNorm2d(128) # Batch Norm layer
        self.dec_conv4 = nn.ConvTranspose2d(128, 96, 3) # 128 input channels, 96 output channels, 3x3 transpose convolutions
        self.dec_bn4 = nn.BatchNorm2d(96) # Batch Norm layer
        self.dec_conv5 = nn.ConvTranspose2d(96, 3, 5) # 96 input channels, 3 output channels, 5x5 transpose convolutions
        self.dec_bn5 = nn.BatchNorm2d(3) # Batch Norm layer
    def get_conv_output(self, x):
        """
        Run the encoder stack on input batch x.

        Returns:
            dict mapping layer names to activations; POOL_INDICES holds the
            max-pooling indices needed by the decoder's unpooling layers.
        """
        # Layer 1: Convolutional + ReLU activations + 2x2 Max Pooling + Batch Norm
        conv1_out = self.conv1(x)
        relu1_out = F.relu(conv1_out)
        pool1_out, pool1_indices = F.max_pool2d(relu1_out, 2, return_indices=True)
        bn1_out = self.bn1(pool1_out)
        # Layer 2: Convolutional + ReLU activations + Batch Norm
        conv2_out = self.conv2(bn1_out)
        relu2_out = F.relu(conv2_out)
        bn2_out = self.bn2(relu2_out)
        # Layer 3: Convolutional + ReLU activations + 2x2 Max Pooling + Batch Norm
        conv3_out = self.conv3(bn2_out)
        relu3_out = F.relu(conv3_out)
        pool3_out, pool3_indices = F.max_pool2d(relu3_out, 2, return_indices=True)
        bn3_out = self.bn3(pool3_out)
        # Layer 4: Convolutional + ReLU activations + Batch Norm
        conv4_out = self.conv4(bn3_out)
        relu4_out = F.relu(conv4_out)
        bn4_out = self.bn4(relu4_out)
        # Build dictionary containing outputs of each layer
        conv_out = {
            self.CONV1: conv1_out,
            self.RELU1: relu1_out,
            self.POOL1: pool1_out,
            self.BN1: bn1_out,
            self.CONV2: conv2_out,
            self.RELU2: relu2_out,
            self.BN2: bn2_out,
            self.CONV3: conv3_out,
            self.RELU3: relu3_out,
            self.POOL3: pool3_out,
            self.BN3: bn3_out,
            self.CONV4: conv4_out,
            self.RELU4: relu4_out,
            self.BN4: bn4_out,
            self.POOL_INDICES: {
                self.POOL1: pool1_indices,
                self.POOL3: pool3_indices
            }
        }
        return conv_out
    # Here we define the flow of information through the network
    def forward(self, x):
        """
        Full VAE pass: encode x, sample z with the reparameterization trick,
        decode back to image space.

        Returns:
            dict of all activations; VAE_OUTPUT holds the reconstruction and
            the latent mean/log-variance.
        """
        # Compute the output feature map from the convolutional layers
        out = self.get_conv_output(x)
        pool_indices = out[self.POOL_INDICES]
        # Stretch out the feature map before feeding it to the FC layers
        flat = out[self.CONV_OUTPUT].view(-1, self.CONV_OUTPUT_SIZE)
        # Fifth Layer: FC with ReLU activations + Batch Norm
        fc5_out = self.fc5(flat)
        relu5_out = F.relu(fc5_out)
        bn5_out = self.bn5(relu5_out)
        # Reparameterization trick: z = mu + eps * sigma with eps ~ N(0, I)
        mu = self.fc_mu(bn5_out)
        log_var = self.fc_var(bn5_out)
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        z = eps * std + mu
        # Decoding layers: double FC + transpose convolutions + Batch Norm
        dec_fc0_out = self.dec_fc0(z)
        dec_relu0_out = F.relu(dec_fc0_out)
        dec_bn0_out = self.dec_bn0(dec_relu0_out)
        dec_fc1_out = self.dec_fc1(dec_bn0_out)
        dec_relu1_out = F.relu(dec_fc1_out)
        dec_bn1_out = self.dec_bn1(dec_relu1_out)
        dec_conv2_out = self.dec_conv2(dec_bn1_out.view(-1, *self.CONV_OUTPUT_SHAPE))
        dec_relu2_out = F.relu(dec_conv2_out)
        # NOTE(review): unpooling re-uses the encoder's pooling indices and
        # assumes the decoder maps match the encoder's pre-pool spatial
        # sizes - confirm for the configured input shape.
        dec_pool2_out = F.max_unpool2d(dec_relu2_out, pool_indices[self.POOL3], 2)
        dec_bn2_out = self.dec_bn2(dec_pool2_out)
        dec_conv3_out = self.dec_conv3(dec_bn2_out)
        dec_relu3_out = F.relu(dec_conv3_out)
        dec_bn3_out = self.dec_bn3(dec_relu3_out)
        dec_conv4_out = self.dec_conv4(dec_bn3_out)
        dec_relu4_out = F.relu(dec_conv4_out)
        dec_pool4_out = F.max_unpool2d(dec_relu4_out, pool_indices[self.POOL1], 2)
        dec_bn4_out = self.dec_bn4(dec_pool4_out)
        dec_conv5_out = self.dec_conv5(dec_bn4_out)
        dec_bn5_out = self.dec_bn5(dec_conv5_out)
        # Build dictionary containing outputs from convolutional and FC layers
        out[self.FLAT] = flat
        out[self.FC5] = fc5_out
        out[self.RELU5] = relu5_out
        out[self.BN5] = bn5_out
        out[self.VAE_OUTPUT] = {
            P.KEY_AUTOENC_RECONSTR: dec_bn5_out,
            P.KEY_VAE_MU: mu,
            P.KEY_VAE_LOG_VAR: log_var}
        return out
import requests
from threading import *
import sys
import getopt
'''requests用于请求目标站点;
threading用于启用多线程;
sys用于解析命令行参数;
getopt用于处理命令行参数;'''
# 程序标识
def banner():
    """Print the tool's ASCII-art banner and version string."""
    print("\n********************")
    # Raw banner text; the art below is emitted verbatim.
    name = '''
 .__    .__ ___.   .__ __  .__
 |  |__ |__| \_ |__ |__| | | __ |__|
 | | \ | | | __ \ | | | |/ / | |
 | Y \ | | | \_\ \ | | | < | |
 |___| / |__| |___ / |__| |__|_ \ |__|
 \/ \/ \/
 '''
    print(name)
    print("hibiki-暴力发掘器 v0.1")
    print("***********************")
# 程序用法
def usage():
    """Show the command-line help text."""
    help_lines = (
        "用法:",
        " -w:网址 (http://XD.com/FUZZ)",
        " -t:线程数",
        " -f:字典文件",
        "例子:暴力发掘器.py -w http://zmister.com/FUZZ -t 5 -f commom.txt",
    )
    for line in help_lines:
        print(line)
#创建线程并向目标站点发起请求以及获取响应
class request_performer(Thread):
    """
    Worker thread: substitutes one wordlist entry for the '/FUZZ'
    placeholder in the target URL, issues a GET and prints the status code.

    NOTE(review): the replacement consumes the slash of '/FUZZ', so wordlist
    entries are expected to carry their own leading '/' - confirm against
    the wordlists in use.
    """
    def __init__(self, word, url):
        Thread.__init__(self)
        try:
            # Strip the trailing newline of the wordlist entry.
            self.word = word.split("\n")[0]
            self.urly = url.replace('/FUZZ', self.word)  # substitute the FUZZ placeholder
            self.url = self.urly
        except Exception as e:
            print(e)
    def run(self):
        try:
            r = requests.get(self.url)
            print(self.url, "-", str(r.status_code))
        except Exception as e:
            print(e)
        finally:
            # Always release the slot in the shared in-flight counter, even
            # when the request raised; previously a failed request left the
            # counter high, so launcher_thread could busy-wait forever.
            i[0] = i[0] - 1
#启动request_performer()类
#launcher_thread(words,threads,url)
def launcher_thread(names, th, url):
    """
    Drain the wordlist, keeping at most `th` request threads in flight.

    names: wordlist entries (consumed destructively with pop).
    th: maximum number of concurrent threads.
    url: target URL containing the '/FUZZ' placeholder.
    Returns True once every entry has been handed to a worker thread.
    """
    import time  # local import: only needed for the capacity back-off below
    global i
    # i[0] is the shared in-flight counter; each worker decrements it in
    # request_performer.run() when its request finishes.
    i = [0]
    while len(names):
        try:
            if i[0] < th:
                n = names.pop(0)
                i[0] = i[0] + 1
                thread = request_performer(n, url)
                thread.start()
            else:
                # At capacity: sleep briefly instead of spinning the CPU in
                # a tight busy-wait until a worker frees a slot.
                time.sleep(0.01)
        except KeyboardInterrupt:
            print("用户停止了程序运行。完成探测")
            sys.exit()
    return True
#接收命令行中的参数将其传递给launcher_thread()函数
def start(argv):
    """
    Parse command-line flags and launch the fuzzing run.

    Flags: -w <url containing /FUZZ>  -t <thread count>  -f <wordlist file>.
    Exits via sys.exit() on bad arguments or an unreadable wordlist.
    """
    banner()
    # argv is sys.argv[1:] (see __main__), so the original `len(sys.argv) < 5`
    # check is equivalent to `len(argv) < 4`; honor the parameter instead of
    # re-reading sys.argv.
    if len(argv) < 4:
        usage()
        sys.exit()
    try:
        # getopt parses the flag/value pairs so we don't walk argv by hand.
        opts, args = getopt.getopt(argv, "w:t:f:")
    except getopt.GetoptError:
        print("错误的参数")
        sys.exit()
    url = None
    dicts = None
    threads = 0
    for opt, arg in opts:
        if opt == '-w':
            url = arg
        elif opt == '-f':
            dicts = arg
        elif opt == '-t':
            threads = int(arg)
    if url is None or dicts is None or threads <= 0:
        # Previously a missing flag caused a NameError further down; fail
        # with the usage text instead.
        usage()
        sys.exit()
    try:
        # 'with' closes the wordlist file instead of leaking the handle.
        with open(dicts, 'r') as f:
            words = f.readlines()
    except Exception as e:
        print("打开文件错误:", dicts, "\n")
        print(e)
        sys.exit()
    launcher_thread(words, threads, url)
# Script entry point.
if __name__ == '__main__':
    try:
        start(sys.argv[1:])
    except KeyboardInterrupt:
        # User aborted the scan with Ctrl-C.
        print("用户停止了程序运行。完成探测")
import requests
from threading import *
import sys
import getopt
'''requests用于请求目标站点;
threading用于启用多线程;
sys用于解析命令行参数;
getopt用于处理命令行参数;'''
# 程序标识
def banner():
    """Print the tool's ASCII-art banner and version string."""
    print("\n********************")
    # Raw banner text; the art below is emitted verbatim.
    name = '''
 .__    .__ ___.   .__ __  .__
 |  |__ |__| \_ |__ |__| | | __ |__|
 | | \ | | | __ \ | | | |/ / | |
 | Y \ | | | \_\ \ | | | < | |
 |___| / |__| |___ / |__| |__|_ \ |__|
 \/ \/ \/
 '''
    print(name)
    print("hibiki-暴力发掘器 v0.1")
    print("***********************")
# 程序用法
def usage():
    """Show the command-line help text."""
    help_lines = (
        "用法:",
        " -w:网址 (http://XD.com/FUZZ)",
        " -t:线程数",
        " -f:字典文件",
        "例子:暴力发掘器.py -w http://zmister.com/FUZZ -t 5 -f commom.txt",
    )
    for line in help_lines:
        print(line)
#创建线程并向目标站点发起请求以及获取响应
class request_performer(Thread):
    """
    Worker thread: substitutes one wordlist entry for the '/FUZZ'
    placeholder in the target URL, issues a GET and prints the status code.

    NOTE(review): the replacement consumes the slash of '/FUZZ', so wordlist
    entries are expected to carry their own leading '/' - confirm against
    the wordlists in use.
    """
    def __init__(self, word, url):
        Thread.__init__(self)
        try:
            # Strip the trailing newline of the wordlist entry.
            self.word = word.split("\n")[0]
            self.urly = url.replace('/FUZZ', self.word)  # substitute the FUZZ placeholder
            self.url = self.urly
        except Exception as e:
            print(e)
    def run(self):
        try:
            r = requests.get(self.url)
            print(self.url, "-", str(r.status_code))
        except Exception as e:
            print(e)
        finally:
            # Always release the slot in the shared in-flight counter, even
            # when the request raised; previously a failed request left the
            # counter high, so launcher_thread could busy-wait forever.
            i[0] = i[0] - 1
#启动request_performer()类
#launcher_thread(words,threads,url)
def launcher_thread(names, th, url):
    """
    Drain the wordlist, keeping at most `th` request threads in flight.

    names: wordlist entries (consumed destructively with pop).
    th: maximum number of concurrent threads.
    url: target URL containing the '/FUZZ' placeholder.
    Returns True once every entry has been handed to a worker thread.
    """
    import time  # local import: only needed for the capacity back-off below
    global i
    # i[0] is the shared in-flight counter; each worker decrements it in
    # request_performer.run() when its request finishes.
    i = [0]
    while len(names):
        try:
            if i[0] < th:
                n = names.pop(0)
                i[0] = i[0] + 1
                thread = request_performer(n, url)
                thread.start()
            else:
                # At capacity: sleep briefly instead of spinning the CPU in
                # a tight busy-wait until a worker frees a slot.
                time.sleep(0.01)
        except KeyboardInterrupt:
            print("用户停止了程序运行。完成探测")
            sys.exit()
    return True
#接收命令行中的参数将其传递给launcher_thread()函数
def start(argv):
    """
    Parse command-line flags and launch the fuzzing run.

    Flags: -w <url containing /FUZZ>  -t <thread count>  -f <wordlist file>.
    Exits via sys.exit() on bad arguments or an unreadable wordlist.
    """
    banner()
    # argv is sys.argv[1:] (see __main__), so the original `len(sys.argv) < 5`
    # check is equivalent to `len(argv) < 4`; honor the parameter instead of
    # re-reading sys.argv.
    if len(argv) < 4:
        usage()
        sys.exit()
    try:
        # getopt parses the flag/value pairs so we don't walk argv by hand.
        opts, args = getopt.getopt(argv, "w:t:f:")
    except getopt.GetoptError:
        print("错误的参数")
        sys.exit()
    url = None
    dicts = None
    threads = 0
    for opt, arg in opts:
        if opt == '-w':
            url = arg
        elif opt == '-f':
            dicts = arg
        elif opt == '-t':
            threads = int(arg)
    if url is None or dicts is None or threads <= 0:
        # Previously a missing flag caused a NameError further down; fail
        # with the usage text instead.
        usage()
        sys.exit()
    try:
        # 'with' closes the wordlist file instead of leaking the handle.
        with open(dicts, 'r') as f:
            words = f.readlines()
    except Exception as e:
        print("打开文件错误:", dicts, "\n")
        print(e)
        sys.exit()
    launcher_thread(words, threads, url)
# Script entry point.
if __name__ == '__main__':
    try:
        start(sys.argv[1:])
    except KeyboardInterrupt:
        # User aborted the scan with Ctrl-C.
        print("用户停止了程序运行。完成探测")
from __future__ import annotations
from asyncio import Future, get_event_loop, wait_for
from re import search
from typing import TYPE_CHECKING, Any, Callable, Dict, Literal, Optional, Union
import aiohttp
# Internal imports
from .internals import CacheHandler, HTTPHandler, WebSocketHandler
if TYPE_CHECKING:
from .channels import Channel
from .enums import PresenceType
from .member import Member
from .server import Server
from .user import User
class Client:
    """
    Base voltage client.

    Attributes
    ----------
    cache_message_limit: :class:`int`
        The maximum amount of messages to cache.
    user: :class:`User`
        The user of the client.
    members: List[:class:`Member`]
        The members the client has cached.
    servers: List[:class:`Server`]
        The servers the client is in.
    users: List[:class:`User`]
        The users the client has cached.
    channels: List[:class:`Channel`]
        The channels the client has cached.

    Methods
    -------
    listen:
        Registers a function to listen for an event.
    run:
        Runs the client.
    """
    __slots__ = (
        "cache_message_limit",
        "client",
        "error_handlers",
        "listeners",
        "loop",
        "raw_listeners",
        "raw_waits",
        "waits",
        "ws",
        "http",
        "cache",
        "user",
    )

    def __init__(self, *, cache_message_limit: int = 5000):
        self.cache_message_limit = cache_message_limit
        self.client = aiohttp.ClientSession()
        self.http: HTTPHandler
        self.ws: WebSocketHandler
        self.listeners: Dict[str, Callable[..., Any]] = {}
        self.raw_listeners: Dict[str, Callable[[Dict], Any]] = {}
        # event name -> list of (predicate, future) pairs registered by wait_for.
        self.waits: Dict[str, list[tuple[Callable[..., bool], Future[Any]]]] = {}
        self.loop = get_event_loop()
        self.cache: CacheHandler
        self.user: User
        self.error_handlers: Dict[str, Callable[..., Any]] = {}

    def listen(self, event: str, *, raw: bool = False):
        """
        Registers a function to listen for an event.

        This function is meant to be used as a decorator.

        Parameters
        ----------
        func: Callable[..., Any]
            The function to call when the event is triggered.
        event: :class:`str`
            The event to listen for.
        raw: :class:`bool`
            Whether or not to listen for raw events.

        Examples
        --------
        .. code-block:: python3

            @client.listen("message")
            async def any_name_you_want(message):
                if message.content == "ping":
                    await message.channel.send("pong")

            # example of a raw event
            @client.listen("message", raw=True)
            async def raw(payload):
                if payload["content"] == "ping":
                    await client.http.send_message(payload["channel"], "pong")
        """
        def inner(func: Callable[..., Any]):
            if raw:
                self.raw_listeners[event.lower()] = func
            else:
                self.listeners[event.lower()] = func  # Why would we have more than one listener for the same event?
            return func
        return inner  # Returns the function so the user can use it by itself

    def error(self, event: str):
        """
        Registers a function to handle errors for a specific **non-raw** event.

        This function is meant to be used as a decorator.

        Parameters
        ----------
        event: :class:`str`
            The event to handle errors for.

        Examples
        --------
        .. code-block:: python3

            @client.error("message")
            async def message_error(error, message):
                if isinstance(error, IndexError):  # You probably don't want to handle all the index errors like this but this is just an example.
                    await message.reply("Not enough arguments.")
        """
        def inner(func: Callable[..., Any]):
            self.error_handlers[event.lower()] = func
            return func
        return inner

    def run(self, token: str, *, bot: bool = True, banner: bool = True):
        """
        Run the client.

        Parameters
        ----------
        token: :class:`str`
            The bot token.
        bot: :class:`bool`
            Whether or not the client is a bot.
        banner: :class:`bool`
            Whether or not to print startup banner.
        """
        self.loop.run_until_complete(self.start(token, bot=bot, banner=banner))

    async def wait_for(
        self, event: str, *, timeout: Optional[float] = None, check: Optional[Callable[..., bool]] = None
    ) -> Any:
        """
        Waits for an event to be triggered.

        .. note::
            The event can be *anything*, be it a message, userupdate or whatever. :trol:

        Parameters
        ----------
        event: :class:`str`
            The event to wait for.
        timeout: Optional[:class:`float`]
            The amount of time to wait for the event to be triggered.
        check: Optional[Callable[..., bool]]
            A function to filter events to a matching predicate, ***must*** return a boolean for it to work properly.

        Raises
        ------
        :class:`asyncio.TimeoutError`
            If the event wasn't triggered within the timeout.

        Examples
        --------
        .. code-block:: python3

            import voltage

            client = voltage.Client()

            @client.listen("message")
            async def message(message):
                if message.content == "-wait":
                    await message.reply("Okay, send something")
                    msg = await client.wait_for("message", check=lambda m: m.author == message.author)
                    await message.reply("You sent: " + msg.content)

            client.run("token")
        """
        if check is None:
            check = lambda *_, **__: True
        future = self.loop.create_future()
        self.waits[event] = self.waits.get(event, []) + [(check, future)]
        return await wait_for(future, timeout)

    @property
    def servers(self) -> list[Server]:
        """The list of servers the client is in."""
        return list(self.cache.servers.values())

    @property
    def users(self) -> list[User]:
        """The list of users the client has cached."""
        return list(self.cache.users.values())

    @property
    def channels(self) -> list[Any]:
        """The list of channels the client has cached."""
        return list(self.cache.channels.values())

    @property
    def members(self) -> list[Member]:
        """The list of members the client has cached."""
        members: list[Member] = list()
        for (server, servermembers) in self.cache.members.items():
            members += list(servermembers.values())
        return members

    async def start(self, token: str, *, bot: bool = True, banner: bool = True):
        """
        Start the client.

        Parameters
        ----------
        token: :class:`str`
            The bot token.
        bot: :class:`bool`
            Whether or not the client is a bot.
        banner: :class:`bool`
            Whether or not to print startup banner.
        """
        self.http = HTTPHandler(self.client, token, bot=bot)
        self.cache = CacheHandler(self.http, self.loop, self.cache_message_limit)
        self.ws = WebSocketHandler(self.client, self.http, self.cache, token, self.dispatch, self.raw_dispatch)
        await self.http.get_api_info()
        self.user = self.cache.add_user(await self.http.fetch_self())
        await self.ws.connect(banner)

    async def dispatch(self, event: str, *args, **kwargs):
        """
        Fire an event: resolve matching wait_for futures, then invoke the
        registered listener (routing exceptions to its error handler, if any).
        """
        event = event.lower()
        # Iterate over a snapshot: satisfied waiters are removed from the
        # live list below, and removing from a list while iterating it
        # silently skips the following element.
        for waiter in self.waits.get(event, [])[:]:
            check, future = waiter
            if check(*args, **kwargs):
                # A timed-out wait_for leaves a cancelled future behind;
                # setting a result on it would raise InvalidStateError.
                if not future.done():
                    future.set_result(*args, **kwargs)
                self.waits[event].remove(waiter)
        if func := self.listeners.get(event):
            if self.error_handlers.get(event):
                try:
                    await func(*args, **kwargs)
                except Exception as e:
                    await self.error_handlers[event](e, *args, **kwargs)
            else:
                await func(*args, **kwargs)

    async def raw_dispatch(self, payload: Dict[Any, Any]):
        """Invoke the raw listener registered for this payload's type, if any."""
        event = payload["type"].lower()  # Subject to change
        if func := self.raw_listeners.get(event):
            await func(payload)

    def get_user(self, user: str) -> Optional[User]:
        """
        Gets a user from the cache by ID, mention or name.

        Parameters
        ----------
        user: :class:`str`
            The ID, mention or name of the user.

        Returns
        -------
        Optional[:class:`User`]
            The user.
        """
        return self.cache.get_user(user)

    def get_channel(self, channel_id: str) -> Optional[Channel]:
        """
        Gets a channel from the cache by ID.

        Parameters
        ----------
        channel_id: :class:`str`
            The ID of the channel.

        Returns
        -------
        Optional[:class:`Channel`]
            The channel.
        """
        try:
            return self.cache.get_channel(channel_id)
        except ValueError:
            return None

    def get_server(self, server_id: str) -> Optional[Server]:
        """
        Gets a server from the cache by ID.

        Parameters
        ----------
        server_id: :class:`str`
            The ID of the server.

        Returns
        -------
        Optional[:class:`Server`]
            The server.
        """
        try:
            return self.cache.get_server(server_id)
        except ValueError:
            return None

    async def set_status(self, text: Optional[str] = None, presence: Optional[PresenceType] = None):
        """
        Sets the client's status.

        Parameters
        ----------
        text: Optional[:class:`str`]
            The text to set the status to.
        presence: Optional[:class:`str`]
            The presence to set the status to.
        """
        data: dict[Literal["text", "presence"], Union[str, Literal["Online", "Busy", "Idle", "Offline"]]] = {}
        if text:
            data["text"] = text
        if presence:
            data["presence"] = presence.value
        await self.http.edit_self(status=data)
from asyncio import Future, get_event_loop, wait_for
from re import search
from typing import TYPE_CHECKING, Any, Callable, Dict, Literal, Optional, Union
import aiohttp
# Internal imports
from .internals import CacheHandler, HTTPHandler, WebSocketHandler
if TYPE_CHECKING:
from .channels import Channel
from .enums import PresenceType
from .member import Member
from .server import Server
from .user import User
class Client:
"""
Base voltage client.
Attributes
----------
cache_message_limit: :class:`int`
The maximum amount of messages to cache.
user: :class:`User`
The user of the client.
members: List[:class:`Member`]
The members the client has cached.
servers: List[:class:`Server`]
The servers the client is in.
users: List[:class:`User`]
The users the client has cached.
channels: List[:class:`Channel`]
The channels the client has cached.
Methods
-------
listen:
Registers a function to listen for an event.
run:
Runs the client.
"""
__slots__ = (
"cache_message_limit",
"client",
"error_handlers",
"listeners",
"loop",
"raw_listeners",
"raw_waits",
"waits",
"ws",
"http",
"cache",
"user",
)
def __init__(self, *, cache_message_limit: int = 5000):
self.cache_message_limit = cache_message_limit
self.client = aiohttp.ClientSession()
self.http: HTTPHandler
self.ws: WebSocketHandler
self.listeners: Dict[str, Callable[..., Any]] = {}
self.raw_listeners: Dict[str, Callable[[Dict], Any]] = {}
self.waits: Dict[str, list[tuple[Callable[..., bool], Future[Any]]]] = {}
self.loop = get_event_loop()
self.cache: CacheHandler
self.user: User
self.error_handlers: Dict[str, Callable[..., Any]] = {}
def listen(self, event: str, *, raw: bool = False):
"""
Registers a function to listen for an event.
This function is meant to be used as a decorator.
Parameters
----------
func: Callable[..., Any]
The function to call when the event is triggered.
event: :class:`str`
The event to listen for.
raw: :class:`bool`
Whether or not to listen for raw events.
Examples
--------
.. code-block:: python3
@client.listen("message")
async def any_name_you_want(message):
if message.content == "ping":
await message.channel.send("pong")
# example of a raw event
@client.listen("message", raw=True)
async def raw(payload):
if payload["content"] == "ping":
await client.http.send_message(payload["channel"], "pong")
"""
def inner(func: Callable[..., Any]):
if raw:
self.raw_listeners[event.lower()] = func
else:
self.listeners[event.lower()] = func # Why would we have more than one listener for the same event?
return func
return inner # Returns the function so the user can use it by itself
def error(self, event: str):
"""
Registers a function to handle errors for a specific **non-raw** event.
This function is meant to be used as a decorator.
Parameters
----------
event: :class:`str`
The event to handle errors for.
Examples
--------
.. code-block:: python3
@client.error("message")
async def message_error(error, message):
if isinstance(error, IndexError): # You probably don't want to handle all the index errors like this but this is just an example.
await message.reply("Not enough arguments.")
"""
def inner(func: Callable[..., Any]):
self.error_handlers[event.lower()] = func
return func
return inner
def run(self, token: str, *, bot: bool = True, banner: bool = True):
"""
Run the client.
Parameters
----------
token: :class:`str`
The bot token.
bot: :class:`bool`
Whether or not the client is a bot.
banner: :class:`bool`
Whether or not to print startup banner.
"""
self.loop.run_until_complete(self.start(token, bot=bot, banner=banner))
async def wait_for(
self, event: str, *, timeout: Optional[float] = None, check: Optional[Callable[..., bool]] = None
) -> Any:
"""
Waits for an event to be triggered.
.. note::
The event can be *anything*, be it a message, userupdate or whatever. :trol:
Parameters
----------
event: :class:`str`
The event to wait for.
timeout: Optional[:class:`float`]
The amount of time to wait for the event to be triggered.
check: Optional[Callable[..., bool]]
A function to filter events to a matching predicate, ***must*** return a boolean for it to work properly.
Raises
------
:class:`asyncio.TimeoutError`
If the event wasn't triggered within the timeout.
Examples
--------
.. code-block:: python3
import voltage
client = voltage.Client()
@client.listen("message")
async def message(message):
if message.content == "-wait":
await message.reply("Okay, send something")
msg = await client.wait_for("message", check=lambda m: m.author == message.author)
await message.reply("You sent: " + msg.content)
client.run("token")
"""
if check is None:
check = lambda *_, **__: True
future = self.loop.create_future()
self.waits[event] = self.waits.get(event, []) + [(check, future)]
return await wait_for(future, timeout)
@property
def servers(self) -> list[Server]:
"""The list of servers the client is in."""
return list(self.cache.servers.values())
@property
def users(self) -> list[User]:
"""The list of users the client has cached."""
return list(self.cache.users.values())
@property
def channels(self) -> list[Any]:
"""The list of channels the client has cached."""
return list(self.cache.channels.values())
@property
def members(self) -> list[Member]:
"""The list of members the client has cached."""
members: list[Member] = list()
for (server, servermembers) in self.cache.members.items():
members += list(servermembers.values())
return members
async def start(self, token: str, *, bot: bool = True, banner: bool = True):
"""
Start the client.
Parameters
----------
token: :class:`str`
The bot token.
bot: :class:`bool`
Whether or not the client is a bot.
banner: :class:`bool`
Whether or not to print startup banner.
"""
self.http = HTTPHandler(self.client, token, bot=bot)
self.cache = CacheHandler(self.http, self.loop, self.cache_message_limit)
self.ws = WebSocketHandler(self.client, self.http, self.cache, token, self.dispatch, self.raw_dispatch)
await self.http.get_api_info()
self.user = self.cache.add_user(await self.http.fetch_self())
await self.ws.connect(banner)
async def dispatch(self, event: str, *args, **kwargs):
event = event.lower()
for i in self.waits.get(event, []):
if i[0](*args, **kwargs):
i[1].set_result(*args, **kwargs)
self.waits[event].remove(i)
if func := self.listeners.get(event):
if self.error_handlers.get(event):
try:
await func(*args, **kwargs)
except Exception as e:
await self.error_handlers[event](e, *args, **kwargs)
else:
await func(*args, **kwargs)
async def raw_dispatch(self, payload: Dict[Any, Any]):
event = payload["type"].lower() # Subject to change
if func := self.raw_listeners.get(event):
await func(payload)
def get_user(self, user: str) -> Optional[User]:
"""
Gets a user from the cache by ID, mention or name.
Parameters
----------
user: :class:`str`
The ID, mention or name of the user.
Returns
-------
Optional[:class:`User`]
The user.
"""
return self.cache.get_user(user)
def get_channel(self, channel_id: str) -> Optional[Channel]:
"""
Gets a channel from the cache by ID.
Parameters
----------
channel_id: :class:`str`
The ID of the channel.
Returns
-------
Optional[:class:`Channel`]
The channel.
"""
try:
return self.cache.get_channel(channel_id)
except ValueError:
return None
def get_server(self, server_id: str) -> Optional[Server]:
"""
Gets a server from the cache by ID.
Parameters
----------
server_id: :class:`str`
The ID of the server.
Returns
-------
Optional[:class:`Server`]
The server.
"""
try:
return self.cache.get_server(server_id)
except ValueError:
return None
async def set_status(self, text: Optional[str] = None, presence: Optional[PresenceType] = None):
"""
Sets the client's status.
Parameters
----------
text: Optional[:class:`str`]
The text to set the status to.
presence: Optional[:class:`str`]
The presence to set the status to.
"""
data: dict[Literal["text", "presence"], Union[str, Literal["Online", "Busy", "Idle", "Offline"]]] = {}
if text:
data["text"] = text
if presence:
data["presence"] = presence.value
await self.http.edit_self(status=data) | 0.948882 | 0.19521 |
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import hawtorch
import hawtorch.io as io
from hawtorch import Trainer
import models
from torchvision import datasets, transforms
args = io.load_json("mnist_config.json")
logger = io.logger(args["workspace_path"])
def create_trainer():
print("Create Trainer")
device = args["device"]
model = getattr(models, args["model"])()
objective = getattr(nn, args["objective"])()
optimizer = getattr(optim, args["optimizer"])(model.parameters(), lr=args["lr"], weight_decay=args["weight_decay"])
lr_decay = lr_scheduler.StepLR(optimizer, step_size=args["lr_decay_step"], gamma=args["lr_decay"])
metrics = [hawtorch.metrics.ClassificationMeter(10), ]
loaders = create_loaders()
trainer = Trainer(args, model, optimizer, lr_decay, objective, device, loaders, logger,
metrics=metrics,
workspace_path=args["workspace_path"],
eval_set="test",
)
return trainer
def create_loaders():
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args["train_batch_size"],
shuffle=True,
num_workers=1,
pin_memory=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args["test_batch_size"],
shuffle=True,
num_workers=1,
pin_memory=True)
print("create loaders", len(train_loader), len(test_loader))
loaders = {
"train": train_loader,
"test": test_loader
}
return loaders
if __name__ == "__main__":
trainer = create_trainer()
trainer.train(args["epochs"])
trainer.evaluate() | mnist/train.py | import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import hawtorch
import hawtorch.io as io
from hawtorch import Trainer
import models
from torchvision import datasets, transforms
args = io.load_json("mnist_config.json")
logger = io.logger(args["workspace_path"])
def create_trainer():
print("Create Trainer")
device = args["device"]
model = getattr(models, args["model"])()
objective = getattr(nn, args["objective"])()
optimizer = getattr(optim, args["optimizer"])(model.parameters(), lr=args["lr"], weight_decay=args["weight_decay"])
lr_decay = lr_scheduler.StepLR(optimizer, step_size=args["lr_decay_step"], gamma=args["lr_decay"])
metrics = [hawtorch.metrics.ClassificationMeter(10), ]
loaders = create_loaders()
trainer = Trainer(args, model, optimizer, lr_decay, objective, device, loaders, logger,
metrics=metrics,
workspace_path=args["workspace_path"],
eval_set="test",
)
return trainer
def create_loaders():
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args["train_batch_size"],
shuffle=True,
num_workers=1,
pin_memory=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args["test_batch_size"],
shuffle=True,
num_workers=1,
pin_memory=True)
print("create loaders", len(train_loader), len(test_loader))
loaders = {
"train": train_loader,
"test": test_loader
}
return loaders
if __name__ == "__main__":
trainer = create_trainer()
trainer.train(args["epochs"])
trainer.evaluate() | 0.758868 | 0.394026 |
import warnings
import numpy as np
import pandas as pd
from .base import BaseExtensionTests
class BaseDtypeTests(BaseExtensionTests):
"""Base class for ExtensionDtype classes"""
def test_name(self, dtype):
assert isinstance(dtype.name, str)
def test_kind(self, dtype):
valid = set('biufcmMOSUV')
if dtype.kind is not None:
assert dtype.kind in valid
def test_construct_from_string_own_name(self, dtype):
result = dtype.construct_from_string(dtype.name)
assert type(result) is type(dtype)
# check OK as classmethod
result = type(dtype).construct_from_string(dtype.name)
assert type(result) is type(dtype)
def test_is_dtype_from_name(self, dtype):
result = type(dtype).is_dtype(dtype.name)
assert result is True
def test_is_dtype_unboxes_dtype(self, data, dtype):
assert dtype.is_dtype(data) is True
def test_is_dtype_from_self(self, dtype):
result = type(dtype).is_dtype(dtype)
assert result is True
def test_is_not_string_type(self, dtype):
return not pd.api.types.is_string_dtype(dtype)
def test_is_not_object_type(self, dtype):
return not pd.api.types.is_object_dtype(dtype)
def test_eq_with_str(self, dtype):
assert dtype == dtype.name
assert dtype != dtype.name + '-suffix'
def test_eq_with_numpy_object(self, dtype):
assert dtype != np.dtype('object')
def test_eq_with_self(self, dtype):
assert dtype == dtype
assert dtype != object()
def test_array_type(self, data, dtype):
assert dtype.construct_array_type() is type(data)
def test_check_dtype(self, data):
dtype = data.dtype
# check equivalency for using .dtypes
df = pd.DataFrame({'A': pd.Series(data, dtype=dtype),
'B': data,
'C': 'foo', 'D': 1})
# np.dtype('int64') == 'Int64' == 'int64'
# so can't distinguish
if dtype.name == 'Int64':
expected = pd.Series([True, True, False, True],
index=list('ABCD'))
else:
expected = pd.Series([True, True, False, False],
index=list('ABCD'))
# XXX: This should probably be *fixed* not ignored.
# See libops.scalar_compare
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
result = df.dtypes == str(dtype)
self.assert_series_equal(result, expected)
expected = pd.Series([True, True, False, False],
index=list('ABCD'))
result = df.dtypes.apply(str) == str(dtype)
self.assert_series_equal(result, expected)
def test_hashable(self, dtype):
hash(dtype) # no error | pandas/tests/extension/base/dtype.py | import warnings
import numpy as np
import pandas as pd
from .base import BaseExtensionTests
class BaseDtypeTests(BaseExtensionTests):
"""Base class for ExtensionDtype classes"""
def test_name(self, dtype):
assert isinstance(dtype.name, str)
def test_kind(self, dtype):
valid = set('biufcmMOSUV')
if dtype.kind is not None:
assert dtype.kind in valid
def test_construct_from_string_own_name(self, dtype):
result = dtype.construct_from_string(dtype.name)
assert type(result) is type(dtype)
# check OK as classmethod
result = type(dtype).construct_from_string(dtype.name)
assert type(result) is type(dtype)
def test_is_dtype_from_name(self, dtype):
result = type(dtype).is_dtype(dtype.name)
assert result is True
def test_is_dtype_unboxes_dtype(self, data, dtype):
assert dtype.is_dtype(data) is True
def test_is_dtype_from_self(self, dtype):
result = type(dtype).is_dtype(dtype)
assert result is True
def test_is_not_string_type(self, dtype):
return not pd.api.types.is_string_dtype(dtype)
def test_is_not_object_type(self, dtype):
return not pd.api.types.is_object_dtype(dtype)
def test_eq_with_str(self, dtype):
assert dtype == dtype.name
assert dtype != dtype.name + '-suffix'
def test_eq_with_numpy_object(self, dtype):
assert dtype != np.dtype('object')
def test_eq_with_self(self, dtype):
assert dtype == dtype
assert dtype != object()
def test_array_type(self, data, dtype):
assert dtype.construct_array_type() is type(data)
def test_check_dtype(self, data):
dtype = data.dtype
# check equivalency for using .dtypes
df = pd.DataFrame({'A': pd.Series(data, dtype=dtype),
'B': data,
'C': 'foo', 'D': 1})
# np.dtype('int64') == 'Int64' == 'int64'
# so can't distinguish
if dtype.name == 'Int64':
expected = pd.Series([True, True, False, True],
index=list('ABCD'))
else:
expected = pd.Series([True, True, False, False],
index=list('ABCD'))
# XXX: This should probably be *fixed* not ignored.
# See libops.scalar_compare
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
result = df.dtypes == str(dtype)
self.assert_series_equal(result, expected)
expected = pd.Series([True, True, False, False],
index=list('ABCD'))
result = df.dtypes.apply(str) == str(dtype)
self.assert_series_equal(result, expected)
def test_hashable(self, dtype):
hash(dtype) # no error | 0.532911 | 0.492249 |
import argparse
class SDParser:
"""Class for handling CLI interaction."""
def __init__(self) -> None:
"""See class docstring."""
self.parser = argparse.ArgumentParser()
self._add_optionals(self.parser)
self.subparsers = self._add_subparser(self.parser)
self.gen_parser = self._add_gen_parser(self.subparsers)
# self.demo_parser = self._add_demo_parser(self.subparsers)
# self.module_parser = self._add_module_parser(self.subparsers)
# self.script_parser = self._add_script_parser(self.subparsers)
self.arguments = self.parser.parse_args()
def handle_arguments(self, arguments, debug=False) -> None:
"""Handle the parsed arguments."""
if debug:
print(arguments)
if arguments.version is True:
self.print_version()
exit()
elif arguments.subcommand == "gen":
output = self.generate(arguments.schema)
print(output)
exit()
else:
print("internal error")
def print_version(self) -> None:
"""Print the package verison."""
from . import __name__, __version__
print(__name__, __version__)
def generate(self, user_schema_path):
"""Generate documentation based on schema."""
from .generate import generate
return generate(user_schema_path)
def _add_optionals(self, root_parser: argparse.ArgumentParser) -> None:
# optional arguments before sub-commands
root_parser.add_argument(
"-V",
"--version",
action="store_true",
help="print version information and exit",
)
def _add_subparser(
self, root_parser: argparse.ArgumentParser
) -> argparse._SubParsersAction:
# create subparsers object
subparsers = root_parser.add_subparsers(
dest="subcommand", help="available sub-commands"
)
return subparsers
def _add_gen_parser(
self, child_parser: argparse._SubParsersAction
) -> argparse.ArgumentParser:
# create the parser for the "gen" command
parser_gen = child_parser.add_parser(
"gen",
help="generate markdown based on a schema",
description="generate markdown based on a schema",
)
parser_gen.add_argument(
"-s",
"--schema",
action="store",
help="schema to generate documentation for (filepath)",
)
return parser_gen | schemadown/parser.py |
import argparse
class SDParser:
"""Class for handling CLI interaction."""
def __init__(self) -> None:
"""See class docstring."""
self.parser = argparse.ArgumentParser()
self._add_optionals(self.parser)
self.subparsers = self._add_subparser(self.parser)
self.gen_parser = self._add_gen_parser(self.subparsers)
# self.demo_parser = self._add_demo_parser(self.subparsers)
# self.module_parser = self._add_module_parser(self.subparsers)
# self.script_parser = self._add_script_parser(self.subparsers)
self.arguments = self.parser.parse_args()
def handle_arguments(self, arguments, debug=False) -> None:
"""Handle the parsed arguments."""
if debug:
print(arguments)
if arguments.version is True:
self.print_version()
exit()
elif arguments.subcommand == "gen":
output = self.generate(arguments.schema)
print(output)
exit()
else:
print("internal error")
def print_version(self) -> None:
"""Print the package verison."""
from . import __name__, __version__
print(__name__, __version__)
def generate(self, user_schema_path):
"""Generate documentation based on schema."""
from .generate import generate
return generate(user_schema_path)
def _add_optionals(self, root_parser: argparse.ArgumentParser) -> None:
# optional arguments before sub-commands
root_parser.add_argument(
"-V",
"--version",
action="store_true",
help="print version information and exit",
)
def _add_subparser(
self, root_parser: argparse.ArgumentParser
) -> argparse._SubParsersAction:
# create subparsers object
subparsers = root_parser.add_subparsers(
dest="subcommand", help="available sub-commands"
)
return subparsers
def _add_gen_parser(
self, child_parser: argparse._SubParsersAction
) -> argparse.ArgumentParser:
# create the parser for the "gen" command
parser_gen = child_parser.add_parser(
"gen",
help="generate markdown based on a schema",
description="generate markdown based on a schema",
)
parser_gen.add_argument(
"-s",
"--schema",
action="store",
help="schema to generate documentation for (filepath)",
)
return parser_gen | 0.7237 | 0.152568 |
import json
import random
import re
import sys
import threading
import time
from azure.iot.device import IoTHubDeviceClient, Message
AUX_CONNECTION_STRING = sys.argv[1]
AUX_BASE_HEART_RATE = 65
AUX_BASE_BODY_TEMPERATURE = 37.0
AUX_MAXIMUM_BODY_TEMPERATURE = 40.0
#SENSOR DATA WILL HOST SENSOR METRICS
sensor_data = {}
#MESSAGE FOR RECEIVING DATA FROM IoT HUB. THIS METHOD WILL BE CALLED BY THE RECEPTION THREAD
def message_listener(client):
while True:
message = client.receive_message()
print("Message received")
print( " Data: {}".format(message.data) )
print( " Properties: {}".format(message.custom_properties))
#METHOD FOR ONE METRIC
def get_sensor_temperature():
temperature = AUX_BASE_BODY_TEMPERATURE + (random.random() * random.random() * 5)
return temperature
#METHOD FOR ONE METRIC
def get_sensor_heart_rate():
heart_rate = AUX_BASE_HEART_RATE + (random.random() * random.random() * 15)
return heart_rate
#METHOD FOR CUSTOM METRIC
def get_blood_sugar_rate():
blood_sugar_rate = AUX_BASE_HEART_RATE + (random.random() * random.random() * 20)
return blood_sugar_rate
def aux_validate_connection_string():
if not AUX_CONNECTION_STRING.startswith( 'HostName=' ):
print "ERROR - YOUR IoT HUB CONNECTION STRING IS NOT VALID"
print "FORMAT - HostName=your_iot_hub_name.azure-devices.net;DeviceId=your_device_name;SharedAccessKey=your_shared_access_key"
sys.exit()
def aux_iothub_client_init():
client = IoTHubDeviceClient.create_from_connection_string(AUX_CONNECTION_STRING)
return client
def iothub_client_telemetry_sample_run():
try:
aux_validate_connection_string()
client = aux_iothub_client_init()
print ( "IoT Hub Message receiver" )
print ( "Press Ctrl-C to exit" )
#ENABLE THE RECEPTION THREAD, DEFINING THE TARGET METHOD
message_listener_thread = threading.Thread(target=message_listener, args=(client,))
message_listener_thread.daemon = True
message_listener_thread.start()
#IT WILL RUN FOREVER UNLESS YOU STOP IT BY PRESSING CTRL + C
while True:
time.sleep(1000)
except KeyboardInterrupt:
print ( "IoTHubClient sample stopped" )
if __name__ == '__main__':
iothub_client_telemetry_sample_run() | iot-client/iot-hub-client-message.py | import json
import random
import re
import sys
import threading
import time
from azure.iot.device import IoTHubDeviceClient, Message
AUX_CONNECTION_STRING = sys.argv[1]
AUX_BASE_HEART_RATE = 65
AUX_BASE_BODY_TEMPERATURE = 37.0
AUX_MAXIMUM_BODY_TEMPERATURE = 40.0
#SENSOR DATA WILL HOST SENSOR METRICS
sensor_data = {}
#MESSAGE FOR RECEIVING DATA FROM IoT HUB. THIS METHOD WILL BE CALLED BY THE RECEPTION THREAD
def message_listener(client):
while True:
message = client.receive_message()
print("Message received")
print( " Data: {}".format(message.data) )
print( " Properties: {}".format(message.custom_properties))
#METHOD FOR ONE METRIC
def get_sensor_temperature():
temperature = AUX_BASE_BODY_TEMPERATURE + (random.random() * random.random() * 5)
return temperature
#METHOD FOR ONE METRIC
def get_sensor_heart_rate():
heart_rate = AUX_BASE_HEART_RATE + (random.random() * random.random() * 15)
return heart_rate
#METHOD FOR CUSTOM METRIC
def get_blood_sugar_rate():
blood_sugar_rate = AUX_BASE_HEART_RATE + (random.random() * random.random() * 20)
return blood_sugar_rate
def aux_validate_connection_string():
if not AUX_CONNECTION_STRING.startswith( 'HostName=' ):
print "ERROR - YOUR IoT HUB CONNECTION STRING IS NOT VALID"
print "FORMAT - HostName=your_iot_hub_name.azure-devices.net;DeviceId=your_device_name;SharedAccessKey=your_shared_access_key"
sys.exit()
def aux_iothub_client_init():
client = IoTHubDeviceClient.create_from_connection_string(AUX_CONNECTION_STRING)
return client
def iothub_client_telemetry_sample_run():
try:
aux_validate_connection_string()
client = aux_iothub_client_init()
print ( "IoT Hub Message receiver" )
print ( "Press Ctrl-C to exit" )
#ENABLE THE RECEPTION THREAD, DEFINING THE TARGET METHOD
message_listener_thread = threading.Thread(target=message_listener, args=(client,))
message_listener_thread.daemon = True
message_listener_thread.start()
#IT WILL RUN FOREVER UNLESS YOU STOP IT BY PRESSING CTRL + C
while True:
time.sleep(1000)
except KeyboardInterrupt:
print ( "IoTHubClient sample stopped" )
if __name__ == '__main__':
iothub_client_telemetry_sample_run() | 0.1495 | 0.03394 |
from __future__ import unicode_literals
from django.db import transaction, IntegrityError
from django.test import TestCase
from .models import (Place, Restaurant, Waiter, ManualPrimaryKey, RelatedModel,
MultiModel)
class OneToOneTests(TestCase):
def setUp(self):
self.p1 = Place(name='<NAME>', address='944 W. Fullerton')
self.p1.save()
self.p2 = Place(name='Ace Hardware', address='1013 N. Ashland')
self.p2.save()
self.r = Restaurant(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
self.r.save()
def test_getter(self):
# A Restaurant can access its place.
self.assertEqual(repr(self.r.place), '<Place: Demon Dogs the place>')
# A Place can access its restaurant, if available.
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
# p2 doesn't have an associated restaurant.
with self.assertRaisesMessage(Restaurant.DoesNotExist, 'Place has no restaurant'):
self.p2.restaurant
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(self.p2, 'restaurant'))
def test_setter(self):
# Set the place using assignment notation. Because place is the primary
# key on Restaurant, the save will create a new restaurant
self.r.place = self.p2
self.r.save()
self.assertEqual(repr(self.p2.restaurant), '<Restaurant: Ace Hardware the restaurant>')
self.assertEqual(repr(self.r.place), '<Place: Ace Hardware the place>')
self.assertEqual(self.p2.pk, self.r.pk)
# Set the place back again, using assignment in the reverse direction.
self.p1.restaurant = self.r
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
r = Restaurant.objects.get(pk=self.p1.id)
self.assertEqual(repr(r.place), '<Place: Demon Dogs the place>')
def test_manager_all(self):
# Restaurant.objects.all() just returns the Restaurants, not the Places.
self.assertQuerysetEqual(Restaurant.objects.all(), [
'<Restaurant: Demon Dogs the restaurant>',
])
# Place.objects.all() returns all Places, regardless of whether they
# have Restaurants.
self.assertQuerysetEqual(Place.objects.order_by('name'), [
'<Place: Ace Hardware the place>',
'<Place: Demon Dogs the place>',
])
def test_manager_get(self):
def assert_get_restaurant(**params):
self.assertEqual(repr(Restaurant.objects.get(**params)),
'<Restaurant: Demon Dogs the restaurant>')
assert_get_restaurant(place__id__exact=self.p1.pk)
assert_get_restaurant(place__id=self.p1.pk)
assert_get_restaurant(place__exact=self.p1.pk)
assert_get_restaurant(place__exact=self.p1)
assert_get_restaurant(place=self.p1.pk)
assert_get_restaurant(place=self.p1)
assert_get_restaurant(pk=self.p1.pk)
assert_get_restaurant(place__pk__exact=self.p1.pk)
assert_get_restaurant(place__pk=self.p1.pk)
assert_get_restaurant(place__name__startswith="Demon")
def assert_get_place(**params):
self.assertEqual(repr(Place.objects.get(**params)),
'<Place: Demon Dogs the place>')
assert_get_place(restaurant__place__exact=self.p1.pk)
assert_get_place(restaurant__place__exact=self.p1)
assert_get_place(restaurant__place__pk=self.p1.pk)
assert_get_place(restaurant__exact=self.p1.pk)
assert_get_place(restaurant__exact=self.r)
assert_get_place(restaurant__pk=self.p1.pk)
assert_get_place(restaurant=self.p1.pk)
assert_get_place(restaurant=self.r)
assert_get_place(id__exact=self.p1.pk)
assert_get_place(pk=self.p1.pk)
def test_foreign_key(self):
# Add a Waiter to the Restaurant.
w = self.r.waiter_set.create(name='Joe')
w.save()
self.assertEqual(repr(w), '<Waiter: Joe the waiter at Demon Dogs the restaurant>')
# Query the waiters
def assert_filter_waiters(**params):
self.assertQuerysetEqual(Waiter.objects.filter(**params), [
'<Waiter: Joe the waiter at Demon Dogs the restaurant>'
])
assert_filter_waiters(restaurant__place__exact=self.p1.pk)
assert_filter_waiters(restaurant__place__exact=self.p1)
assert_filter_waiters(restaurant__place__pk=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.p1)
assert_filter_waiters(restaurant__pk=self.p1.pk)
assert_filter_waiters(restaurant=self.p1.pk)
assert_filter_waiters(restaurant=self.r)
assert_filter_waiters(id__exact=self.p1.pk)
assert_filter_waiters(pk=self.p1.pk)
# Delete the restaurant; the waiter should also be removed
r = Restaurant.objects.get(pk=self.p1.pk)
r.delete()
self.assertEqual(Waiter.objects.count(), 0)
def test_multiple_o2o(self):
# One-to-one fields still work if you create your own primary key
o1 = ManualPrimaryKey(primary_key="abc123", name="primary")
o1.save()
o2 = RelatedModel(link=o1, name="secondary")
o2.save()
# You can have multiple one-to-one fields on a model, too.
x1 = MultiModel(link1=self.p1, link2=o1, name="x1")
x1.save()
self.assertEqual(repr(o1.multimodel), '<MultiModel: Multimodel x1>')
# This will fail because each one-to-one field must be unique (and
# link2=o1 was used for x1, above).
mm = MultiModel(link1=self.p2, link2=o1, name="x1")
with self.assertRaises(IntegrityError):
with transaction.atomic():
mm.save() | tests/one_to_one/tests.py | from __future__ import unicode_literals
from django.db import transaction, IntegrityError
from django.test import TestCase
from .models import (Place, Restaurant, Waiter, ManualPrimaryKey, RelatedModel,
MultiModel)
class OneToOneTests(TestCase):
def setUp(self):
self.p1 = Place(name='<NAME>', address='944 W. Fullerton')
self.p1.save()
self.p2 = Place(name='Ace Hardware', address='1013 N. Ashland')
self.p2.save()
self.r = Restaurant(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
self.r.save()
def test_getter(self):
# A Restaurant can access its place.
self.assertEqual(repr(self.r.place), '<Place: Demon Dogs the place>')
# A Place can access its restaurant, if available.
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
# p2 doesn't have an associated restaurant.
with self.assertRaisesMessage(Restaurant.DoesNotExist, 'Place has no restaurant'):
self.p2.restaurant
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(self.p2, 'restaurant'))
def test_setter(self):
# Set the place using assignment notation. Because place is the primary
# key on Restaurant, the save will create a new restaurant
self.r.place = self.p2
self.r.save()
self.assertEqual(repr(self.p2.restaurant), '<Restaurant: Ace Hardware the restaurant>')
self.assertEqual(repr(self.r.place), '<Place: Ace Hardware the place>')
self.assertEqual(self.p2.pk, self.r.pk)
# Set the place back again, using assignment in the reverse direction.
self.p1.restaurant = self.r
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
r = Restaurant.objects.get(pk=self.p1.id)
self.assertEqual(repr(r.place), '<Place: Demon Dogs the place>')
def test_manager_all(self):
# Restaurant.objects.all() just returns the Restaurants, not the Places.
self.assertQuerysetEqual(Restaurant.objects.all(), [
'<Restaurant: Demon Dogs the restaurant>',
])
# Place.objects.all() returns all Places, regardless of whether they
# have Restaurants.
self.assertQuerysetEqual(Place.objects.order_by('name'), [
'<Place: Ace Hardware the place>',
'<Place: Demon Dogs the place>',
])
def test_manager_get(self):
def assert_get_restaurant(**params):
self.assertEqual(repr(Restaurant.objects.get(**params)),
'<Restaurant: Demon Dogs the restaurant>')
assert_get_restaurant(place__id__exact=self.p1.pk)
assert_get_restaurant(place__id=self.p1.pk)
assert_get_restaurant(place__exact=self.p1.pk)
assert_get_restaurant(place__exact=self.p1)
assert_get_restaurant(place=self.p1.pk)
assert_get_restaurant(place=self.p1)
assert_get_restaurant(pk=self.p1.pk)
assert_get_restaurant(place__pk__exact=self.p1.pk)
assert_get_restaurant(place__pk=self.p1.pk)
assert_get_restaurant(place__name__startswith="Demon")
def assert_get_place(**params):
self.assertEqual(repr(Place.objects.get(**params)),
'<Place: Demon Dogs the place>')
assert_get_place(restaurant__place__exact=self.p1.pk)
assert_get_place(restaurant__place__exact=self.p1)
assert_get_place(restaurant__place__pk=self.p1.pk)
assert_get_place(restaurant__exact=self.p1.pk)
assert_get_place(restaurant__exact=self.r)
assert_get_place(restaurant__pk=self.p1.pk)
assert_get_place(restaurant=self.p1.pk)
assert_get_place(restaurant=self.r)
assert_get_place(id__exact=self.p1.pk)
assert_get_place(pk=self.p1.pk)
def test_foreign_key(self):
# Add a Waiter to the Restaurant.
w = self.r.waiter_set.create(name='Joe')
w.save()
self.assertEqual(repr(w), '<Waiter: Joe the waiter at Demon Dogs the restaurant>')
# Query the waiters
def assert_filter_waiters(**params):
self.assertQuerysetEqual(Waiter.objects.filter(**params), [
'<Waiter: Joe the waiter at Demon Dogs the restaurant>'
])
assert_filter_waiters(restaurant__place__exact=self.p1.pk)
assert_filter_waiters(restaurant__place__exact=self.p1)
assert_filter_waiters(restaurant__place__pk=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.p1)
assert_filter_waiters(restaurant__pk=self.p1.pk)
assert_filter_waiters(restaurant=self.p1.pk)
assert_filter_waiters(restaurant=self.r)
assert_filter_waiters(id__exact=self.p1.pk)
assert_filter_waiters(pk=self.p1.pk)
# Delete the restaurant; the waiter should also be removed
r = Restaurant.objects.get(pk=self.p1.pk)
r.delete()
self.assertEqual(Waiter.objects.count(), 0)
def test_multiple_o2o(self):
# One-to-one fields still work if you create your own primary key
o1 = ManualPrimaryKey(primary_key="abc123", name="primary")
o1.save()
o2 = RelatedModel(link=o1, name="secondary")
o2.save()
# You can have multiple one-to-one fields on a model, too.
x1 = MultiModel(link1=self.p1, link2=o1, name="x1")
x1.save()
self.assertEqual(repr(o1.multimodel), '<MultiModel: Multimodel x1>')
# This will fail because each one-to-one field must be unique (and
# link2=o1 was used for x1, above).
mm = MultiModel(link1=self.p2, link2=o1, name="x1")
with self.assertRaises(IntegrityError):
with transaction.atomic():
mm.save() | 0.711932 | 0.434461 |
import math
import numpy as np
import torch
from torch.nn import init
np.random.seed(4)
# to compare with pytorch, we use the same init function as pytoch uses
def reset_paramters(weight_shape, bias_shape):
    """Initialize a (weight, bias) pair the way torch.nn layers do.

    Weight gets Kaiming-uniform init; bias gets U(-1/sqrt(fan_in),
    1/sqrt(fan_in)), matching torch.nn.Linear / Conv2d defaults so results
    are comparable with pytorch.

    Args:
        weight_shape: tuple, shape of the weight tensor.
        bias_shape: tuple, shape of the bias tensor.

    Returns:
        (weight, bias) as numpy arrays.
    """
    # torch.empty instead of torch.rand: kaiming_uniform_ and uniform_
    # overwrite every element in place, so pre-filling with random values
    # was wasted work (and consumed RNG draws for nothing).
    weight = torch.empty(weight_shape)
    bias = torch.empty(bias_shape)
    init.kaiming_uniform_(weight, a=math.sqrt(5))
    # bias bound follows pytorch's reset_parameters convention
    fan_in, _ = init._calculate_fan_in_and_fan_out(weight)
    bound = 1 / math.sqrt(fan_in)
    init.uniform_(bias, -bound, bound)
    return weight.numpy(), bias.numpy()
class Net(object):
    """Abstract base for every layer: defines the forward/backward contract."""

    def forward(self, matrix):
        """Compute the layer output for `matrix`; subclasses must override."""
        raise NotImplementedError

    def backward(self, delt_z):
        """Propagate the upstream gradient `delt_z`; subclasses must override."""
        raise NotImplementedError
class Convolution(Net):
    """
    Naive loop-based 2D convolution layer (valid padding, stride 1).

    Basic convolution arithmetic:
        A_pre = activate(Z_pre) ---> pre activate layer
        P_pre = pooling(A_pre)  ---> pre pooling layer
        Z = W * P_pre + b       ---> convolution layer (here)
        A = activate(Z)         ---> activate layer
        P = pooling(A)          ---> pooling layer
    """
    def __init__(self, in_channel, kernal_size, out_channel):
        # layer geometry
        self.in_channel = in_channel
        self.kernal_size = kernal_size
        self.out_channel = out_channel
        # parameters, initialized like torch.nn.Conv2d
        self.weights, self.bias = reset_paramters((out_channel, in_channel, kernal_size, kernal_size), (out_channel, 1))
        # caches filled by forward, consumed by backward
        self.Z = None
        self.P_pre = None

    def forward(self, P_pre):
        """Convolve the batch with self.weights.

        :param P_pre: input batch, shape (m, in_channel, h, w)
        :return: Z, shape (m, out_channel, h - k + 1, w - k + 1)
        """
        m, _, h, w = P_pre.shape
        self.P_pre = P_pre
        k = self.kernal_size
        # output spatial size for a valid (no-padding) convolution
        h_steps = h - k + 1
        w_steps = w - k + 1
        self.Z = np.zeros((m, self.out_channel, h_steps, w_steps))
        for i in range(m):
            for oc in range(self.out_channel):
                # fixed: `self.weights[c][:][:][:]` was a misleading no-op
                # slice chain — it is exactly `self.weights[c]`
                W = self.weights[oc]
                B = self.bias[oc]
                for y in range(h_steps):
                    for x in range(w_steps):
                        X = P_pre[i, :, y:y + k, x:x + k]
                        self.Z[i, oc, y, x] = float(np.sum(W * X) + B)
        return self.Z

    def backward(self, delt_Z):
        """
        :param delt_Z: gradient of Z, backward from activate layer
        :return: (delt_weight, delt_bias, delt_P_pre); the parameter
                 gradients are averaged over the batch, the input gradient
                 is not (matching the original implementation)
        """
        _, _, k, _ = self.weights.shape
        m, n_C, n_H, n_W = delt_Z.shape
        # accumulators for the three gradients
        delt_P_pre = np.zeros(self.P_pre.shape)
        delt_weight = np.zeros(self.weights.shape)
        delt_bias = np.zeros(self.bias.shape)
        for i in range(m):
            for oc in range(n_C):
                for y in range(n_H):
                    for x in range(n_W):
                        g = delt_Z[i, oc, y, x]
                        a_slice = self.P_pre[i, :, y:y + k, x:x + k]
                        # scatter the filter back onto the input window
                        delt_P_pre[i, :, y:y + k, x:x + k] += self.weights[oc] * g
                        delt_weight[oc] += g * a_slice
                        delt_bias[oc] += g
        # average parameter gradients over the batch
        delt_weight = delt_weight / m
        delt_bias = delt_bias / m
        return delt_weight, delt_bias, delt_P_pre
class MaxPooling(Net):
    """
    Non-overlapping max pooling (stride == kernal_size).

    A = activate(Z)    ---> activate layer
    P = pooling(A)     ---> pooling layer (here)
    Z_next = W * P + b ---> convolution layer
    """
    def __init__(self, kernal_size):
        # pooling window size; also used as the stride
        self.kernal_size = kernal_size
        # caches filled by forward, consumed by backward
        self.mask_A = None  # 1 at each window's argmax, 0 elsewhere
        self.A = None
        self.P = None

    def forward(self, A):
        """Pool each kernal_size x kernal_size window down to its maximum.

        :param A: input batch, shape (m, c, h, w)
        :return: P, shape (m, c, h // kernal_size, w // kernal_size)
        """
        m, c, h, w = A.shape
        self.A = A
        self.mask_A = np.zeros(A.shape)  # remembers argmax locations for backward
        k = self.kernal_size
        # BUGFIX: output size and window extent were hard-coded to 2
        # (only correct when kernal_size == 2); now they honor kernal_size
        h_steps = h // k
        w_steps = w // k
        self.P = np.zeros((m, c, h_steps, w_steps))
        for i in range(m):
            for ch in range(c):
                for y in range(h_steps):
                    for x in range(w_steps):
                        h_start = y * k
                        w_start = x * k
                        window = A[i, ch, h_start:h_start + k, w_start:w_start + k]
                        best = np.max(window)
                        self.P[i, ch, y, x] = best
                        # record the (first) argmax so backward can route the
                        # gradient to exactly one position per window
                        location = np.where(window == best)
                        self.mask_A[i, ch, h_start + location[0][0], w_start + location[1][0]] = 1
        return self.P

    def backward(self, delt_pre):
        """Route each pooled gradient back to the position that won the max.

        :param delt_pre: gradient w.r.t. P, same shape as self.P
        :return: gradient w.r.t. A
        """
        self.delt_A = np.zeros(self.A.shape)
        m, c, h_steps, w_steps = self.P.shape
        k = self.kernal_size
        for i in range(m):
            for ch in range(c):
                for y in range(h_steps):
                    for x in range(w_steps):
                        h_start = y * k
                        w_start = x * k
                        # BUGFIX: broadcast over the full k x k window (was 2 x 2)
                        self.delt_A[i, ch, h_start:h_start + k, w_start:w_start + k] = delt_pre[i, ch, y, x]
        # the mask zeroes every position except each window's argmax
        delt_pool = self.delt_A * self.mask_A
        return delt_pool
class FC(Net):
    """
    Fully connected (dense) layer.

    A_pre = activate(Z_pre) ---> pre activate layer
    Z = W * A_pre + b       ---> fc layer (here)
    A = activate(Z)         ---> activate layer
    """
    def __init__(self, input_dim, out_dim):
        # parameters, initialized the same way pytorch does
        self.weights, self.bias = reset_paramters((out_dim, input_dim), (out_dim, 1))
        # caches filled by forward, consumed by backward
        self.Z = None
        self.A_pre = None

    def forward(self, A_pre):
        """Affine map Z = W @ A_pre + b; caches A_pre for backward."""
        self.Z = self.weights @ A_pre + self.bias
        self.A_pre = A_pre
        return self.Z

    def backward(self, delt_Z):
        """
        :param delt_Z: gradient of Z, backward from activate layer
        :return: gradients of W, b and A_pre (parameter gradients are
                 averaged over the batch; the input gradient is not)
        """
        batch = delt_Z.shape[1]
        # chain rule through Z = W @ A_pre + b
        grad_input = self.weights.T @ delt_Z
        grad_weight = (delt_Z @ self.A_pre.T) / batch
        grad_bias = delt_Z.sum(axis=1).reshape(-1, 1) / batch
        return grad_weight, grad_bias, grad_input
import numpy as np
import torch
from torch.nn import init
np.random.seed(4)
# to compare with pytorch, we use the same init function as pytoch uses
def reset_paramters(weight_shape, bias_shape):
    """Initialize a (weight, bias) pair the way torch.nn layers do; returns numpy arrays."""
    # NOTE(review): the torch.rand fills are immediately overwritten by the
    # in-place init_ calls below, so the initial random values are unused
    weight = torch.rand(weight_shape)
    bias = torch.rand(bias_shape)
    init.kaiming_uniform_(weight, a=math.sqrt(5))
    # bias ~ U(-1/sqrt(fan_in), 1/sqrt(fan_in)), matching torch.nn.Linear/Conv2d
    fan_in, _ = init._calculate_fan_in_and_fan_out(weight)
    bound = 1 / math.sqrt(fan_in)
    init.uniform_(bias, -bound, bound)
    return weight.numpy(), bias.numpy()
class Net(object):
    """Abstract base for every layer: defines the forward/backward contract."""
    def forward(self, matrix):
        # subclasses must override
        raise NotImplementedError
    def backward(self, delt_z):
        # subclasses must override
        raise NotImplementedError
class Convolution(Net):
    """
    Basic convolution arithmetic
    A_pre = activate(Z_pre) ---> pre activate layer
    P_pre = pooling(A_pre) ---> pre pooling layer
    Z = W * P_pre + b ---> convolution layer (here)
    A = activate(Z) ---> activate layer
    P = pooling(A) ---> pooling layer
    """
    def __init__(self, in_channel, kernal_size, out_channel):
        # shape
        self.in_channel = in_channel
        self.kernal_size = kernal_size
        self.out_channel = out_channel
        # init parameters
        self.weights, self.bias = reset_paramters((out_channel, in_channel, kernal_size, kernal_size), (out_channel, 1))
        # cache matrix
        self.Z = None
        self.P_pre = None
    def forward(self, P_pre):
        """Valid, stride-1 convolution; fills self.Z of shape (m, out_channel, h-k+1, w-k+1)."""
        # get origin shape of image
        m, c, h, w = P_pre.shape
        self.P_pre = P_pre
        # calculate output shape after convolution
        h_steps = h - self.kernal_size + 1
        w_steps = w - self.kernal_size + 1
        self.Z = np.zeros((m, self.out_channel, h_steps, w_steps))
        out_matrix_m, out_matrix_c, out_matrix_h, out_matrix_w = self.Z.shape
        # convolution
        # NOTE(review): the loop variables c/h/w below shadow the shape values
        # unpacked above — harmless here, but easy to misread
        for i in range(out_matrix_m):
            for c in range(out_matrix_c):
                for h in range(out_matrix_h):
                    for w in range(out_matrix_w):
                        X = P_pre[i, :, h:h + self.kernal_size, w:w + self.kernal_size]
                        # NOTE(review): [c][:][:][:] is a no-op slice chain,
                        # equivalent to plain self.weights[c]
                        W = self.weights[c][:][:][:]
                        B = self.bias[c]
                        assert X.shape == W.shape
                        z = float(np.sum(W * X) + B)
                        self.Z[i, c, h, w] = z
        return self.Z
    def backward(self, delt_Z):
        """
        :param delt_Z: gradient of Z, backward from activate layer
        :return: gradients of W, b and A_pre
        """
        # get matrix shape
        out_channel, in_channel, kernal_size, kernal_size = self.weights.shape
        m, n_C, n_H, n_W = delt_Z.shape
        # init zeros matrix to store gradient
        delt_P_pre = np.zeros(self.P_pre.shape)
        delt_weight = np.zeros((out_channel, in_channel, kernal_size, kernal_size))
        delt_bias = np.zeros((out_channel, 1))
        # calculate gradient
        for i in range(m):
            for c in range(n_C):
                for h in range(n_H):
                    for w in range(n_W):
                        a_slice = self.P_pre[i, :, h:h + kernal_size, w:w + kernal_size]
                        # input gradient: scatter the filter weighted by the upstream grad
                        delt_P_pre[i, :, h:h + kernal_size, w:w + kernal_size] += self.weights[c, :, :, :] \
                                                                                  * delt_Z[i, c, h, w]
                        delt_weight[c, :, :, :] += delt_Z[i, c, h, w] * a_slice
                        delt_bias[c, :] += delt_Z[i, c, h, w]
        # gradients mean (averaged over the batch; delt_P_pre is not averaged)
        delt_weight = delt_weight / m
        delt_bias = delt_bias / m
        return delt_weight, delt_bias, delt_P_pre
class MaxPooling(Net):
    """
    Max Pooling arithmetic
    A = activate(Z) ---> activate layer
    P = pooling(A) ---> pooling layer (here)
    Z_next = W * P + b ---> convolution layer
    """
    def __init__(self, kernal_size):
        # shape (window size, also used as stride)
        self.kernal_size = kernal_size
        # cache matrix
        self.mask_A = None  # 1 at each window's argmax, 0 elsewhere
        self.A = None
        self.P = None
    def forward(self, A):
        """Pool each window down to its maximum, recording argmax positions in mask_A."""
        # get origin shape of matrix
        m, c, h, w = A.shape
        # calculate output shape after pooling
        self.A = A
        self.mask_A = np.zeros(A.shape)  # mask matrix store location
        # NOTE(review): output size and the window slices below hard-code 2;
        # this appears correct only when kernal_size == 2 — confirm intent
        h_steps = int(h / 2)
        w_steps = int(w / 2)
        self.P = np.zeros((m, c, h_steps, w_steps))
        out_matrix_m, out_matrix_c, out_matrix_h, out_matrix_w = self.P.shape
        # max pooling, find the max value on (kernal_size*kernal_size) window and store location in self.mask_matrix
        for i in range(out_matrix_m):
            for c in range(out_matrix_c):
                for h in range(out_matrix_h):
                    for w in range(out_matrix_w):
                        h_start = h * self.kernal_size
                        w_start = w * self.kernal_size
                        slice_matrix = A[i, c, h_start:h_start + 2, w_start:w_start + 2]
                        self.P[i, c, h, w] = np.max(slice_matrix)
                        # remember the (first) argmax so backward routes the gradient there
                        location = np.where(slice_matrix == np.max(slice_matrix))
                        self.mask_A[i, c, h_start + location[0][0], w_start + location[1][0]] = 1
        return self.P
    def backward(self, delt_pre):
        """Route each pooled gradient back to the window position that won the max."""
        # init zeros matrix to store gradient
        self.delt_A = np.zeros(self.A.shape)
        # calculate gradients
        out_matrix_m, out_matrix_c, out_matrix_h, out_matrix_w = self.P.shape
        for i in range(out_matrix_m):
            for c in range(out_matrix_c):
                for h in range(out_matrix_h):
                    for w in range(out_matrix_w):
                        h_start = h * self.kernal_size
                        w_start = w * self.kernal_size
                        # NOTE(review): window again hard-coded to 2x2 — see forward
                        self.delt_A[i, c, h_start:h_start + 2, w_start:w_start + 2] = delt_pre[i, c, h, w]
        # the mask zeroes every position except each window's argmax
        delt_pool = self.delt_A * self.mask_A
        return delt_pool
class FC(Net):
    """
    Basic fully connect arithmetic
    A_pre = activate(Z_pre) ---> pre activate layer
    Z = W * A_pre + b ---> fc layer (here)
    A = activate(Z) ---> activate layer
    """
    def __init__(self, input_dim, out_dim):
        # init parameters (same scheme pytorch uses)
        self.weights, self.bias = reset_paramters((out_dim, input_dim), (out_dim, 1))
        # cache matrix
        self.Z = None
        self.A_pre = None
    def forward(self, A_pre):
        """Affine map; caches A_pre for backward."""
        self.Z = np.dot(self.weights, A_pre) + self.bias  # W* A_pre + b = Z
        self.A_pre = A_pre
        return self.Z
    def backward(self, delt_Z):
        """
        :param delt_Z: gradient of Z, backward from activate layer
        :return: gradients of W, b and A_pre
        """
        # calculate gradients
        delt_A_pre = np.dot(self.weights.transpose(), delt_Z)
        delt_weight = np.dot(delt_Z, self.A_pre.transpose())
        delt_bias = delt_Z.sum(axis=1).reshape(-1, 1)
        # gradients mean (averaged over batch; delt_A_pre is not averaged)
        m = delt_Z.shape[1]
        delt_weight = delt_weight / m
        delt_bias = delt_bias / m
        # NOTE(review): the trailing "| 0.863938 | 0.601184" below is
        # dataset-extraction residue fused onto this line, not code
        return delt_weight, delt_bias, delt_A_pre | 0.863938 | 0.601184