repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
pyoceans/python-ctd | ctd/read.py | from_edf | python | def from_edf(fname):
f = _read_file(fname)
header, names = [], []
for k, line in enumerate(f.readlines()):
line = line.strip()
if line.startswith("Serial Number"):
serial = line.strip().split(":")[1].strip()
elif line.startswith("Latitude"):
try:
hemisphere = line[-1]
lat = line.strip(hemisphere).split(":")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
except (IndexError, ValueError):
lat = None
elif line.startswith("Longitude"):
try:
hemisphere = line[-1]
lon = line.strip(hemisphere).split(":")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
except (IndexError, ValueError):
lon = None
else:
header.append(line)
if line.startswith("Field"):
col, unit = [l.strip().lower() for l in line.split(":")]
names.append(unit.split()[0])
if line == "// Data":
skiprows = k + 1
break
f.seek(0)
df = pd.read_csv(
f,
header=None,
index_col=None,
names=names,
skiprows=skiprows,
delim_whitespace=True,
)
f.close()
df.set_index("depth", drop=True, inplace=True)
df.index.name = "Depth [m]"
name = _basename(fname)[1]
metadata = {
"lon": lon,
"lat": lat,
"name": str(name),
"header": "\n".join(header),
"serial": serial,
}
setattr(df, "_metadata", metadata)
return df | DataFrame constructor to open XBT EDF ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_edf(data_path.joinpath('XBT.EDF.gz'))
>>> ax = cast['temperature'].plot_cast() | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/read.py#L259-L332 | [
"def _basename(fname):\n \"\"\"Return file name without path.\"\"\"\n if not isinstance(fname, Path):\n fname = Path(fname)\n path, name, ext = fname.parent, fname.stem, fname.suffix\n return path, name, ext\n",
"def _read_file(fname):\n if not isinstance(fname, Path):\n fname = Path(fname).resolve()\n\n extension = fname.suffix.lower()\n if extension in [\".gzip\", \".gz\", \".bz2\", \".zip\"]:\n contents = _open_compressed(fname)\n elif extension in [\".cnv\", \".edf\", \".txt\", \".ros\", \".btl\"]:\n contents = fname.read_bytes()\n else:\n raise ValueError(\n f\"Unrecognized file extension. Expected .cnv, .edf, .txt, .ros, or .btl got {extension}\"\n )\n # Read as bytes but we need to return strings for the parsers.\n text = contents.decode(encoding=\"utf-8\", errors=\"replace\")\n return StringIO(text)\n"
] | import bz2
import gzip
import linecache
import re
import warnings
import zipfile
from datetime import datetime
from io import StringIO
from pathlib import Path
import numpy as np
import pandas as pd
def _basename(fname):
"""Return file name without path."""
if not isinstance(fname, Path):
fname = Path(fname)
path, name, ext = fname.parent, fname.stem, fname.suffix
return path, name, ext
def _normalize_names(name):
name = name.strip()
name = name.strip("*")
return name
def _open_compressed(fname):
extension = fname.suffix.lower()
if extension in [".gzip", ".gz"]:
cfile = gzip.open(str(fname))
elif extension == ".bz2":
cfile = bz2.BZ2File(str(fname))
elif extension == ".zip":
# NOTE: Zip format may contain more than one file in the archive
# (similar to tar), here we assume that there is just one file per
# zipfile! Also, we ask for the name because it can be different from
# the zipfile file!!
zfile = zipfile.ZipFile(str(fname))
name = zfile.namelist()[0]
cfile = zfile.open(name)
else:
raise ValueError(
"Unrecognized file extension. Expected .gzip, .bz2, or .zip, got {}".format(
extension
)
)
contents = cfile.read()
cfile.close()
return contents
def _read_file(fname):
if not isinstance(fname, Path):
fname = Path(fname).resolve()
extension = fname.suffix.lower()
if extension in [".gzip", ".gz", ".bz2", ".zip"]:
contents = _open_compressed(fname)
elif extension in [".cnv", ".edf", ".txt", ".ros", ".btl"]:
contents = fname.read_bytes()
else:
raise ValueError(
f"Unrecognized file extension. Expected .cnv, .edf, .txt, .ros, or .btl got {extension}"
)
# Read as bytes but we need to return strings for the parsers.
text = contents.decode(encoding="utf-8", errors="replace")
return StringIO(text)
def _parse_seabird(lines, ftype="cnv"):
# Initialize variables.
lon = lat = time = None, None, None
skiprows = 0
metadata = {}
header, config, names = [], [], []
for k, line in enumerate(lines):
line = line.strip()
# Only cnv has columns names, for bottle files we will use the variable row.
if ftype == "cnv":
if "# name" in line:
name, unit = line.split("=")[1].split(":")
name, unit = list(map(_normalize_names, (name, unit)))
names.append(name)
# Seabird headers starts with *.
if line.startswith("*"):
header.append(line)
# Seabird configuration starts with #.
if line.startswith("#"):
config.append(line)
# NMEA position and time.
if "NMEA Latitude" in line:
hemisphere = line[-1]
lat = line.strip(hemisphere).split("=")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
else:
raise ValueError("Latitude not recognized.")
if "NMEA Longitude" in line:
hemisphere = line[-1]
lon = line.strip(hemisphere).split("=")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
else:
raise ValueError("Latitude not recognized.")
if "NMEA UTC (Time)" in line:
time = line.split("=")[-1].strip()
# Should use some fuzzy datetime parser to make this more robust.
time = datetime.strptime(time, "%b %d %Y %H:%M:%S")
# cnv file header ends with *END* while
if ftype == "cnv":
if line == "*END*":
skiprows = k + 1
break
else: # btl.
# There is no *END* like in a .cnv file, skip two after header info.
if not (line.startswith("*") | line.startswith("#")):
# Fix commonly occurring problem when Sbeox.* exists in the file
# the name is concatenated to previous parameter
# example:
# CStarAt0Sbeox0Mm/Kg to CStarAt0 Sbeox0Mm/Kg (really two different params)
line = re.sub(r"(\S)Sbeox", "\\1 Sbeox", line)
names = line.split()
skiprows = k + 2
break
if ftype == "btl":
# Capture stat names column.
names.append("Statistic")
metadata.update(
{
"header": "\n".join(header),
"config": "\n".join(config),
"names": names,
"skiprows": skiprows,
"time": time,
"lon": lon,
"lat": lat,
}
)
return metadata
def from_bl(fname):
"""Read Seabird bottle-trip (bl) file
Example
-------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> df = ctd.from_bl(str(data_path.joinpath('bl', 'bottletest.bl')))
>>> df._metadata["time_of_reset"]
datetime.datetime(2018, 6, 25, 20, 8, 55)
"""
df = pd.read_csv(
fname,
skiprows=2,
parse_dates=[1],
index_col=0,
names=["bottle_number", "time", "startscan", "endscan"],
)
df._metadata = {
"time_of_reset": pd.to_datetime(
linecache.getline(str(fname), 2)[6:-1]
).to_pydatetime()
}
return df
def from_btl(fname):
"""
DataFrame constructor to open Seabird CTD BTL-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> bottles = ctd.from_btl(data_path.joinpath('btl', 'bottletest.btl'))
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="btl")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=False,
names=metadata["names"],
parse_dates=False,
skiprows=metadata["skiprows"],
)
f.close()
# At this point the data frame is not correctly lined up (multiple rows
# for avg, std, min, max or just avg, std, etc).
# Also needs date,time,and bottle number to be converted to one per line.
# Get row types, see what you have: avg, std, min, max or just avg, std.
rowtypes = df[df.columns[-1]].unique()
# Get times and dates which occur on second line of each bottle.
dates = df.iloc[:: len(rowtypes), 1].reset_index(drop=True)
times = df.iloc[1 :: len(rowtypes), 1].reset_index(drop=True)
datetimes = dates + " " + times
# Fill the Date column with datetimes.
df.loc[:: len(rowtypes), "Date"] = datetimes.values
df.loc[1 :: len(rowtypes), "Date"] = datetimes.values
# Fill missing rows.
df["Bottle"] = df["Bottle"].fillna(method="ffill")
df["Date"] = df["Date"].fillna(method="ffill")
df["Statistic"] = df["Statistic"].str.replace(r"\(|\)", "") # (avg) to avg
name = _basename(fname)[1]
dtypes = {
"bpos": int,
"pumps": bool,
"flag": bool,
"Bottle": int,
"Scan": int,
"Statistic": str,
"Date": str,
}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
df["Date"] = pd.to_datetime(df["Date"])
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
def from_cnv(fname):
"""
DataFrame constructor to open Seabird CTD CNV-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_cnv(data_path.joinpath('CTD_big.cnv.bz2'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['t090C'].plot_cast()
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="cnv")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=None,
names=metadata["names"],
skiprows=metadata["skiprows"],
delim_whitespace=True,
widths=[11] * len(metadata["names"]),
)
f.close()
key_set = False
prkeys = ["prDM", "prdM", "pr"]
for prkey in prkeys:
try:
df.set_index(prkey, drop=True, inplace=True)
key_set = True
except KeyError:
continue
if not key_set:
raise KeyError(
f"Could not find pressure field (supported names are {prkeys})."
)
df.index.name = "Pressure [dbar]"
name = _basename(fname)[1]
dtypes = {"bpos": int, "pumps": bool, "flag": bool}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
def from_fsi(fname, skiprows=9):
"""
DataFrame constructor to open Falmouth Scientific, Inc. (FSI) CTD
ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_fsi(data_path.joinpath('FSI.txt.gz'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['TEMP'].plot_cast()
"""
f = _read_file(fname)
df = pd.read_csv(
f,
header="infer",
index_col=None,
skiprows=skiprows,
dtype=float,
delim_whitespace=True,
)
f.close()
df.set_index("PRES", drop=True, inplace=True)
df.index.name = "Pressure [dbar]"
metadata = {"name": str(fname)}
setattr(df, "_metadata", metadata)
return df
def rosette_summary(fname):
"""
Make a BTL (bottle) file from a ROS (bottle log) file.
More control for the averaging process and at which step we want to
perform this averaging eliminating the need to read the data into SBE
Software again after pre-processing.
NOTE: Do not run LoopEdit on the upcast!
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> fname = data_path.joinpath('CTD/g01l01s01.ros')
>>> ros = ctd.rosette_summary(fname)
>>> ros = ros.groupby(ros.index).mean()
>>> ros.pressure.values.astype(int)
array([835, 806, 705, 604, 503, 404, 303, 201, 151, 100, 51, 1])
"""
ros = from_cnv(fname)
ros["pressure"] = ros.index.values.astype(float)
ros["nbf"] = ros["nbf"].astype(int)
ros.set_index("nbf", drop=True, inplace=True, verify_integrity=False)
return ros
|
pyoceans/python-ctd | ctd/read.py | from_cnv | python | def from_cnv(fname):
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="cnv")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=None,
names=metadata["names"],
skiprows=metadata["skiprows"],
delim_whitespace=True,
widths=[11] * len(metadata["names"]),
)
f.close()
key_set = False
prkeys = ["prDM", "prdM", "pr"]
for prkey in prkeys:
try:
df.set_index(prkey, drop=True, inplace=True)
key_set = True
except KeyError:
continue
if not key_set:
raise KeyError(
f"Could not find pressure field (supported names are {prkeys})."
)
df.index.name = "Pressure [dbar]"
name = _basename(fname)[1]
dtypes = {"bpos": int, "pumps": bool, "flag": bool}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df | DataFrame constructor to open Seabird CTD CNV-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_cnv(data_path.joinpath('CTD_big.cnv.bz2'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['t090C'].plot_cast() | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/read.py#L335-L392 | [
"def _basename(fname):\n \"\"\"Return file name without path.\"\"\"\n if not isinstance(fname, Path):\n fname = Path(fname)\n path, name, ext = fname.parent, fname.stem, fname.suffix\n return path, name, ext\n",
"def _read_file(fname):\n if not isinstance(fname, Path):\n fname = Path(fname).resolve()\n\n extension = fname.suffix.lower()\n if extension in [\".gzip\", \".gz\", \".bz2\", \".zip\"]:\n contents = _open_compressed(fname)\n elif extension in [\".cnv\", \".edf\", \".txt\", \".ros\", \".btl\"]:\n contents = fname.read_bytes()\n else:\n raise ValueError(\n f\"Unrecognized file extension. Expected .cnv, .edf, .txt, .ros, or .btl got {extension}\"\n )\n # Read as bytes but we need to return strings for the parsers.\n text = contents.decode(encoding=\"utf-8\", errors=\"replace\")\n return StringIO(text)\n",
"def _parse_seabird(lines, ftype=\"cnv\"):\n # Initialize variables.\n lon = lat = time = None, None, None\n skiprows = 0\n\n metadata = {}\n header, config, names = [], [], []\n for k, line in enumerate(lines):\n line = line.strip()\n\n # Only cnv has columns names, for bottle files we will use the variable row.\n if ftype == \"cnv\":\n if \"# name\" in line:\n name, unit = line.split(\"=\")[1].split(\":\")\n name, unit = list(map(_normalize_names, (name, unit)))\n names.append(name)\n\n # Seabird headers starts with *.\n if line.startswith(\"*\"):\n header.append(line)\n\n # Seabird configuration starts with #.\n if line.startswith(\"#\"):\n config.append(line)\n\n # NMEA position and time.\n if \"NMEA Latitude\" in line:\n hemisphere = line[-1]\n lat = line.strip(hemisphere).split(\"=\")[1].strip()\n lat = np.float_(lat.split())\n if hemisphere == \"S\":\n lat = -(lat[0] + lat[1] / 60.0)\n elif hemisphere == \"N\":\n lat = lat[0] + lat[1] / 60.0\n else:\n raise ValueError(\"Latitude not recognized.\")\n if \"NMEA Longitude\" in line:\n hemisphere = line[-1]\n lon = line.strip(hemisphere).split(\"=\")[1].strip()\n lon = np.float_(lon.split())\n if hemisphere == \"W\":\n lon = -(lon[0] + lon[1] / 60.0)\n elif hemisphere == \"E\":\n lon = lon[0] + lon[1] / 60.0\n else:\n raise ValueError(\"Latitude not recognized.\")\n if \"NMEA UTC (Time)\" in line:\n time = line.split(\"=\")[-1].strip()\n # Should use some fuzzy datetime parser to make this more robust.\n time = datetime.strptime(time, \"%b %d %Y %H:%M:%S\")\n\n # cnv file header ends with *END* while\n if ftype == \"cnv\":\n if line == \"*END*\":\n skiprows = k + 1\n break\n else: # btl.\n # There is no *END* like in a .cnv file, skip two after header info.\n if not (line.startswith(\"*\") | line.startswith(\"#\")):\n # Fix commonly occurring problem when Sbeox.* exists in the file\n # the name is concatenated to previous parameter\n # example:\n # CStarAt0Sbeox0Mm/Kg to CStarAt0 Sbeox0Mm/Kg (really two 
different params)\n line = re.sub(r\"(\\S)Sbeox\", \"\\\\1 Sbeox\", line)\n\n names = line.split()\n skiprows = k + 2\n break\n if ftype == \"btl\":\n # Capture stat names column.\n names.append(\"Statistic\")\n metadata.update(\n {\n \"header\": \"\\n\".join(header),\n \"config\": \"\\n\".join(config),\n \"names\": names,\n \"skiprows\": skiprows,\n \"time\": time,\n \"lon\": lon,\n \"lat\": lat,\n }\n )\n return metadata\n"
] | import bz2
import gzip
import linecache
import re
import warnings
import zipfile
from datetime import datetime
from io import StringIO
from pathlib import Path
import numpy as np
import pandas as pd
def _basename(fname):
"""Return file name without path."""
if not isinstance(fname, Path):
fname = Path(fname)
path, name, ext = fname.parent, fname.stem, fname.suffix
return path, name, ext
def _normalize_names(name):
name = name.strip()
name = name.strip("*")
return name
def _open_compressed(fname):
extension = fname.suffix.lower()
if extension in [".gzip", ".gz"]:
cfile = gzip.open(str(fname))
elif extension == ".bz2":
cfile = bz2.BZ2File(str(fname))
elif extension == ".zip":
# NOTE: Zip format may contain more than one file in the archive
# (similar to tar), here we assume that there is just one file per
# zipfile! Also, we ask for the name because it can be different from
# the zipfile file!!
zfile = zipfile.ZipFile(str(fname))
name = zfile.namelist()[0]
cfile = zfile.open(name)
else:
raise ValueError(
"Unrecognized file extension. Expected .gzip, .bz2, or .zip, got {}".format(
extension
)
)
contents = cfile.read()
cfile.close()
return contents
def _read_file(fname):
if not isinstance(fname, Path):
fname = Path(fname).resolve()
extension = fname.suffix.lower()
if extension in [".gzip", ".gz", ".bz2", ".zip"]:
contents = _open_compressed(fname)
elif extension in [".cnv", ".edf", ".txt", ".ros", ".btl"]:
contents = fname.read_bytes()
else:
raise ValueError(
f"Unrecognized file extension. Expected .cnv, .edf, .txt, .ros, or .btl got {extension}"
)
# Read as bytes but we need to return strings for the parsers.
text = contents.decode(encoding="utf-8", errors="replace")
return StringIO(text)
def _parse_seabird(lines, ftype="cnv"):
# Initialize variables.
lon = lat = time = None, None, None
skiprows = 0
metadata = {}
header, config, names = [], [], []
for k, line in enumerate(lines):
line = line.strip()
# Only cnv has columns names, for bottle files we will use the variable row.
if ftype == "cnv":
if "# name" in line:
name, unit = line.split("=")[1].split(":")
name, unit = list(map(_normalize_names, (name, unit)))
names.append(name)
# Seabird headers starts with *.
if line.startswith("*"):
header.append(line)
# Seabird configuration starts with #.
if line.startswith("#"):
config.append(line)
# NMEA position and time.
if "NMEA Latitude" in line:
hemisphere = line[-1]
lat = line.strip(hemisphere).split("=")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
else:
raise ValueError("Latitude not recognized.")
if "NMEA Longitude" in line:
hemisphere = line[-1]
lon = line.strip(hemisphere).split("=")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
else:
raise ValueError("Latitude not recognized.")
if "NMEA UTC (Time)" in line:
time = line.split("=")[-1].strip()
# Should use some fuzzy datetime parser to make this more robust.
time = datetime.strptime(time, "%b %d %Y %H:%M:%S")
# cnv file header ends with *END* while
if ftype == "cnv":
if line == "*END*":
skiprows = k + 1
break
else: # btl.
# There is no *END* like in a .cnv file, skip two after header info.
if not (line.startswith("*") | line.startswith("#")):
# Fix commonly occurring problem when Sbeox.* exists in the file
# the name is concatenated to previous parameter
# example:
# CStarAt0Sbeox0Mm/Kg to CStarAt0 Sbeox0Mm/Kg (really two different params)
line = re.sub(r"(\S)Sbeox", "\\1 Sbeox", line)
names = line.split()
skiprows = k + 2
break
if ftype == "btl":
# Capture stat names column.
names.append("Statistic")
metadata.update(
{
"header": "\n".join(header),
"config": "\n".join(config),
"names": names,
"skiprows": skiprows,
"time": time,
"lon": lon,
"lat": lat,
}
)
return metadata
def from_bl(fname):
"""Read Seabird bottle-trip (bl) file
Example
-------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> df = ctd.from_bl(str(data_path.joinpath('bl', 'bottletest.bl')))
>>> df._metadata["time_of_reset"]
datetime.datetime(2018, 6, 25, 20, 8, 55)
"""
df = pd.read_csv(
fname,
skiprows=2,
parse_dates=[1],
index_col=0,
names=["bottle_number", "time", "startscan", "endscan"],
)
df._metadata = {
"time_of_reset": pd.to_datetime(
linecache.getline(str(fname), 2)[6:-1]
).to_pydatetime()
}
return df
def from_btl(fname):
"""
DataFrame constructor to open Seabird CTD BTL-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> bottles = ctd.from_btl(data_path.joinpath('btl', 'bottletest.btl'))
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="btl")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=False,
names=metadata["names"],
parse_dates=False,
skiprows=metadata["skiprows"],
)
f.close()
# At this point the data frame is not correctly lined up (multiple rows
# for avg, std, min, max or just avg, std, etc).
# Also needs date,time,and bottle number to be converted to one per line.
# Get row types, see what you have: avg, std, min, max or just avg, std.
rowtypes = df[df.columns[-1]].unique()
# Get times and dates which occur on second line of each bottle.
dates = df.iloc[:: len(rowtypes), 1].reset_index(drop=True)
times = df.iloc[1 :: len(rowtypes), 1].reset_index(drop=True)
datetimes = dates + " " + times
# Fill the Date column with datetimes.
df.loc[:: len(rowtypes), "Date"] = datetimes.values
df.loc[1 :: len(rowtypes), "Date"] = datetimes.values
# Fill missing rows.
df["Bottle"] = df["Bottle"].fillna(method="ffill")
df["Date"] = df["Date"].fillna(method="ffill")
df["Statistic"] = df["Statistic"].str.replace(r"\(|\)", "") # (avg) to avg
name = _basename(fname)[1]
dtypes = {
"bpos": int,
"pumps": bool,
"flag": bool,
"Bottle": int,
"Scan": int,
"Statistic": str,
"Date": str,
}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
df["Date"] = pd.to_datetime(df["Date"])
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
def from_edf(fname):
"""
DataFrame constructor to open XBT EDF ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_edf(data_path.joinpath('XBT.EDF.gz'))
>>> ax = cast['temperature'].plot_cast()
"""
f = _read_file(fname)
header, names = [], []
for k, line in enumerate(f.readlines()):
line = line.strip()
if line.startswith("Serial Number"):
serial = line.strip().split(":")[1].strip()
elif line.startswith("Latitude"):
try:
hemisphere = line[-1]
lat = line.strip(hemisphere).split(":")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
except (IndexError, ValueError):
lat = None
elif line.startswith("Longitude"):
try:
hemisphere = line[-1]
lon = line.strip(hemisphere).split(":")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
except (IndexError, ValueError):
lon = None
else:
header.append(line)
if line.startswith("Field"):
col, unit = [l.strip().lower() for l in line.split(":")]
names.append(unit.split()[0])
if line == "// Data":
skiprows = k + 1
break
f.seek(0)
df = pd.read_csv(
f,
header=None,
index_col=None,
names=names,
skiprows=skiprows,
delim_whitespace=True,
)
f.close()
df.set_index("depth", drop=True, inplace=True)
df.index.name = "Depth [m]"
name = _basename(fname)[1]
metadata = {
"lon": lon,
"lat": lat,
"name": str(name),
"header": "\n".join(header),
"serial": serial,
}
setattr(df, "_metadata", metadata)
return df
def from_fsi(fname, skiprows=9):
"""
DataFrame constructor to open Falmouth Scientific, Inc. (FSI) CTD
ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_fsi(data_path.joinpath('FSI.txt.gz'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['TEMP'].plot_cast()
"""
f = _read_file(fname)
df = pd.read_csv(
f,
header="infer",
index_col=None,
skiprows=skiprows,
dtype=float,
delim_whitespace=True,
)
f.close()
df.set_index("PRES", drop=True, inplace=True)
df.index.name = "Pressure [dbar]"
metadata = {"name": str(fname)}
setattr(df, "_metadata", metadata)
return df
def rosette_summary(fname):
"""
Make a BTL (bottle) file from a ROS (bottle log) file.
More control for the averaging process and at which step we want to
perform this averaging eliminating the need to read the data into SBE
Software again after pre-processing.
NOTE: Do not run LoopEdit on the upcast!
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> fname = data_path.joinpath('CTD/g01l01s01.ros')
>>> ros = ctd.rosette_summary(fname)
>>> ros = ros.groupby(ros.index).mean()
>>> ros.pressure.values.astype(int)
array([835, 806, 705, 604, 503, 404, 303, 201, 151, 100, 51, 1])
"""
ros = from_cnv(fname)
ros["pressure"] = ros.index.values.astype(float)
ros["nbf"] = ros["nbf"].astype(int)
ros.set_index("nbf", drop=True, inplace=True, verify_integrity=False)
return ros
|
pyoceans/python-ctd | ctd/read.py | from_fsi | python | def from_fsi(fname, skiprows=9):
f = _read_file(fname)
df = pd.read_csv(
f,
header="infer",
index_col=None,
skiprows=skiprows,
dtype=float,
delim_whitespace=True,
)
f.close()
df.set_index("PRES", drop=True, inplace=True)
df.index.name = "Pressure [dbar]"
metadata = {"name": str(fname)}
setattr(df, "_metadata", metadata)
return df | DataFrame constructor to open Falmouth Scientific, Inc. (FSI) CTD
ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_fsi(data_path.joinpath('FSI.txt.gz'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['TEMP'].plot_cast() | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/read.py#L395-L425 | [
"def _read_file(fname):\n if not isinstance(fname, Path):\n fname = Path(fname).resolve()\n\n extension = fname.suffix.lower()\n if extension in [\".gzip\", \".gz\", \".bz2\", \".zip\"]:\n contents = _open_compressed(fname)\n elif extension in [\".cnv\", \".edf\", \".txt\", \".ros\", \".btl\"]:\n contents = fname.read_bytes()\n else:\n raise ValueError(\n f\"Unrecognized file extension. Expected .cnv, .edf, .txt, .ros, or .btl got {extension}\"\n )\n # Read as bytes but we need to return strings for the parsers.\n text = contents.decode(encoding=\"utf-8\", errors=\"replace\")\n return StringIO(text)\n"
] | import bz2
import gzip
import linecache
import re
import warnings
import zipfile
from datetime import datetime
from io import StringIO
from pathlib import Path
import numpy as np
import pandas as pd
def _basename(fname):
"""Return file name without path."""
if not isinstance(fname, Path):
fname = Path(fname)
path, name, ext = fname.parent, fname.stem, fname.suffix
return path, name, ext
def _normalize_names(name):
name = name.strip()
name = name.strip("*")
return name
def _open_compressed(fname):
extension = fname.suffix.lower()
if extension in [".gzip", ".gz"]:
cfile = gzip.open(str(fname))
elif extension == ".bz2":
cfile = bz2.BZ2File(str(fname))
elif extension == ".zip":
# NOTE: Zip format may contain more than one file in the archive
# (similar to tar), here we assume that there is just one file per
# zipfile! Also, we ask for the name because it can be different from
# the zipfile file!!
zfile = zipfile.ZipFile(str(fname))
name = zfile.namelist()[0]
cfile = zfile.open(name)
else:
raise ValueError(
"Unrecognized file extension. Expected .gzip, .bz2, or .zip, got {}".format(
extension
)
)
contents = cfile.read()
cfile.close()
return contents
def _read_file(fname):
if not isinstance(fname, Path):
fname = Path(fname).resolve()
extension = fname.suffix.lower()
if extension in [".gzip", ".gz", ".bz2", ".zip"]:
contents = _open_compressed(fname)
elif extension in [".cnv", ".edf", ".txt", ".ros", ".btl"]:
contents = fname.read_bytes()
else:
raise ValueError(
f"Unrecognized file extension. Expected .cnv, .edf, .txt, .ros, or .btl got {extension}"
)
# Read as bytes but we need to return strings for the parsers.
text = contents.decode(encoding="utf-8", errors="replace")
return StringIO(text)
def _parse_seabird(lines, ftype="cnv"):
# Initialize variables.
lon = lat = time = None, None, None
skiprows = 0
metadata = {}
header, config, names = [], [], []
for k, line in enumerate(lines):
line = line.strip()
# Only cnv has columns names, for bottle files we will use the variable row.
if ftype == "cnv":
if "# name" in line:
name, unit = line.split("=")[1].split(":")
name, unit = list(map(_normalize_names, (name, unit)))
names.append(name)
# Seabird headers starts with *.
if line.startswith("*"):
header.append(line)
# Seabird configuration starts with #.
if line.startswith("#"):
config.append(line)
# NMEA position and time.
if "NMEA Latitude" in line:
hemisphere = line[-1]
lat = line.strip(hemisphere).split("=")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
else:
raise ValueError("Latitude not recognized.")
if "NMEA Longitude" in line:
hemisphere = line[-1]
lon = line.strip(hemisphere).split("=")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
else:
raise ValueError("Latitude not recognized.")
if "NMEA UTC (Time)" in line:
time = line.split("=")[-1].strip()
# Should use some fuzzy datetime parser to make this more robust.
time = datetime.strptime(time, "%b %d %Y %H:%M:%S")
# cnv file header ends with *END* while
if ftype == "cnv":
if line == "*END*":
skiprows = k + 1
break
else: # btl.
# There is no *END* like in a .cnv file, skip two after header info.
if not (line.startswith("*") | line.startswith("#")):
# Fix commonly occurring problem when Sbeox.* exists in the file
# the name is concatenated to previous parameter
# example:
# CStarAt0Sbeox0Mm/Kg to CStarAt0 Sbeox0Mm/Kg (really two different params)
line = re.sub(r"(\S)Sbeox", "\\1 Sbeox", line)
names = line.split()
skiprows = k + 2
break
if ftype == "btl":
# Capture stat names column.
names.append("Statistic")
metadata.update(
{
"header": "\n".join(header),
"config": "\n".join(config),
"names": names,
"skiprows": skiprows,
"time": time,
"lon": lon,
"lat": lat,
}
)
return metadata
def from_bl(fname):
"""Read Seabird bottle-trip (bl) file
Example
-------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> df = ctd.from_bl(str(data_path.joinpath('bl', 'bottletest.bl')))
>>> df._metadata["time_of_reset"]
datetime.datetime(2018, 6, 25, 20, 8, 55)
"""
df = pd.read_csv(
fname,
skiprows=2,
parse_dates=[1],
index_col=0,
names=["bottle_number", "time", "startscan", "endscan"],
)
df._metadata = {
"time_of_reset": pd.to_datetime(
linecache.getline(str(fname), 2)[6:-1]
).to_pydatetime()
}
return df
def from_btl(fname):
"""
DataFrame constructor to open Seabird CTD BTL-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> bottles = ctd.from_btl(data_path.joinpath('btl', 'bottletest.btl'))
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="btl")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=False,
names=metadata["names"],
parse_dates=False,
skiprows=metadata["skiprows"],
)
f.close()
# At this point the data frame is not correctly lined up (multiple rows
# for avg, std, min, max or just avg, std, etc).
# Also needs date,time,and bottle number to be converted to one per line.
# Get row types, see what you have: avg, std, min, max or just avg, std.
rowtypes = df[df.columns[-1]].unique()
# Get times and dates which occur on second line of each bottle.
dates = df.iloc[:: len(rowtypes), 1].reset_index(drop=True)
times = df.iloc[1 :: len(rowtypes), 1].reset_index(drop=True)
datetimes = dates + " " + times
# Fill the Date column with datetimes.
df.loc[:: len(rowtypes), "Date"] = datetimes.values
df.loc[1 :: len(rowtypes), "Date"] = datetimes.values
# Fill missing rows.
df["Bottle"] = df["Bottle"].fillna(method="ffill")
df["Date"] = df["Date"].fillna(method="ffill")
df["Statistic"] = df["Statistic"].str.replace(r"\(|\)", "") # (avg) to avg
name = _basename(fname)[1]
dtypes = {
"bpos": int,
"pumps": bool,
"flag": bool,
"Bottle": int,
"Scan": int,
"Statistic": str,
"Date": str,
}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
df["Date"] = pd.to_datetime(df["Date"])
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
def from_edf(fname):
"""
DataFrame constructor to open XBT EDF ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_edf(data_path.joinpath('XBT.EDF.gz'))
>>> ax = cast['temperature'].plot_cast()
"""
f = _read_file(fname)
header, names = [], []
for k, line in enumerate(f.readlines()):
line = line.strip()
if line.startswith("Serial Number"):
serial = line.strip().split(":")[1].strip()
elif line.startswith("Latitude"):
try:
hemisphere = line[-1]
lat = line.strip(hemisphere).split(":")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
except (IndexError, ValueError):
lat = None
elif line.startswith("Longitude"):
try:
hemisphere = line[-1]
lon = line.strip(hemisphere).split(":")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
except (IndexError, ValueError):
lon = None
else:
header.append(line)
if line.startswith("Field"):
col, unit = [l.strip().lower() for l in line.split(":")]
names.append(unit.split()[0])
if line == "// Data":
skiprows = k + 1
break
f.seek(0)
df = pd.read_csv(
f,
header=None,
index_col=None,
names=names,
skiprows=skiprows,
delim_whitespace=True,
)
f.close()
df.set_index("depth", drop=True, inplace=True)
df.index.name = "Depth [m]"
name = _basename(fname)[1]
metadata = {
"lon": lon,
"lat": lat,
"name": str(name),
"header": "\n".join(header),
"serial": serial,
}
setattr(df, "_metadata", metadata)
return df
def from_cnv(fname):
"""
DataFrame constructor to open Seabird CTD CNV-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_cnv(data_path.joinpath('CTD_big.cnv.bz2'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['t090C'].plot_cast()
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="cnv")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=None,
names=metadata["names"],
skiprows=metadata["skiprows"],
delim_whitespace=True,
widths=[11] * len(metadata["names"]),
)
f.close()
key_set = False
prkeys = ["prDM", "prdM", "pr"]
for prkey in prkeys:
try:
df.set_index(prkey, drop=True, inplace=True)
key_set = True
except KeyError:
continue
if not key_set:
raise KeyError(
f"Could not find pressure field (supported names are {prkeys})."
)
df.index.name = "Pressure [dbar]"
name = _basename(fname)[1]
dtypes = {"bpos": int, "pumps": bool, "flag": bool}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
def rosette_summary(fname):
"""
Make a BTL (bottle) file from a ROS (bottle log) file.
More control for the averaging process and at which step we want to
perform this averaging eliminating the need to read the data into SBE
Software again after pre-processing.
NOTE: Do not run LoopEdit on the upcast!
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> fname = data_path.joinpath('CTD/g01l01s01.ros')
>>> ros = ctd.rosette_summary(fname)
>>> ros = ros.groupby(ros.index).mean()
>>> ros.pressure.values.astype(int)
array([835, 806, 705, 604, 503, 404, 303, 201, 151, 100, 51, 1])
"""
ros = from_cnv(fname)
ros["pressure"] = ros.index.values.astype(float)
ros["nbf"] = ros["nbf"].astype(int)
ros.set_index("nbf", drop=True, inplace=True, verify_integrity=False)
return ros
|
pyoceans/python-ctd | ctd/read.py | rosette_summary | python | def rosette_summary(fname):
ros = from_cnv(fname)
ros["pressure"] = ros.index.values.astype(float)
ros["nbf"] = ros["nbf"].astype(int)
ros.set_index("nbf", drop=True, inplace=True, verify_integrity=False)
return ros | Make a BTL (bottle) file from a ROS (bottle log) file.
More control for the averaging process and at which step we want to
perform this averaging eliminating the need to read the data into SBE
Software again after pre-processing.
NOTE: Do not run LoopEdit on the upcast!
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> fname = data_path.joinpath('CTD/g01l01s01.ros')
>>> ros = ctd.rosette_summary(fname)
>>> ros = ros.groupby(ros.index).mean()
>>> ros.pressure.values.astype(int)
array([835, 806, 705, 604, 503, 404, 303, 201, 151, 100, 51, 1]) | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/read.py#L428-L453 | [
"def from_cnv(fname):\n \"\"\"\n DataFrame constructor to open Seabird CTD CNV-ASCII format.\n\n Examples\n --------\n >>> from pathlib import Path\n >>> import ctd\n >>> data_path = Path(__file__).parents[1].joinpath(\"tests\", \"data\")\n >>> cast = ctd.from_cnv(data_path.joinpath('CTD_big.cnv.bz2'))\n >>> downcast, upcast = cast.split()\n >>> ax = downcast['t090C'].plot_cast()\n\n \"\"\"\n f = _read_file(fname)\n metadata = _parse_seabird(f.readlines(), ftype=\"cnv\")\n\n f.seek(0)\n df = pd.read_fwf(\n f,\n header=None,\n index_col=None,\n names=metadata[\"names\"],\n skiprows=metadata[\"skiprows\"],\n delim_whitespace=True,\n widths=[11] * len(metadata[\"names\"]),\n )\n f.close()\n\n key_set = False\n prkeys = [\"prDM\", \"prdM\", \"pr\"]\n for prkey in prkeys:\n try:\n df.set_index(prkey, drop=True, inplace=True)\n key_set = True\n except KeyError:\n continue\n if not key_set:\n raise KeyError(\n f\"Could not find pressure field (supported names are {prkeys}).\"\n )\n df.index.name = \"Pressure [dbar]\"\n\n name = _basename(fname)[1]\n\n dtypes = {\"bpos\": int, \"pumps\": bool, \"flag\": bool}\n for column in df.columns:\n if column in dtypes:\n df[column] = df[column].astype(dtypes[column])\n else:\n try:\n df[column] = df[column].astype(float)\n except ValueError:\n warnings.warn(\"Could not convert %s to float.\" % column)\n\n metadata[\"name\"] = str(name)\n setattr(df, \"_metadata\", metadata)\n return df\n"
] | import bz2
import gzip
import linecache
import re
import warnings
import zipfile
from datetime import datetime
from io import StringIO
from pathlib import Path
import numpy as np
import pandas as pd
def _basename(fname):
"""Return file name without path."""
if not isinstance(fname, Path):
fname = Path(fname)
path, name, ext = fname.parent, fname.stem, fname.suffix
return path, name, ext
def _normalize_names(name):
name = name.strip()
name = name.strip("*")
return name
def _open_compressed(fname):
extension = fname.suffix.lower()
if extension in [".gzip", ".gz"]:
cfile = gzip.open(str(fname))
elif extension == ".bz2":
cfile = bz2.BZ2File(str(fname))
elif extension == ".zip":
# NOTE: Zip format may contain more than one file in the archive
# (similar to tar), here we assume that there is just one file per
# zipfile! Also, we ask for the name because it can be different from
# the zipfile file!!
zfile = zipfile.ZipFile(str(fname))
name = zfile.namelist()[0]
cfile = zfile.open(name)
else:
raise ValueError(
"Unrecognized file extension. Expected .gzip, .bz2, or .zip, got {}".format(
extension
)
)
contents = cfile.read()
cfile.close()
return contents
def _read_file(fname):
if not isinstance(fname, Path):
fname = Path(fname).resolve()
extension = fname.suffix.lower()
if extension in [".gzip", ".gz", ".bz2", ".zip"]:
contents = _open_compressed(fname)
elif extension in [".cnv", ".edf", ".txt", ".ros", ".btl"]:
contents = fname.read_bytes()
else:
raise ValueError(
f"Unrecognized file extension. Expected .cnv, .edf, .txt, .ros, or .btl got {extension}"
)
# Read as bytes but we need to return strings for the parsers.
text = contents.decode(encoding="utf-8", errors="replace")
return StringIO(text)
def _parse_seabird(lines, ftype="cnv"):
# Initialize variables.
lon = lat = time = None, None, None
skiprows = 0
metadata = {}
header, config, names = [], [], []
for k, line in enumerate(lines):
line = line.strip()
# Only cnv has columns names, for bottle files we will use the variable row.
if ftype == "cnv":
if "# name" in line:
name, unit = line.split("=")[1].split(":")
name, unit = list(map(_normalize_names, (name, unit)))
names.append(name)
# Seabird headers starts with *.
if line.startswith("*"):
header.append(line)
# Seabird configuration starts with #.
if line.startswith("#"):
config.append(line)
# NMEA position and time.
if "NMEA Latitude" in line:
hemisphere = line[-1]
lat = line.strip(hemisphere).split("=")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
else:
raise ValueError("Latitude not recognized.")
if "NMEA Longitude" in line:
hemisphere = line[-1]
lon = line.strip(hemisphere).split("=")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
else:
raise ValueError("Latitude not recognized.")
if "NMEA UTC (Time)" in line:
time = line.split("=")[-1].strip()
# Should use some fuzzy datetime parser to make this more robust.
time = datetime.strptime(time, "%b %d %Y %H:%M:%S")
# cnv file header ends with *END* while
if ftype == "cnv":
if line == "*END*":
skiprows = k + 1
break
else: # btl.
# There is no *END* like in a .cnv file, skip two after header info.
if not (line.startswith("*") | line.startswith("#")):
# Fix commonly occurring problem when Sbeox.* exists in the file
# the name is concatenated to previous parameter
# example:
# CStarAt0Sbeox0Mm/Kg to CStarAt0 Sbeox0Mm/Kg (really two different params)
line = re.sub(r"(\S)Sbeox", "\\1 Sbeox", line)
names = line.split()
skiprows = k + 2
break
if ftype == "btl":
# Capture stat names column.
names.append("Statistic")
metadata.update(
{
"header": "\n".join(header),
"config": "\n".join(config),
"names": names,
"skiprows": skiprows,
"time": time,
"lon": lon,
"lat": lat,
}
)
return metadata
def from_bl(fname):
"""Read Seabird bottle-trip (bl) file
Example
-------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> df = ctd.from_bl(str(data_path.joinpath('bl', 'bottletest.bl')))
>>> df._metadata["time_of_reset"]
datetime.datetime(2018, 6, 25, 20, 8, 55)
"""
df = pd.read_csv(
fname,
skiprows=2,
parse_dates=[1],
index_col=0,
names=["bottle_number", "time", "startscan", "endscan"],
)
df._metadata = {
"time_of_reset": pd.to_datetime(
linecache.getline(str(fname), 2)[6:-1]
).to_pydatetime()
}
return df
def from_btl(fname):
"""
DataFrame constructor to open Seabird CTD BTL-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> bottles = ctd.from_btl(data_path.joinpath('btl', 'bottletest.btl'))
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="btl")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=False,
names=metadata["names"],
parse_dates=False,
skiprows=metadata["skiprows"],
)
f.close()
# At this point the data frame is not correctly lined up (multiple rows
# for avg, std, min, max or just avg, std, etc).
# Also needs date,time,and bottle number to be converted to one per line.
# Get row types, see what you have: avg, std, min, max or just avg, std.
rowtypes = df[df.columns[-1]].unique()
# Get times and dates which occur on second line of each bottle.
dates = df.iloc[:: len(rowtypes), 1].reset_index(drop=True)
times = df.iloc[1 :: len(rowtypes), 1].reset_index(drop=True)
datetimes = dates + " " + times
# Fill the Date column with datetimes.
df.loc[:: len(rowtypes), "Date"] = datetimes.values
df.loc[1 :: len(rowtypes), "Date"] = datetimes.values
# Fill missing rows.
df["Bottle"] = df["Bottle"].fillna(method="ffill")
df["Date"] = df["Date"].fillna(method="ffill")
df["Statistic"] = df["Statistic"].str.replace(r"\(|\)", "") # (avg) to avg
name = _basename(fname)[1]
dtypes = {
"bpos": int,
"pumps": bool,
"flag": bool,
"Bottle": int,
"Scan": int,
"Statistic": str,
"Date": str,
}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
df["Date"] = pd.to_datetime(df["Date"])
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
def from_edf(fname):
"""
DataFrame constructor to open XBT EDF ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_edf(data_path.joinpath('XBT.EDF.gz'))
>>> ax = cast['temperature'].plot_cast()
"""
f = _read_file(fname)
header, names = [], []
for k, line in enumerate(f.readlines()):
line = line.strip()
if line.startswith("Serial Number"):
serial = line.strip().split(":")[1].strip()
elif line.startswith("Latitude"):
try:
hemisphere = line[-1]
lat = line.strip(hemisphere).split(":")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
except (IndexError, ValueError):
lat = None
elif line.startswith("Longitude"):
try:
hemisphere = line[-1]
lon = line.strip(hemisphere).split(":")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
except (IndexError, ValueError):
lon = None
else:
header.append(line)
if line.startswith("Field"):
col, unit = [l.strip().lower() for l in line.split(":")]
names.append(unit.split()[0])
if line == "// Data":
skiprows = k + 1
break
f.seek(0)
df = pd.read_csv(
f,
header=None,
index_col=None,
names=names,
skiprows=skiprows,
delim_whitespace=True,
)
f.close()
df.set_index("depth", drop=True, inplace=True)
df.index.name = "Depth [m]"
name = _basename(fname)[1]
metadata = {
"lon": lon,
"lat": lat,
"name": str(name),
"header": "\n".join(header),
"serial": serial,
}
setattr(df, "_metadata", metadata)
return df
def from_cnv(fname):
"""
DataFrame constructor to open Seabird CTD CNV-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_cnv(data_path.joinpath('CTD_big.cnv.bz2'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['t090C'].plot_cast()
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="cnv")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=None,
names=metadata["names"],
skiprows=metadata["skiprows"],
delim_whitespace=True,
widths=[11] * len(metadata["names"]),
)
f.close()
key_set = False
prkeys = ["prDM", "prdM", "pr"]
for prkey in prkeys:
try:
df.set_index(prkey, drop=True, inplace=True)
key_set = True
except KeyError:
continue
if not key_set:
raise KeyError(
f"Could not find pressure field (supported names are {prkeys})."
)
df.index.name = "Pressure [dbar]"
name = _basename(fname)[1]
dtypes = {"bpos": int, "pumps": bool, "flag": bool}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
def from_fsi(fname, skiprows=9):
"""
DataFrame constructor to open Falmouth Scientific, Inc. (FSI) CTD
ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_fsi(data_path.joinpath('FSI.txt.gz'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['TEMP'].plot_cast()
"""
f = _read_file(fname)
df = pd.read_csv(
f,
header="infer",
index_col=None,
skiprows=skiprows,
dtype=float,
delim_whitespace=True,
)
f.close()
df.set_index("PRES", drop=True, inplace=True)
df.index.name = "Pressure [dbar]"
metadata = {"name": str(fname)}
setattr(df, "_metadata", metadata)
return df
|
pyoceans/python-ctd | ctd/extras.py | _extrap1d | python | def _extrap1d(interpolator):
xs, ys = interpolator.x, interpolator.y
def pointwise(x):
if x < xs[0]:
return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
elif x > xs[-1]:
return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (
xs[-1] - xs[-2]
)
else:
return interpolator(x)
def ufunclike(xs):
return np.array(list(map(pointwise, np.array(xs))))
return ufunclike | http://stackoverflow.com/questions/2745329/
How to make scipy.interpolate return an extrapolated result beyond the
input range. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/extras.py#L7-L29 | null | import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
from pandas import Series
def get_maxdepth(self):
valid_last_depth = self.apply(Series.notnull).values.T
return np.float_(self.index.values * valid_last_depth).max(axis=1)
def extrap_sec(data, dist, depth, w1=1.0, w2=0):
"""
Extrapolates `data` to zones where the shallow stations are shadowed by
the deep stations. The shadow region usually cannot be extrapolates via
linear interpolation.
The extrapolation is applied using the gradients of the `data` at a certain
level.
Parameters
----------
data : array_like
Data to be extrapolated
dist : array_like
Stations distance
fd : float
Decay factor [0-1]
Returns
-------
Sec_extrap : array_like
Extrapolated variable
"""
from scipy.interpolate import interp1d
new_data1 = []
for row in data:
mask = ~np.isnan(row)
if mask.any():
y = row[mask]
if y.size == 1:
row = np.repeat(y, len(mask))
else:
x = dist[mask]
f_i = interp1d(x, y)
f_x = _extrap1d(f_i)
row = f_x(dist)
new_data1.append(row)
new_data2 = []
for col in data.T:
mask = ~np.isnan(col)
if mask.any():
y = col[mask]
if y.size == 1:
col = np.repeat(y, len(mask))
else:
z = depth[mask]
f_i = interp1d(z, y)
f_z = _extrap1d(f_i)
col = f_z(depth)
new_data2.append(col)
new_data = np.array(new_data1) * w1 + np.array(new_data2).T * w2
return new_data
def gen_topomask(h, lon, lat, dx=1.0, kind="linear", plot=False):
"""
Generates a topography mask from an oceanographic transect taking the
deepest CTD scan as the depth of each station.
Inputs
------
h : array
Pressure of the deepest CTD scan for each station [dbar].
lons : array
Longitude of each station [decimal degrees east].
lat : Latitude of each station. [decimal degrees north].
dx : float
Horizontal resolution of the output arrays [km].
kind : string, optional
Type of the interpolation to be performed.
See scipy.interpolate.interp1d documentation for details.
plot : bool
Whether to plot mask for visualization.
Outputs
-------
xm : array
Horizontal distances [km].
hm : array
Local depth [m].
Author
------
André Palóczy Filho (paloczy@gmail.com) -- October/2012
"""
import gsw
from scipy.interpolate import interp1d
h, lon, lat = list(map(np.asanyarray, (h, lon, lat)))
# Distance in km.
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
h = -gsw.z_from_p(h, lat.mean())
Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
xm = np.arange(0, x.max() + dx, dx)
hm = Ih(xm)
return xm, hm
def plot_section(self, reverse=False, filled=False, **kw):
import gsw
lon, lat, data = list(
map(np.asanyarray, (self.lon, self.lat, self.values))
)
data = ma.masked_invalid(data)
h = self.get_maxdepth()
if reverse:
lon = lon[::-1]
lat = lat[::-1]
data = data.T[::-1].T
h = h[::-1]
lon, lat = map(np.atleast_2d, (lon, lat))
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
z = self.index.values.astype(float)
if filled: # CAVEAT: this method cause discontinuities.
data = data.filled(fill_value=np.nan)
data = extrap_sec(data, x, z, w1=0.97, w2=0.03)
# Contour key words.
extend = kw.pop("extend", "both")
fontsize = kw.pop("fontsize", 12)
labelsize = kw.pop("labelsize", 11)
cmap = kw.pop("cmap", plt.cm.rainbow)
levels = kw.pop(
"levels",
np.arange(np.floor(data.min()), np.ceil(data.max()) + 0.5, 0.5),
)
# Colorbar key words.
pad = kw.pop("pad", 0.04)
aspect = kw.pop("aspect", 40)
shrink = kw.pop("shrink", 0.9)
fraction = kw.pop("fraction", 0.05)
# Topography mask key words.
dx = kw.pop("dx", 1.0)
kind = kw.pop("kind", "linear")
linewidth = kw.pop("linewidth", 1.5)
# Station symbols key words.
station_marker = kw.pop("station_marker", None)
color = kw.pop("color", "k")
offset = kw.pop("offset", -5)
alpha = kw.pop("alpha", 0.5)
# Figure.
figsize = kw.pop("figsize", (12, 6))
fig, ax = plt.subplots(figsize=figsize)
xm, hm = gen_topomask(h, lon, lat, dx=dx, kind=kind)
ax.plot(xm, hm, color="black", linewidth=linewidth, zorder=3)
ax.fill_between(xm, hm, y2=hm.max(), color="0.9", zorder=3)
if station_marker:
ax.plot(
x,
[offset] * len(h),
color=color,
marker=station_marker,
alpha=alpha,
zorder=5,
)
ax.set_xlabel("Cross-shore distance [km]", fontsize=fontsize)
ax.set_ylabel("Depth [m]", fontsize=fontsize)
ax.set_ylim(offset, hm.max())
ax.invert_yaxis()
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_label_position("top")
ax.yaxis.set_ticks_position("left")
ax.yaxis.set_label_position("left")
ax.xaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
ax.yaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
# Color version.
cs = ax.contourf(
x,
z,
data,
cmap=cmap,
levels=levels,
alpha=1.0,
extend=extend,
zorder=2,
) # manual=True
# Colorbar.
cb = fig.colorbar(
mappable=cs,
ax=ax,
orientation="vertical",
aspect=aspect,
shrink=shrink,
fraction=fraction,
pad=pad,
)
return fig, ax, cb
def cell_thermal_mass(temperature, conductivity):
"""
Sample interval is measured in seconds.
Temperature in degrees.
CTM is calculated in S/m.
"""
alpha = 0.03 # Thermal anomaly amplitude.
beta = 1.0 / 7 # Thermal anomaly time constant (1/beta).
sample_interval = 1 / 15.0
a = 2 * alpha / (sample_interval * beta + 2)
b = 1 - (2 * a / alpha)
dCodT = 0.1 * (1 + 0.006 * [temperature - 20])
dT = np.diff(temperature)
ctm = -1.0 * b * conductivity + a * (dCodT) * dT # [S/m]
return ctm
def mixed_layer_depth(CT, method="half degree"):
if method == "half degree":
mask = CT[0] - CT < 0.5
else:
mask = np.zeros_like(CT)
return Series(mask, index=CT.index, name="MLD")
def barrier_layer_thickness(SA, CT):
"""
Compute the thickness of water separating the mixed surface layer from the
thermocline. A more precise definition would be the difference between
mixed layer depth (MLD) calculated from temperature minus the mixed layer
depth calculated using density.
"""
import gsw
sigma_theta = gsw.sigma0(SA, CT)
mask = mixed_layer_depth(CT)
mld = np.where(mask)[0][-1]
sig_surface = sigma_theta[0]
sig_bottom_mld = gsw.sigma0(SA[0], CT[mld])
d_sig_t = sig_surface - sig_bottom_mld
d_sig = sigma_theta - sig_bottom_mld
mask = d_sig < d_sig_t # Barrier layer.
return Series(mask, index=SA.index, name="BLT")
|
pyoceans/python-ctd | ctd/extras.py | extrap_sec | python | def extrap_sec(data, dist, depth, w1=1.0, w2=0):
from scipy.interpolate import interp1d
new_data1 = []
for row in data:
mask = ~np.isnan(row)
if mask.any():
y = row[mask]
if y.size == 1:
row = np.repeat(y, len(mask))
else:
x = dist[mask]
f_i = interp1d(x, y)
f_x = _extrap1d(f_i)
row = f_x(dist)
new_data1.append(row)
new_data2 = []
for col in data.T:
mask = ~np.isnan(col)
if mask.any():
y = col[mask]
if y.size == 1:
col = np.repeat(y, len(mask))
else:
z = depth[mask]
f_i = interp1d(z, y)
f_z = _extrap1d(f_i)
col = f_z(depth)
new_data2.append(col)
new_data = np.array(new_data1) * w1 + np.array(new_data2).T * w2
return new_data | Extrapolates `data` to zones where the shallow stations are shadowed by
the deep stations. The shadow region usually cannot be extrapolates via
linear interpolation.
The extrapolation is applied using the gradients of the `data` at a certain
level.
Parameters
----------
data : array_like
Data to be extrapolated
dist : array_like
Stations distance
fd : float
Decay factor [0-1]
Returns
-------
Sec_extrap : array_like
Extrapolated variable | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/extras.py#L37-L93 | [
"def _extrap1d(interpolator):\n \"\"\"\n http://stackoverflow.com/questions/2745329/\n How to make scipy.interpolate return an extrapolated result beyond the\n input range.\n\n \"\"\"\n xs, ys = interpolator.x, interpolator.y\n\n def pointwise(x):\n if x < xs[0]:\n return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])\n elif x > xs[-1]:\n return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (\n xs[-1] - xs[-2]\n )\n else:\n return interpolator(x)\n\n def ufunclike(xs):\n return np.array(list(map(pointwise, np.array(xs))))\n\n return ufunclike\n",
"def ufunclike(xs):\n return np.array(list(map(pointwise, np.array(xs))))\n"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
from pandas import Series
def _extrap1d(interpolator):
"""
http://stackoverflow.com/questions/2745329/
How to make scipy.interpolate return an extrapolated result beyond the
input range.
"""
xs, ys = interpolator.x, interpolator.y
def pointwise(x):
if x < xs[0]:
return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
elif x > xs[-1]:
return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (
xs[-1] - xs[-2]
)
else:
return interpolator(x)
def ufunclike(xs):
return np.array(list(map(pointwise, np.array(xs))))
return ufunclike
def get_maxdepth(self):
valid_last_depth = self.apply(Series.notnull).values.T
return np.float_(self.index.values * valid_last_depth).max(axis=1)
def gen_topomask(h, lon, lat, dx=1.0, kind="linear", plot=False):
"""
Generates a topography mask from an oceanographic transect taking the
deepest CTD scan as the depth of each station.
Inputs
------
h : array
Pressure of the deepest CTD scan for each station [dbar].
lons : array
Longitude of each station [decimal degrees east].
lat : Latitude of each station. [decimal degrees north].
dx : float
Horizontal resolution of the output arrays [km].
kind : string, optional
Type of the interpolation to be performed.
See scipy.interpolate.interp1d documentation for details.
plot : bool
Whether to plot mask for visualization.
Outputs
-------
xm : array
Horizontal distances [km].
hm : array
Local depth [m].
Author
------
André Palóczy Filho (paloczy@gmail.com) -- October/2012
"""
import gsw
from scipy.interpolate import interp1d
h, lon, lat = list(map(np.asanyarray, (h, lon, lat)))
# Distance in km.
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
h = -gsw.z_from_p(h, lat.mean())
Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
xm = np.arange(0, x.max() + dx, dx)
hm = Ih(xm)
return xm, hm
def plot_section(self, reverse=False, filled=False, **kw):
import gsw
lon, lat, data = list(
map(np.asanyarray, (self.lon, self.lat, self.values))
)
data = ma.masked_invalid(data)
h = self.get_maxdepth()
if reverse:
lon = lon[::-1]
lat = lat[::-1]
data = data.T[::-1].T
h = h[::-1]
lon, lat = map(np.atleast_2d, (lon, lat))
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
z = self.index.values.astype(float)
if filled: # CAVEAT: this method cause discontinuities.
data = data.filled(fill_value=np.nan)
data = extrap_sec(data, x, z, w1=0.97, w2=0.03)
# Contour key words.
extend = kw.pop("extend", "both")
fontsize = kw.pop("fontsize", 12)
labelsize = kw.pop("labelsize", 11)
cmap = kw.pop("cmap", plt.cm.rainbow)
levels = kw.pop(
"levels",
np.arange(np.floor(data.min()), np.ceil(data.max()) + 0.5, 0.5),
)
# Colorbar key words.
pad = kw.pop("pad", 0.04)
aspect = kw.pop("aspect", 40)
shrink = kw.pop("shrink", 0.9)
fraction = kw.pop("fraction", 0.05)
# Topography mask key words.
dx = kw.pop("dx", 1.0)
kind = kw.pop("kind", "linear")
linewidth = kw.pop("linewidth", 1.5)
# Station symbols key words.
station_marker = kw.pop("station_marker", None)
color = kw.pop("color", "k")
offset = kw.pop("offset", -5)
alpha = kw.pop("alpha", 0.5)
# Figure.
figsize = kw.pop("figsize", (12, 6))
fig, ax = plt.subplots(figsize=figsize)
xm, hm = gen_topomask(h, lon, lat, dx=dx, kind=kind)
ax.plot(xm, hm, color="black", linewidth=linewidth, zorder=3)
ax.fill_between(xm, hm, y2=hm.max(), color="0.9", zorder=3)
if station_marker:
ax.plot(
x,
[offset] * len(h),
color=color,
marker=station_marker,
alpha=alpha,
zorder=5,
)
ax.set_xlabel("Cross-shore distance [km]", fontsize=fontsize)
ax.set_ylabel("Depth [m]", fontsize=fontsize)
ax.set_ylim(offset, hm.max())
ax.invert_yaxis()
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_label_position("top")
ax.yaxis.set_ticks_position("left")
ax.yaxis.set_label_position("left")
ax.xaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
ax.yaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
# Color version.
cs = ax.contourf(
x,
z,
data,
cmap=cmap,
levels=levels,
alpha=1.0,
extend=extend,
zorder=2,
) # manual=True
# Colorbar.
cb = fig.colorbar(
mappable=cs,
ax=ax,
orientation="vertical",
aspect=aspect,
shrink=shrink,
fraction=fraction,
pad=pad,
)
return fig, ax, cb
def cell_thermal_mass(temperature, conductivity):
"""
Sample interval is measured in seconds.
Temperature in degrees.
CTM is calculated in S/m.
"""
alpha = 0.03 # Thermal anomaly amplitude.
beta = 1.0 / 7 # Thermal anomaly time constant (1/beta).
sample_interval = 1 / 15.0
a = 2 * alpha / (sample_interval * beta + 2)
b = 1 - (2 * a / alpha)
dCodT = 0.1 * (1 + 0.006 * [temperature - 20])
dT = np.diff(temperature)
ctm = -1.0 * b * conductivity + a * (dCodT) * dT # [S/m]
return ctm
def mixed_layer_depth(CT, method="half degree"):
if method == "half degree":
mask = CT[0] - CT < 0.5
else:
mask = np.zeros_like(CT)
return Series(mask, index=CT.index, name="MLD")
def barrier_layer_thickness(SA, CT):
"""
Compute the thickness of water separating the mixed surface layer from the
thermocline. A more precise definition would be the difference between
mixed layer depth (MLD) calculated from temperature minus the mixed layer
depth calculated using density.
"""
import gsw
sigma_theta = gsw.sigma0(SA, CT)
mask = mixed_layer_depth(CT)
mld = np.where(mask)[0][-1]
sig_surface = sigma_theta[0]
sig_bottom_mld = gsw.sigma0(SA[0], CT[mld])
d_sig_t = sig_surface - sig_bottom_mld
d_sig = sigma_theta - sig_bottom_mld
mask = d_sig < d_sig_t # Barrier layer.
return Series(mask, index=SA.index, name="BLT")
|
pyoceans/python-ctd | ctd/extras.py | gen_topomask | python | def gen_topomask(h, lon, lat, dx=1.0, kind="linear", plot=False):
import gsw
from scipy.interpolate import interp1d
h, lon, lat = list(map(np.asanyarray, (h, lon, lat)))
# Distance in km.
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
h = -gsw.z_from_p(h, lat.mean())
Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
xm = np.arange(0, x.max() + dx, dx)
hm = Ih(xm)
return xm, hm | Generates a topography mask from an oceanographic transect taking the
deepest CTD scan as the depth of each station.
Inputs
------
h : array
Pressure of the deepest CTD scan for each station [dbar].
lons : array
Longitude of each station [decimal degrees east].
lat : Latitude of each station. [decimal degrees north].
dx : float
Horizontal resolution of the output arrays [km].
kind : string, optional
Type of the interpolation to be performed.
See scipy.interpolate.interp1d documentation for details.
plot : bool
Whether to plot mask for visualization.
Outputs
-------
xm : array
Horizontal distances [km].
hm : array
Local depth [m].
Author
------
André Palóczy Filho (paloczy@gmail.com) -- October/2012 | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/extras.py#L96-L140 | null | import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
from pandas import Series
def _extrap1d(interpolator):
"""
http://stackoverflow.com/questions/2745329/
How to make scipy.interpolate return an extrapolated result beyond the
input range.
"""
xs, ys = interpolator.x, interpolator.y
def pointwise(x):
if x < xs[0]:
return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
elif x > xs[-1]:
return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (
xs[-1] - xs[-2]
)
else:
return interpolator(x)
def ufunclike(xs):
return np.array(list(map(pointwise, np.array(xs))))
return ufunclike
def get_maxdepth(self):
valid_last_depth = self.apply(Series.notnull).values.T
return np.float_(self.index.values * valid_last_depth).max(axis=1)
def extrap_sec(data, dist, depth, w1=1.0, w2=0):
"""
Extrapolates `data` to zones where the shallow stations are shadowed by
the deep stations. The shadow region usually cannot be extrapolates via
linear interpolation.
The extrapolation is applied using the gradients of the `data` at a certain
level.
Parameters
----------
data : array_like
Data to be extrapolated
dist : array_like
Stations distance
fd : float
Decay factor [0-1]
Returns
-------
Sec_extrap : array_like
Extrapolated variable
"""
from scipy.interpolate import interp1d
new_data1 = []
for row in data:
mask = ~np.isnan(row)
if mask.any():
y = row[mask]
if y.size == 1:
row = np.repeat(y, len(mask))
else:
x = dist[mask]
f_i = interp1d(x, y)
f_x = _extrap1d(f_i)
row = f_x(dist)
new_data1.append(row)
new_data2 = []
for col in data.T:
mask = ~np.isnan(col)
if mask.any():
y = col[mask]
if y.size == 1:
col = np.repeat(y, len(mask))
else:
z = depth[mask]
f_i = interp1d(z, y)
f_z = _extrap1d(f_i)
col = f_z(depth)
new_data2.append(col)
new_data = np.array(new_data1) * w1 + np.array(new_data2).T * w2
return new_data
def plot_section(self, reverse=False, filled=False, **kw):
import gsw
lon, lat, data = list(
map(np.asanyarray, (self.lon, self.lat, self.values))
)
data = ma.masked_invalid(data)
h = self.get_maxdepth()
if reverse:
lon = lon[::-1]
lat = lat[::-1]
data = data.T[::-1].T
h = h[::-1]
lon, lat = map(np.atleast_2d, (lon, lat))
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
z = self.index.values.astype(float)
if filled: # CAVEAT: this method cause discontinuities.
data = data.filled(fill_value=np.nan)
data = extrap_sec(data, x, z, w1=0.97, w2=0.03)
# Contour key words.
extend = kw.pop("extend", "both")
fontsize = kw.pop("fontsize", 12)
labelsize = kw.pop("labelsize", 11)
cmap = kw.pop("cmap", plt.cm.rainbow)
levels = kw.pop(
"levels",
np.arange(np.floor(data.min()), np.ceil(data.max()) + 0.5, 0.5),
)
# Colorbar key words.
pad = kw.pop("pad", 0.04)
aspect = kw.pop("aspect", 40)
shrink = kw.pop("shrink", 0.9)
fraction = kw.pop("fraction", 0.05)
# Topography mask key words.
dx = kw.pop("dx", 1.0)
kind = kw.pop("kind", "linear")
linewidth = kw.pop("linewidth", 1.5)
# Station symbols key words.
station_marker = kw.pop("station_marker", None)
color = kw.pop("color", "k")
offset = kw.pop("offset", -5)
alpha = kw.pop("alpha", 0.5)
# Figure.
figsize = kw.pop("figsize", (12, 6))
fig, ax = plt.subplots(figsize=figsize)
xm, hm = gen_topomask(h, lon, lat, dx=dx, kind=kind)
ax.plot(xm, hm, color="black", linewidth=linewidth, zorder=3)
ax.fill_between(xm, hm, y2=hm.max(), color="0.9", zorder=3)
if station_marker:
ax.plot(
x,
[offset] * len(h),
color=color,
marker=station_marker,
alpha=alpha,
zorder=5,
)
ax.set_xlabel("Cross-shore distance [km]", fontsize=fontsize)
ax.set_ylabel("Depth [m]", fontsize=fontsize)
ax.set_ylim(offset, hm.max())
ax.invert_yaxis()
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_label_position("top")
ax.yaxis.set_ticks_position("left")
ax.yaxis.set_label_position("left")
ax.xaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
ax.yaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
# Color version.
cs = ax.contourf(
x,
z,
data,
cmap=cmap,
levels=levels,
alpha=1.0,
extend=extend,
zorder=2,
) # manual=True
# Colorbar.
cb = fig.colorbar(
mappable=cs,
ax=ax,
orientation="vertical",
aspect=aspect,
shrink=shrink,
fraction=fraction,
pad=pad,
)
return fig, ax, cb
def cell_thermal_mass(temperature, conductivity):
"""
Sample interval is measured in seconds.
Temperature in degrees.
CTM is calculated in S/m.
"""
alpha = 0.03 # Thermal anomaly amplitude.
beta = 1.0 / 7 # Thermal anomaly time constant (1/beta).
sample_interval = 1 / 15.0
a = 2 * alpha / (sample_interval * beta + 2)
b = 1 - (2 * a / alpha)
dCodT = 0.1 * (1 + 0.006 * [temperature - 20])
dT = np.diff(temperature)
ctm = -1.0 * b * conductivity + a * (dCodT) * dT # [S/m]
return ctm
def mixed_layer_depth(CT, method="half degree"):
if method == "half degree":
mask = CT[0] - CT < 0.5
else:
mask = np.zeros_like(CT)
return Series(mask, index=CT.index, name="MLD")
def barrier_layer_thickness(SA, CT):
"""
Compute the thickness of water separating the mixed surface layer from the
thermocline. A more precise definition would be the difference between
mixed layer depth (MLD) calculated from temperature minus the mixed layer
depth calculated using density.
"""
import gsw
sigma_theta = gsw.sigma0(SA, CT)
mask = mixed_layer_depth(CT)
mld = np.where(mask)[0][-1]
sig_surface = sigma_theta[0]
sig_bottom_mld = gsw.sigma0(SA[0], CT[mld])
d_sig_t = sig_surface - sig_bottom_mld
d_sig = sigma_theta - sig_bottom_mld
mask = d_sig < d_sig_t # Barrier layer.
return Series(mask, index=SA.index, name="BLT")
|
pyoceans/python-ctd | ctd/extras.py | cell_thermal_mass | python | def cell_thermal_mass(temperature, conductivity):
alpha = 0.03 # Thermal anomaly amplitude.
beta = 1.0 / 7 # Thermal anomaly time constant (1/beta).
sample_interval = 1 / 15.0
a = 2 * alpha / (sample_interval * beta + 2)
b = 1 - (2 * a / alpha)
dCodT = 0.1 * (1 + 0.006 * [temperature - 20])
dT = np.diff(temperature)
ctm = -1.0 * b * conductivity + a * (dCodT) * dT # [S/m]
return ctm | Sample interval is measured in seconds.
Temperature in degrees.
CTM is calculated in S/m. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/extras.py#L243-L260 | null | import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
from pandas import Series
def _extrap1d(interpolator):
"""
http://stackoverflow.com/questions/2745329/
How to make scipy.interpolate return an extrapolated result beyond the
input range.
"""
xs, ys = interpolator.x, interpolator.y
def pointwise(x):
if x < xs[0]:
return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
elif x > xs[-1]:
return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (
xs[-1] - xs[-2]
)
else:
return interpolator(x)
def ufunclike(xs):
return np.array(list(map(pointwise, np.array(xs))))
return ufunclike
def get_maxdepth(self):
valid_last_depth = self.apply(Series.notnull).values.T
return np.float_(self.index.values * valid_last_depth).max(axis=1)
def extrap_sec(data, dist, depth, w1=1.0, w2=0):
"""
Extrapolates `data` to zones where the shallow stations are shadowed by
the deep stations. The shadow region usually cannot be extrapolates via
linear interpolation.
The extrapolation is applied using the gradients of the `data` at a certain
level.
Parameters
----------
data : array_like
Data to be extrapolated
dist : array_like
Stations distance
fd : float
Decay factor [0-1]
Returns
-------
Sec_extrap : array_like
Extrapolated variable
"""
from scipy.interpolate import interp1d
new_data1 = []
for row in data:
mask = ~np.isnan(row)
if mask.any():
y = row[mask]
if y.size == 1:
row = np.repeat(y, len(mask))
else:
x = dist[mask]
f_i = interp1d(x, y)
f_x = _extrap1d(f_i)
row = f_x(dist)
new_data1.append(row)
new_data2 = []
for col in data.T:
mask = ~np.isnan(col)
if mask.any():
y = col[mask]
if y.size == 1:
col = np.repeat(y, len(mask))
else:
z = depth[mask]
f_i = interp1d(z, y)
f_z = _extrap1d(f_i)
col = f_z(depth)
new_data2.append(col)
new_data = np.array(new_data1) * w1 + np.array(new_data2).T * w2
return new_data
def gen_topomask(h, lon, lat, dx=1.0, kind="linear", plot=False):
"""
Generates a topography mask from an oceanographic transect taking the
deepest CTD scan as the depth of each station.
Inputs
------
h : array
Pressure of the deepest CTD scan for each station [dbar].
lons : array
Longitude of each station [decimal degrees east].
lat : Latitude of each station. [decimal degrees north].
dx : float
Horizontal resolution of the output arrays [km].
kind : string, optional
Type of the interpolation to be performed.
See scipy.interpolate.interp1d documentation for details.
plot : bool
Whether to plot mask for visualization.
Outputs
-------
xm : array
Horizontal distances [km].
hm : array
Local depth [m].
Author
------
André Palóczy Filho (paloczy@gmail.com) -- October/2012
"""
import gsw
from scipy.interpolate import interp1d
h, lon, lat = list(map(np.asanyarray, (h, lon, lat)))
# Distance in km.
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
h = -gsw.z_from_p(h, lat.mean())
Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
xm = np.arange(0, x.max() + dx, dx)
hm = Ih(xm)
return xm, hm
def plot_section(self, reverse=False, filled=False, **kw):
import gsw
lon, lat, data = list(
map(np.asanyarray, (self.lon, self.lat, self.values))
)
data = ma.masked_invalid(data)
h = self.get_maxdepth()
if reverse:
lon = lon[::-1]
lat = lat[::-1]
data = data.T[::-1].T
h = h[::-1]
lon, lat = map(np.atleast_2d, (lon, lat))
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
z = self.index.values.astype(float)
if filled: # CAVEAT: this method cause discontinuities.
data = data.filled(fill_value=np.nan)
data = extrap_sec(data, x, z, w1=0.97, w2=0.03)
# Contour key words.
extend = kw.pop("extend", "both")
fontsize = kw.pop("fontsize", 12)
labelsize = kw.pop("labelsize", 11)
cmap = kw.pop("cmap", plt.cm.rainbow)
levels = kw.pop(
"levels",
np.arange(np.floor(data.min()), np.ceil(data.max()) + 0.5, 0.5),
)
# Colorbar key words.
pad = kw.pop("pad", 0.04)
aspect = kw.pop("aspect", 40)
shrink = kw.pop("shrink", 0.9)
fraction = kw.pop("fraction", 0.05)
# Topography mask key words.
dx = kw.pop("dx", 1.0)
kind = kw.pop("kind", "linear")
linewidth = kw.pop("linewidth", 1.5)
# Station symbols key words.
station_marker = kw.pop("station_marker", None)
color = kw.pop("color", "k")
offset = kw.pop("offset", -5)
alpha = kw.pop("alpha", 0.5)
# Figure.
figsize = kw.pop("figsize", (12, 6))
fig, ax = plt.subplots(figsize=figsize)
xm, hm = gen_topomask(h, lon, lat, dx=dx, kind=kind)
ax.plot(xm, hm, color="black", linewidth=linewidth, zorder=3)
ax.fill_between(xm, hm, y2=hm.max(), color="0.9", zorder=3)
if station_marker:
ax.plot(
x,
[offset] * len(h),
color=color,
marker=station_marker,
alpha=alpha,
zorder=5,
)
ax.set_xlabel("Cross-shore distance [km]", fontsize=fontsize)
ax.set_ylabel("Depth [m]", fontsize=fontsize)
ax.set_ylim(offset, hm.max())
ax.invert_yaxis()
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_label_position("top")
ax.yaxis.set_ticks_position("left")
ax.yaxis.set_label_position("left")
ax.xaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
ax.yaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
# Color version.
cs = ax.contourf(
x,
z,
data,
cmap=cmap,
levels=levels,
alpha=1.0,
extend=extend,
zorder=2,
) # manual=True
# Colorbar.
cb = fig.colorbar(
mappable=cs,
ax=ax,
orientation="vertical",
aspect=aspect,
shrink=shrink,
fraction=fraction,
pad=pad,
)
return fig, ax, cb
def mixed_layer_depth(CT, method="half degree"):
if method == "half degree":
mask = CT[0] - CT < 0.5
else:
mask = np.zeros_like(CT)
return Series(mask, index=CT.index, name="MLD")
def barrier_layer_thickness(SA, CT):
"""
Compute the thickness of water separating the mixed surface layer from the
thermocline. A more precise definition would be the difference between
mixed layer depth (MLD) calculated from temperature minus the mixed layer
depth calculated using density.
"""
import gsw
sigma_theta = gsw.sigma0(SA, CT)
mask = mixed_layer_depth(CT)
mld = np.where(mask)[0][-1]
sig_surface = sigma_theta[0]
sig_bottom_mld = gsw.sigma0(SA[0], CT[mld])
d_sig_t = sig_surface - sig_bottom_mld
d_sig = sigma_theta - sig_bottom_mld
mask = d_sig < d_sig_t # Barrier layer.
return Series(mask, index=SA.index, name="BLT")
|
pyoceans/python-ctd | ctd/extras.py | barrier_layer_thickness | python | def barrier_layer_thickness(SA, CT):
import gsw
sigma_theta = gsw.sigma0(SA, CT)
mask = mixed_layer_depth(CT)
mld = np.where(mask)[0][-1]
sig_surface = sigma_theta[0]
sig_bottom_mld = gsw.sigma0(SA[0], CT[mld])
d_sig_t = sig_surface - sig_bottom_mld
d_sig = sigma_theta - sig_bottom_mld
mask = d_sig < d_sig_t # Barrier layer.
return Series(mask, index=SA.index, name="BLT") | Compute the thickness of water separating the mixed surface layer from the
thermocline. A more precise definition would be the difference between
mixed layer depth (MLD) calculated from temperature minus the mixed layer
depth calculated using density. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/extras.py#L271-L289 | [
"def mixed_layer_depth(CT, method=\"half degree\"):\n if method == \"half degree\":\n mask = CT[0] - CT < 0.5\n else:\n mask = np.zeros_like(CT)\n return Series(mask, index=CT.index, name=\"MLD\")\n"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
from pandas import Series
def _extrap1d(interpolator):
"""
http://stackoverflow.com/questions/2745329/
How to make scipy.interpolate return an extrapolated result beyond the
input range.
"""
xs, ys = interpolator.x, interpolator.y
def pointwise(x):
if x < xs[0]:
return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
elif x > xs[-1]:
return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (
xs[-1] - xs[-2]
)
else:
return interpolator(x)
def ufunclike(xs):
return np.array(list(map(pointwise, np.array(xs))))
return ufunclike
def get_maxdepth(self):
valid_last_depth = self.apply(Series.notnull).values.T
return np.float_(self.index.values * valid_last_depth).max(axis=1)
def extrap_sec(data, dist, depth, w1=1.0, w2=0):
"""
Extrapolates `data` to zones where the shallow stations are shadowed by
the deep stations. The shadow region usually cannot be extrapolates via
linear interpolation.
The extrapolation is applied using the gradients of the `data` at a certain
level.
Parameters
----------
data : array_like
Data to be extrapolated
dist : array_like
Stations distance
fd : float
Decay factor [0-1]
Returns
-------
Sec_extrap : array_like
Extrapolated variable
"""
from scipy.interpolate import interp1d
new_data1 = []
for row in data:
mask = ~np.isnan(row)
if mask.any():
y = row[mask]
if y.size == 1:
row = np.repeat(y, len(mask))
else:
x = dist[mask]
f_i = interp1d(x, y)
f_x = _extrap1d(f_i)
row = f_x(dist)
new_data1.append(row)
new_data2 = []
for col in data.T:
mask = ~np.isnan(col)
if mask.any():
y = col[mask]
if y.size == 1:
col = np.repeat(y, len(mask))
else:
z = depth[mask]
f_i = interp1d(z, y)
f_z = _extrap1d(f_i)
col = f_z(depth)
new_data2.append(col)
new_data = np.array(new_data1) * w1 + np.array(new_data2).T * w2
return new_data
def gen_topomask(h, lon, lat, dx=1.0, kind="linear", plot=False):
"""
Generates a topography mask from an oceanographic transect taking the
deepest CTD scan as the depth of each station.
Inputs
------
h : array
Pressure of the deepest CTD scan for each station [dbar].
lons : array
Longitude of each station [decimal degrees east].
lat : Latitude of each station. [decimal degrees north].
dx : float
Horizontal resolution of the output arrays [km].
kind : string, optional
Type of the interpolation to be performed.
See scipy.interpolate.interp1d documentation for details.
plot : bool
Whether to plot mask for visualization.
Outputs
-------
xm : array
Horizontal distances [km].
hm : array
Local depth [m].
Author
------
André Palóczy Filho (paloczy@gmail.com) -- October/2012
"""
import gsw
from scipy.interpolate import interp1d
h, lon, lat = list(map(np.asanyarray, (h, lon, lat)))
# Distance in km.
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
h = -gsw.z_from_p(h, lat.mean())
Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
xm = np.arange(0, x.max() + dx, dx)
hm = Ih(xm)
return xm, hm
def plot_section(self, reverse=False, filled=False, **kw):
import gsw
lon, lat, data = list(
map(np.asanyarray, (self.lon, self.lat, self.values))
)
data = ma.masked_invalid(data)
h = self.get_maxdepth()
if reverse:
lon = lon[::-1]
lat = lat[::-1]
data = data.T[::-1].T
h = h[::-1]
lon, lat = map(np.atleast_2d, (lon, lat))
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
z = self.index.values.astype(float)
if filled: # CAVEAT: this method cause discontinuities.
data = data.filled(fill_value=np.nan)
data = extrap_sec(data, x, z, w1=0.97, w2=0.03)
# Contour key words.
extend = kw.pop("extend", "both")
fontsize = kw.pop("fontsize", 12)
labelsize = kw.pop("labelsize", 11)
cmap = kw.pop("cmap", plt.cm.rainbow)
levels = kw.pop(
"levels",
np.arange(np.floor(data.min()), np.ceil(data.max()) + 0.5, 0.5),
)
# Colorbar key words.
pad = kw.pop("pad", 0.04)
aspect = kw.pop("aspect", 40)
shrink = kw.pop("shrink", 0.9)
fraction = kw.pop("fraction", 0.05)
# Topography mask key words.
dx = kw.pop("dx", 1.0)
kind = kw.pop("kind", "linear")
linewidth = kw.pop("linewidth", 1.5)
# Station symbols key words.
station_marker = kw.pop("station_marker", None)
color = kw.pop("color", "k")
offset = kw.pop("offset", -5)
alpha = kw.pop("alpha", 0.5)
# Figure.
figsize = kw.pop("figsize", (12, 6))
fig, ax = plt.subplots(figsize=figsize)
xm, hm = gen_topomask(h, lon, lat, dx=dx, kind=kind)
ax.plot(xm, hm, color="black", linewidth=linewidth, zorder=3)
ax.fill_between(xm, hm, y2=hm.max(), color="0.9", zorder=3)
if station_marker:
ax.plot(
x,
[offset] * len(h),
color=color,
marker=station_marker,
alpha=alpha,
zorder=5,
)
ax.set_xlabel("Cross-shore distance [km]", fontsize=fontsize)
ax.set_ylabel("Depth [m]", fontsize=fontsize)
ax.set_ylim(offset, hm.max())
ax.invert_yaxis()
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_label_position("top")
ax.yaxis.set_ticks_position("left")
ax.yaxis.set_label_position("left")
ax.xaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
ax.yaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
# Color version.
cs = ax.contourf(
x,
z,
data,
cmap=cmap,
levels=levels,
alpha=1.0,
extend=extend,
zorder=2,
) # manual=True
# Colorbar.
cb = fig.colorbar(
mappable=cs,
ax=ax,
orientation="vertical",
aspect=aspect,
shrink=shrink,
fraction=fraction,
pad=pad,
)
return fig, ax, cb
def cell_thermal_mass(temperature, conductivity):
"""
Sample interval is measured in seconds.
Temperature in degrees.
CTM is calculated in S/m.
"""
alpha = 0.03 # Thermal anomaly amplitude.
beta = 1.0 / 7 # Thermal anomaly time constant (1/beta).
sample_interval = 1 / 15.0
a = 2 * alpha / (sample_interval * beta + 2)
b = 1 - (2 * a / alpha)
dCodT = 0.1 * (1 + 0.006 * [temperature - 20])
dT = np.diff(temperature)
ctm = -1.0 * b * conductivity + a * (dCodT) * dT # [S/m]
return ctm
def mixed_layer_depth(CT, method="half degree"):
if method == "half degree":
mask = CT[0] - CT < 0.5
else:
mask = np.zeros_like(CT)
return Series(mask, index=CT.index, name="MLD")
|
pyoceans/python-ctd | ctd/plotting.py | plot_cast | python | def plot_cast(df, secondary_y=False, label=None, *args, **kwargs):
ax = kwargs.pop("ax", None)
fignums = plt.get_fignums()
if ax is None and not fignums:
ax = plt.axes()
fig = ax.get_figure()
fig.set_size_inches((5.25, 6.75))
else:
ax = plt.gca()
fig = plt.gcf()
figsize = kwargs.pop("figsize", fig.get_size_inches())
fig.set_size_inches(figsize)
y_inverted = False
if not getattr(ax, "y_inverted", False):
setattr(ax, "y_inverted", True)
y_inverted = True
if secondary_y:
ax = ax.twiny()
xlabel = getattr(df, "name", None)
ylabel = getattr(df.index, "name", None)
if isinstance(df, pd.DataFrame):
labels = label if label else df.columns
for k, (col, series) in enumerate(df.iteritems()):
ax.plot(series, series.index, label=labels[k])
elif isinstance(df, pd.Series):
label = label if label else str(df.name)
ax.plot(df.values, df.index, label=label, *args, **kwargs)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
if y_inverted and not secondary_y:
ax.invert_yaxis()
return ax | Plot a CTD variable with the index in the y-axis instead of x-axis. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/plotting.py#L8-L50 | null | import matplotlib.pyplot as plt
import pandas as pd
from pandas_flavor import register_dataframe_method, register_series_method
@register_series_method
@register_dataframe_method
|
pyoceans/python-ctd | ctd/processing.py | _rolling_window | python | def _rolling_window(data, block):
shape = data.shape[:-1] + (data.shape[-1] - block + 1, block)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides) | http://stackoverflow.com/questions/4936620/
Using strides for an efficient moving average filter. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/processing.py#L7-L15 | null | import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas_flavor import register_dataframe_method, register_series_method
@register_series_method
@register_dataframe_method
def remove_above_water(df):
new_df = df.copy()
return new_df[new_df.index >= 0]
@register_series_method
@register_dataframe_method
def split(df):
"""Returns a tuple with down/up-cast."""
idx = df.index.argmax() + 1
down = df.iloc[:idx]
# Reverse index to orient it as a CTD cast.
up = df.iloc[idx:][::-1]
return down, up
@register_series_method
@register_dataframe_method
def lp_filter(df, sample_rate=24.0, time_constant=0.15):
"""
Filter a series with `time_constant` (use 0.15 s for pressure), and for
a signal of `sample_rate` in Hertz (24 Hz for 911+).
NOTE: 911+ systems do not require filter for temperature nor salinity.
Examples
--------
>>> from pathlib import Path
>>> import matplotlib.pyplot as plt
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> raw = ctd.from_cnv(data_path.joinpath('CTD-spiked-unfiltered.cnv.bz2'))
>>> prc = ctd.from_cnv(data_path.joinpath('CTD-spiked-filtered.cnv.bz2'))
>>> kw = {"sample_rate": 24.0, "time_constant": 0.15}
>>> original = prc.index.values
>>> unfiltered = raw.index.values
>>> filtered = raw.lp_filter(**kw).index.values
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(original, 'k', label='original')
>>> l2, = ax.plot(unfiltered, 'r', label='unfiltered')
>>> l3, = ax.plot(filtered, 'g', label='filtered')
>>> leg = ax.legend()
Notes
-----
https://scipy-cookbook.readthedocs.io/items/FIRFilter.html
"""
from scipy import signal
# Butter is closer to what SBE is doing with their cosine filter.
Wn = (1.0 / time_constant) / (sample_rate * 2.0)
b, a = signal.butter(2, Wn, "low")
new_df = df.copy()
new_df.index = signal.filtfilt(b, a, df.index.values)
return new_df
@register_series_method
@register_dataframe_method
def press_check(df):
"""
Remove pressure reversals from the index.
"""
new_df = df.copy()
press = new_df.copy().index.values
ref = press[0]
inversions = np.diff(np.r_[press, press[-1]]) < 0
mask = np.zeros_like(inversions)
for k, p in enumerate(inversions):
if p:
ref = press[k]
cut = press[k + 1 :] < ref
mask[k + 1 :][cut] = True
new_df[mask] = np.NaN
return new_df
@register_series_method
@register_dataframe_method
def bindata(df, delta=1.0, method="average"):
"""
Bin average the index (usually pressure) to a given interval (default
delta = 1).
"""
start = np.floor(df.index[0])
stop = np.ceil(df.index[-1])
new_index = np.arange(start, stop, delta)
binned = pd.cut(df.index, bins=new_index)
if method == "average":
new_df = df.groupby(binned).mean()
new_df.index = new_index[:-1]
elif method == "interpolate":
raise NotImplementedError(
"Bin-average via interpolation method is not Implemented yet."
)
else:
raise ValueError(
f"Expected method `average` or `interpolate`, but got {method}."
)
return new_df
def _despike(series, n1, n2, block, keep):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
data = series.values.astype(float).copy()
roll = _rolling_window(data, block)
roll = ma.masked_invalid(roll)
std = n1 * roll.std(axis=1)
mean = roll.mean(axis=1)
# Use the last value to fill-up.
std = np.r_[std, np.tile(std[-1], block - 1)]
mean = np.r_[mean, np.tile(mean[-1], block - 1)]
mask = np.abs(data - mean.filled(fill_value=np.NaN)) > std.filled(
fill_value=np.NaN
)
data[mask] = np.NaN
# Pass two recompute the mean and std without the flagged values from pass
# one and removed the flagged data.
roll = _rolling_window(data, block)
roll = ma.masked_invalid(roll)
std = n2 * roll.std(axis=1)
mean = roll.mean(axis=1)
# Use the last value to fill-up.
std = np.r_[std, np.tile(std[-1], block - 1)]
mean = np.r_[mean, np.tile(mean[-1], block - 1)]
values = series.values.astype(float)
mask = np.abs(values - mean.filled(fill_value=np.NaN)) > std.filled(
fill_value=np.NaN
)
clean = series.astype(float).copy()
clean[mask] = np.NaN
return clean
@register_series_method
@register_dataframe_method
def despike(df, n1=2, n2=20, block=100, keep=0):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
if isinstance(df, pd.Series):
new_df = _despike(df, n1=n1, n2=n2, block=block, keep=keep)
else:
new_df = df.apply(_despike, n1=n1, n2=n2, block=block, keep=keep)
return new_df
def _smooth(series, window_len, window):
"""Smooth the data using a window with requested size."""
windows = {
"flat": np.ones,
"hanning": np.hanning,
"hamming": np.hamming,
"bartlett": np.bartlett,
"blackman": np.blackman,
}
data = series.values.copy()
if window_len < 3:
return pd.Series(data, index=series.index, name=series.name)
if window not in list(windows.keys()):
raise ValueError(
"""window must be one of 'flat', 'hanning',
'hamming', 'bartlett', 'blackman'"""
)
s = np.r_[
2 * data[0] - data[window_len:1:-1],
data,
2 * data[-1] - data[-1:-window_len:-1],
]
w = windows[window](window_len)
data = np.convolve(w / w.sum(), s, mode="same")
data = data[window_len - 1 : -window_len + 1]
return pd.Series(data, index=series.index, name=series.name)
@register_series_method
@register_dataframe_method
def smooth(df, window_len=11, window="hanning"):
"""Smooth the data using a window with requested size."""
if isinstance(df, pd.Series):
new_df = _smooth(df, window_len=window_len, window=window)
else:
new_df = df.apply(_smooth, window_len=window_len, window=window)
return new_df
def _movingaverage(series, window_size=48):
window = np.ones(int(window_size)) / float(window_size)
return pd.Series(np.convolve(series, window, "same"), index=series.index)
@register_series_method
@register_dataframe_method
def movingaverage(df, window_size=48):
if isinstance(df, pd.Series):
new_df = _movingaverage(df, window_size=window_size)
else:
new_df = df.apply(_movingaverage, window_size=window_size)
return new_df
|
pyoceans/python-ctd | ctd/processing.py | split | python | def split(df):
idx = df.index.argmax() + 1
down = df.iloc[:idx]
# Reverse index to orient it as a CTD cast.
up = df.iloc[idx:][::-1]
return down, up | Returns a tuple with down/up-cast. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/processing.py#L27-L33 | null | import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas_flavor import register_dataframe_method, register_series_method
def _rolling_window(data, block):
"""
http://stackoverflow.com/questions/4936620/
Using strides for an efficient moving average filter.
"""
shape = data.shape[:-1] + (data.shape[-1] - block + 1, block)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@register_series_method
@register_dataframe_method
def remove_above_water(df):
new_df = df.copy()
return new_df[new_df.index >= 0]
@register_series_method
@register_dataframe_method
@register_series_method
@register_dataframe_method
def lp_filter(df, sample_rate=24.0, time_constant=0.15):
"""
Filter a series with `time_constant` (use 0.15 s for pressure), and for
a signal of `sample_rate` in Hertz (24 Hz for 911+).
NOTE: 911+ systems do not require filter for temperature nor salinity.
Examples
--------
>>> from pathlib import Path
>>> import matplotlib.pyplot as plt
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> raw = ctd.from_cnv(data_path.joinpath('CTD-spiked-unfiltered.cnv.bz2'))
>>> prc = ctd.from_cnv(data_path.joinpath('CTD-spiked-filtered.cnv.bz2'))
>>> kw = {"sample_rate": 24.0, "time_constant": 0.15}
>>> original = prc.index.values
>>> unfiltered = raw.index.values
>>> filtered = raw.lp_filter(**kw).index.values
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(original, 'k', label='original')
>>> l2, = ax.plot(unfiltered, 'r', label='unfiltered')
>>> l3, = ax.plot(filtered, 'g', label='filtered')
>>> leg = ax.legend()
Notes
-----
https://scipy-cookbook.readthedocs.io/items/FIRFilter.html
"""
from scipy import signal
# Butter is closer to what SBE is doing with their cosine filter.
Wn = (1.0 / time_constant) / (sample_rate * 2.0)
b, a = signal.butter(2, Wn, "low")
new_df = df.copy()
new_df.index = signal.filtfilt(b, a, df.index.values)
return new_df
@register_series_method
@register_dataframe_method
def press_check(df):
"""
Remove pressure reversals from the index.
"""
new_df = df.copy()
press = new_df.copy().index.values
ref = press[0]
inversions = np.diff(np.r_[press, press[-1]]) < 0
mask = np.zeros_like(inversions)
for k, p in enumerate(inversions):
if p:
ref = press[k]
cut = press[k + 1 :] < ref
mask[k + 1 :][cut] = True
new_df[mask] = np.NaN
return new_df
@register_series_method
@register_dataframe_method
def bindata(df, delta=1.0, method="average"):
    """
    Bin average the index (usually pressure) to a given interval (default
    delta = 1).
    """
    left = np.floor(df.index[0])
    right = np.ceil(df.index[-1])
    edges = np.arange(left, right, delta)
    if method == "interpolate":
        raise NotImplementedError(
            "Bin-average via interpolation method is not Implemented yet."
        )
    if method != "average":
        raise ValueError(
            f"Expected method `average` or `interpolate`, but got {method}."
        )
    labels = pd.cut(df.index, bins=edges)
    averaged = df.groupby(labels).mean()
    # Label each bin with its left edge.
    averaged.index = edges[:-1]
    return averaged
def _despike(series, n1, n2, block, keep):
    """
    Wild Edit Seabird-like function. Passes with Standard deviation
    `n1` and `n2` with window size `block`.

    Pass one flags values farther than ``n1`` rolling standard deviations
    from the rolling mean; pass two recomputes the statistics with those
    values removed and masks anything beyond ``n2`` standard deviations.
    Returns a float copy of ``series`` with outliers replaced by NaN.

    NOTE(review): the ``keep`` argument is currently unused — confirm
    whether it was meant to limit how many flagged points are kept.
    """
    data = series.values.astype(float).copy()
    # Rolling statistics over `block`-sized windows; masking invalid
    # entries lets NaNs be ignored by mean/std.
    roll = _rolling_window(data, block)
    roll = ma.masked_invalid(roll)
    std = n1 * roll.std(axis=1)
    mean = roll.mean(axis=1)
    # Use the last value to fill-up.
    std = np.r_[std, np.tile(std[-1], block - 1)]
    mean = np.r_[mean, np.tile(mean[-1], block - 1)]
    mask = np.abs(data - mean.filled(fill_value=np.NaN)) > std.filled(
        fill_value=np.NaN
    )
    data[mask] = np.NaN
    # Pass two recompute the mean and std without the flagged values from pass
    # one and removed the flagged data.
    roll = _rolling_window(data, block)
    roll = ma.masked_invalid(roll)
    std = n2 * roll.std(axis=1)
    mean = roll.mean(axis=1)
    # Use the last value to fill-up.
    std = np.r_[std, np.tile(std[-1], block - 1)]
    mean = np.r_[mean, np.tile(mean[-1], block - 1)]
    # Compare the *original* values against the pass-two statistics.
    values = series.values.astype(float)
    mask = np.abs(values - mean.filled(fill_value=np.NaN)) > std.filled(
        fill_value=np.NaN
    )
    clean = series.astype(float).copy()
    clean[mask] = np.NaN
    return clean
@register_series_method
@register_dataframe_method
def despike(df, n1=2, n2=20, block=100, keep=0):
    """
    Wild Edit Seabird-like function. Passes with Standard deviation
    `n1` and `n2` with window size `block`.
    """
    options = {"n1": n1, "n2": n2, "block": block, "keep": keep}
    if isinstance(df, pd.Series):
        return _despike(df, **options)
    # DataFrame: despike each column independently.
    return df.apply(_despike, **options)
def _smooth(series, window_len, window):
"""Smooth the data using a window with requested size."""
windows = {
"flat": np.ones,
"hanning": np.hanning,
"hamming": np.hamming,
"bartlett": np.bartlett,
"blackman": np.blackman,
}
data = series.values.copy()
if window_len < 3:
return pd.Series(data, index=series.index, name=series.name)
if window not in list(windows.keys()):
raise ValueError(
"""window must be one of 'flat', 'hanning',
'hamming', 'bartlett', 'blackman'"""
)
s = np.r_[
2 * data[0] - data[window_len:1:-1],
data,
2 * data[-1] - data[-1:-window_len:-1],
]
w = windows[window](window_len)
data = np.convolve(w / w.sum(), s, mode="same")
data = data[window_len - 1 : -window_len + 1]
return pd.Series(data, index=series.index, name=series.name)
@register_series_method
@register_dataframe_method
def smooth(df, window_len=11, window="hanning"):
    """Smooth the data using a window with requested size."""
    if isinstance(df, pd.Series):
        return _smooth(df, window_len=window_len, window=window)
    # DataFrame: smooth each column independently.
    return df.apply(_smooth, window_len=window_len, window=window)
def _movingaverage(series, window_size=48):
window = np.ones(int(window_size)) / float(window_size)
return pd.Series(np.convolve(series, window, "same"), index=series.index)
@register_series_method
@register_dataframe_method
def movingaverage(df, window_size=48):
    """Moving average with a flat window of `window_size` samples."""
    if isinstance(df, pd.Series):
        return _movingaverage(df, window_size=window_size)
    # DataFrame: average each column independently.
    return df.apply(_movingaverage, window_size=window_size)
|
pyoceans/python-ctd | ctd/processing.py | lp_filter | python | def lp_filter(df, sample_rate=24.0, time_constant=0.15):
from scipy import signal
# Butter is closer to what SBE is doing with their cosine filter.
Wn = (1.0 / time_constant) / (sample_rate * 2.0)
b, a = signal.butter(2, Wn, "low")
new_df = df.copy()
new_df.index = signal.filtfilt(b, a, df.index.values)
return new_df | Filter a series with `time_constant` (use 0.15 s for pressure), and for
a signal of `sample_rate` in Hertz (24 Hz for 911+).
NOTE: 911+ systems do not require filter for temperature nor salinity.
Examples
--------
>>> from pathlib import Path
>>> import matplotlib.pyplot as plt
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> raw = ctd.from_cnv(data_path.joinpath('CTD-spiked-unfiltered.cnv.bz2'))
>>> prc = ctd.from_cnv(data_path.joinpath('CTD-spiked-filtered.cnv.bz2'))
>>> kw = {"sample_rate": 24.0, "time_constant": 0.15}
>>> original = prc.index.values
>>> unfiltered = raw.index.values
>>> filtered = raw.lp_filter(**kw).index.values
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(original, 'k', label='original')
>>> l2, = ax.plot(unfiltered, 'r', label='unfiltered')
>>> l3, = ax.plot(filtered, 'g', label='filtered')
>>> leg = ax.legend()
Notes
-----
https://scipy-cookbook.readthedocs.io/items/FIRFilter.html | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/processing.py#L38-L75 | null | import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas_flavor import register_dataframe_method, register_series_method
def _rolling_window(data, block):
"""
http://stackoverflow.com/questions/4936620/
Using strides for an efficient moving average filter.
"""
shape = data.shape[:-1] + (data.shape[-1] - block + 1, block)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@register_series_method
@register_dataframe_method
def remove_above_water(df):
new_df = df.copy()
return new_df[new_df.index >= 0]
@register_series_method
@register_dataframe_method
def split(df):
"""Returns a tuple with down/up-cast."""
idx = df.index.argmax() + 1
down = df.iloc[:idx]
# Reverse index to orient it as a CTD cast.
up = df.iloc[idx:][::-1]
return down, up
@register_series_method
@register_dataframe_method
@register_series_method
@register_dataframe_method
def press_check(df):
"""
Remove pressure reversals from the index.
"""
new_df = df.copy()
press = new_df.copy().index.values
ref = press[0]
inversions = np.diff(np.r_[press, press[-1]]) < 0
mask = np.zeros_like(inversions)
for k, p in enumerate(inversions):
if p:
ref = press[k]
cut = press[k + 1 :] < ref
mask[k + 1 :][cut] = True
new_df[mask] = np.NaN
return new_df
@register_series_method
@register_dataframe_method
def bindata(df, delta=1.0, method="average"):
"""
Bin average the index (usually pressure) to a given interval (default
delta = 1).
"""
start = np.floor(df.index[0])
stop = np.ceil(df.index[-1])
new_index = np.arange(start, stop, delta)
binned = pd.cut(df.index, bins=new_index)
if method == "average":
new_df = df.groupby(binned).mean()
new_df.index = new_index[:-1]
elif method == "interpolate":
raise NotImplementedError(
"Bin-average via interpolation method is not Implemented yet."
)
else:
raise ValueError(
f"Expected method `average` or `interpolate`, but got {method}."
)
return new_df
def _despike(series, n1, n2, block, keep):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
data = series.values.astype(float).copy()
roll = _rolling_window(data, block)
roll = ma.masked_invalid(roll)
std = n1 * roll.std(axis=1)
mean = roll.mean(axis=1)
# Use the last value to fill-up.
std = np.r_[std, np.tile(std[-1], block - 1)]
mean = np.r_[mean, np.tile(mean[-1], block - 1)]
mask = np.abs(data - mean.filled(fill_value=np.NaN)) > std.filled(
fill_value=np.NaN
)
data[mask] = np.NaN
# Pass two recompute the mean and std without the flagged values from pass
# one and removed the flagged data.
roll = _rolling_window(data, block)
roll = ma.masked_invalid(roll)
std = n2 * roll.std(axis=1)
mean = roll.mean(axis=1)
# Use the last value to fill-up.
std = np.r_[std, np.tile(std[-1], block - 1)]
mean = np.r_[mean, np.tile(mean[-1], block - 1)]
values = series.values.astype(float)
mask = np.abs(values - mean.filled(fill_value=np.NaN)) > std.filled(
fill_value=np.NaN
)
clean = series.astype(float).copy()
clean[mask] = np.NaN
return clean
@register_series_method
@register_dataframe_method
def despike(df, n1=2, n2=20, block=100, keep=0):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
if isinstance(df, pd.Series):
new_df = _despike(df, n1=n1, n2=n2, block=block, keep=keep)
else:
new_df = df.apply(_despike, n1=n1, n2=n2, block=block, keep=keep)
return new_df
def _smooth(series, window_len, window):
"""Smooth the data using a window with requested size."""
windows = {
"flat": np.ones,
"hanning": np.hanning,
"hamming": np.hamming,
"bartlett": np.bartlett,
"blackman": np.blackman,
}
data = series.values.copy()
if window_len < 3:
return pd.Series(data, index=series.index, name=series.name)
if window not in list(windows.keys()):
raise ValueError(
"""window must be one of 'flat', 'hanning',
'hamming', 'bartlett', 'blackman'"""
)
s = np.r_[
2 * data[0] - data[window_len:1:-1],
data,
2 * data[-1] - data[-1:-window_len:-1],
]
w = windows[window](window_len)
data = np.convolve(w / w.sum(), s, mode="same")
data = data[window_len - 1 : -window_len + 1]
return pd.Series(data, index=series.index, name=series.name)
@register_series_method
@register_dataframe_method
def smooth(df, window_len=11, window="hanning"):
"""Smooth the data using a window with requested size."""
if isinstance(df, pd.Series):
new_df = _smooth(df, window_len=window_len, window=window)
else:
new_df = df.apply(_smooth, window_len=window_len, window=window)
return new_df
def _movingaverage(series, window_size=48):
window = np.ones(int(window_size)) / float(window_size)
return pd.Series(np.convolve(series, window, "same"), index=series.index)
@register_series_method
@register_dataframe_method
def movingaverage(df, window_size=48):
if isinstance(df, pd.Series):
new_df = _movingaverage(df, window_size=window_size)
else:
new_df = df.apply(_movingaverage, window_size=window_size)
return new_df
|
pyoceans/python-ctd | ctd/processing.py | press_check | python | def press_check(df):
new_df = df.copy()
press = new_df.copy().index.values
ref = press[0]
inversions = np.diff(np.r_[press, press[-1]]) < 0
mask = np.zeros_like(inversions)
for k, p in enumerate(inversions):
if p:
ref = press[k]
cut = press[k + 1 :] < ref
mask[k + 1 :][cut] = True
new_df[mask] = np.NaN
return new_df | Remove pressure reversals from the index. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/processing.py#L80-L97 | null | import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas_flavor import register_dataframe_method, register_series_method
def _rolling_window(data, block):
"""
http://stackoverflow.com/questions/4936620/
Using strides for an efficient moving average filter.
"""
shape = data.shape[:-1] + (data.shape[-1] - block + 1, block)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@register_series_method
@register_dataframe_method
def remove_above_water(df):
new_df = df.copy()
return new_df[new_df.index >= 0]
@register_series_method
@register_dataframe_method
def split(df):
"""Returns a tuple with down/up-cast."""
idx = df.index.argmax() + 1
down = df.iloc[:idx]
# Reverse index to orient it as a CTD cast.
up = df.iloc[idx:][::-1]
return down, up
@register_series_method
@register_dataframe_method
def lp_filter(df, sample_rate=24.0, time_constant=0.15):
"""
Filter a series with `time_constant` (use 0.15 s for pressure), and for
a signal of `sample_rate` in Hertz (24 Hz for 911+).
NOTE: 911+ systems do not require filter for temperature nor salinity.
Examples
--------
>>> from pathlib import Path
>>> import matplotlib.pyplot as plt
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> raw = ctd.from_cnv(data_path.joinpath('CTD-spiked-unfiltered.cnv.bz2'))
>>> prc = ctd.from_cnv(data_path.joinpath('CTD-spiked-filtered.cnv.bz2'))
>>> kw = {"sample_rate": 24.0, "time_constant": 0.15}
>>> original = prc.index.values
>>> unfiltered = raw.index.values
>>> filtered = raw.lp_filter(**kw).index.values
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(original, 'k', label='original')
>>> l2, = ax.plot(unfiltered, 'r', label='unfiltered')
>>> l3, = ax.plot(filtered, 'g', label='filtered')
>>> leg = ax.legend()
Notes
-----
https://scipy-cookbook.readthedocs.io/items/FIRFilter.html
"""
from scipy import signal
# Butter is closer to what SBE is doing with their cosine filter.
Wn = (1.0 / time_constant) / (sample_rate * 2.0)
b, a = signal.butter(2, Wn, "low")
new_df = df.copy()
new_df.index = signal.filtfilt(b, a, df.index.values)
return new_df
@register_series_method
@register_dataframe_method
@register_series_method
@register_dataframe_method
def bindata(df, delta=1.0, method="average"):
"""
Bin average the index (usually pressure) to a given interval (default
delta = 1).
"""
start = np.floor(df.index[0])
stop = np.ceil(df.index[-1])
new_index = np.arange(start, stop, delta)
binned = pd.cut(df.index, bins=new_index)
if method == "average":
new_df = df.groupby(binned).mean()
new_df.index = new_index[:-1]
elif method == "interpolate":
raise NotImplementedError(
"Bin-average via interpolation method is not Implemented yet."
)
else:
raise ValueError(
f"Expected method `average` or `interpolate`, but got {method}."
)
return new_df
def _despike(series, n1, n2, block, keep):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
data = series.values.astype(float).copy()
roll = _rolling_window(data, block)
roll = ma.masked_invalid(roll)
std = n1 * roll.std(axis=1)
mean = roll.mean(axis=1)
# Use the last value to fill-up.
std = np.r_[std, np.tile(std[-1], block - 1)]
mean = np.r_[mean, np.tile(mean[-1], block - 1)]
mask = np.abs(data - mean.filled(fill_value=np.NaN)) > std.filled(
fill_value=np.NaN
)
data[mask] = np.NaN
# Pass two recompute the mean and std without the flagged values from pass
# one and removed the flagged data.
roll = _rolling_window(data, block)
roll = ma.masked_invalid(roll)
std = n2 * roll.std(axis=1)
mean = roll.mean(axis=1)
# Use the last value to fill-up.
std = np.r_[std, np.tile(std[-1], block - 1)]
mean = np.r_[mean, np.tile(mean[-1], block - 1)]
values = series.values.astype(float)
mask = np.abs(values - mean.filled(fill_value=np.NaN)) > std.filled(
fill_value=np.NaN
)
clean = series.astype(float).copy()
clean[mask] = np.NaN
return clean
@register_series_method
@register_dataframe_method
def despike(df, n1=2, n2=20, block=100, keep=0):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
if isinstance(df, pd.Series):
new_df = _despike(df, n1=n1, n2=n2, block=block, keep=keep)
else:
new_df = df.apply(_despike, n1=n1, n2=n2, block=block, keep=keep)
return new_df
def _smooth(series, window_len, window):
"""Smooth the data using a window with requested size."""
windows = {
"flat": np.ones,
"hanning": np.hanning,
"hamming": np.hamming,
"bartlett": np.bartlett,
"blackman": np.blackman,
}
data = series.values.copy()
if window_len < 3:
return pd.Series(data, index=series.index, name=series.name)
if window not in list(windows.keys()):
raise ValueError(
"""window must be one of 'flat', 'hanning',
'hamming', 'bartlett', 'blackman'"""
)
s = np.r_[
2 * data[0] - data[window_len:1:-1],
data,
2 * data[-1] - data[-1:-window_len:-1],
]
w = windows[window](window_len)
data = np.convolve(w / w.sum(), s, mode="same")
data = data[window_len - 1 : -window_len + 1]
return pd.Series(data, index=series.index, name=series.name)
@register_series_method
@register_dataframe_method
def smooth(df, window_len=11, window="hanning"):
"""Smooth the data using a window with requested size."""
if isinstance(df, pd.Series):
new_df = _smooth(df, window_len=window_len, window=window)
else:
new_df = df.apply(_smooth, window_len=window_len, window=window)
return new_df
def _movingaverage(series, window_size=48):
window = np.ones(int(window_size)) / float(window_size)
return pd.Series(np.convolve(series, window, "same"), index=series.index)
@register_series_method
@register_dataframe_method
def movingaverage(df, window_size=48):
if isinstance(df, pd.Series):
new_df = _movingaverage(df, window_size=window_size)
else:
new_df = df.apply(_movingaverage, window_size=window_size)
return new_df
|
pyoceans/python-ctd | ctd/processing.py | bindata | python | def bindata(df, delta=1.0, method="average"):
start = np.floor(df.index[0])
stop = np.ceil(df.index[-1])
new_index = np.arange(start, stop, delta)
binned = pd.cut(df.index, bins=new_index)
if method == "average":
new_df = df.groupby(binned).mean()
new_df.index = new_index[:-1]
elif method == "interpolate":
raise NotImplementedError(
"Bin-average via interpolation method is not Implemented yet."
)
else:
raise ValueError(
f"Expected method `average` or `interpolate`, but got {method}."
)
return new_df | Bin average the index (usually pressure) to a given interval (default
delta = 1). | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/processing.py#L102-L123 | null | import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas_flavor import register_dataframe_method, register_series_method
def _rolling_window(data, block):
"""
http://stackoverflow.com/questions/4936620/
Using strides for an efficient moving average filter.
"""
shape = data.shape[:-1] + (data.shape[-1] - block + 1, block)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@register_series_method
@register_dataframe_method
def remove_above_water(df):
new_df = df.copy()
return new_df[new_df.index >= 0]
@register_series_method
@register_dataframe_method
def split(df):
"""Returns a tuple with down/up-cast."""
idx = df.index.argmax() + 1
down = df.iloc[:idx]
# Reverse index to orient it as a CTD cast.
up = df.iloc[idx:][::-1]
return down, up
@register_series_method
@register_dataframe_method
def lp_filter(df, sample_rate=24.0, time_constant=0.15):
"""
Filter a series with `time_constant` (use 0.15 s for pressure), and for
a signal of `sample_rate` in Hertz (24 Hz for 911+).
NOTE: 911+ systems do not require filter for temperature nor salinity.
Examples
--------
>>> from pathlib import Path
>>> import matplotlib.pyplot as plt
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> raw = ctd.from_cnv(data_path.joinpath('CTD-spiked-unfiltered.cnv.bz2'))
>>> prc = ctd.from_cnv(data_path.joinpath('CTD-spiked-filtered.cnv.bz2'))
>>> kw = {"sample_rate": 24.0, "time_constant": 0.15}
>>> original = prc.index.values
>>> unfiltered = raw.index.values
>>> filtered = raw.lp_filter(**kw).index.values
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(original, 'k', label='original')
>>> l2, = ax.plot(unfiltered, 'r', label='unfiltered')
>>> l3, = ax.plot(filtered, 'g', label='filtered')
>>> leg = ax.legend()
Notes
-----
https://scipy-cookbook.readthedocs.io/items/FIRFilter.html
"""
from scipy import signal
# Butter is closer to what SBE is doing with their cosine filter.
Wn = (1.0 / time_constant) / (sample_rate * 2.0)
b, a = signal.butter(2, Wn, "low")
new_df = df.copy()
new_df.index = signal.filtfilt(b, a, df.index.values)
return new_df
@register_series_method
@register_dataframe_method
def press_check(df):
"""
Remove pressure reversals from the index.
"""
new_df = df.copy()
press = new_df.copy().index.values
ref = press[0]
inversions = np.diff(np.r_[press, press[-1]]) < 0
mask = np.zeros_like(inversions)
for k, p in enumerate(inversions):
if p:
ref = press[k]
cut = press[k + 1 :] < ref
mask[k + 1 :][cut] = True
new_df[mask] = np.NaN
return new_df
@register_series_method
@register_dataframe_method
def _despike(series, n1, n2, block, keep):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
data = series.values.astype(float).copy()
roll = _rolling_window(data, block)
roll = ma.masked_invalid(roll)
std = n1 * roll.std(axis=1)
mean = roll.mean(axis=1)
# Use the last value to fill-up.
std = np.r_[std, np.tile(std[-1], block - 1)]
mean = np.r_[mean, np.tile(mean[-1], block - 1)]
mask = np.abs(data - mean.filled(fill_value=np.NaN)) > std.filled(
fill_value=np.NaN
)
data[mask] = np.NaN
# Pass two recompute the mean and std without the flagged values from pass
# one and removed the flagged data.
roll = _rolling_window(data, block)
roll = ma.masked_invalid(roll)
std = n2 * roll.std(axis=1)
mean = roll.mean(axis=1)
# Use the last value to fill-up.
std = np.r_[std, np.tile(std[-1], block - 1)]
mean = np.r_[mean, np.tile(mean[-1], block - 1)]
values = series.values.astype(float)
mask = np.abs(values - mean.filled(fill_value=np.NaN)) > std.filled(
fill_value=np.NaN
)
clean = series.astype(float).copy()
clean[mask] = np.NaN
return clean
@register_series_method
@register_dataframe_method
def despike(df, n1=2, n2=20, block=100, keep=0):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
if isinstance(df, pd.Series):
new_df = _despike(df, n1=n1, n2=n2, block=block, keep=keep)
else:
new_df = df.apply(_despike, n1=n1, n2=n2, block=block, keep=keep)
return new_df
def _smooth(series, window_len, window):
"""Smooth the data using a window with requested size."""
windows = {
"flat": np.ones,
"hanning": np.hanning,
"hamming": np.hamming,
"bartlett": np.bartlett,
"blackman": np.blackman,
}
data = series.values.copy()
if window_len < 3:
return pd.Series(data, index=series.index, name=series.name)
if window not in list(windows.keys()):
raise ValueError(
"""window must be one of 'flat', 'hanning',
'hamming', 'bartlett', 'blackman'"""
)
s = np.r_[
2 * data[0] - data[window_len:1:-1],
data,
2 * data[-1] - data[-1:-window_len:-1],
]
w = windows[window](window_len)
data = np.convolve(w / w.sum(), s, mode="same")
data = data[window_len - 1 : -window_len + 1]
return pd.Series(data, index=series.index, name=series.name)
@register_series_method
@register_dataframe_method
def smooth(df, window_len=11, window="hanning"):
"""Smooth the data using a window with requested size."""
if isinstance(df, pd.Series):
new_df = _smooth(df, window_len=window_len, window=window)
else:
new_df = df.apply(_smooth, window_len=window_len, window=window)
return new_df
def _movingaverage(series, window_size=48):
window = np.ones(int(window_size)) / float(window_size)
return pd.Series(np.convolve(series, window, "same"), index=series.index)
@register_series_method
@register_dataframe_method
def movingaverage(df, window_size=48):
if isinstance(df, pd.Series):
new_df = _movingaverage(df, window_size=window_size)
else:
new_df = df.apply(_movingaverage, window_size=window_size)
return new_df
|
pyoceans/python-ctd | ctd/processing.py | _despike | python | def _despike(series, n1, n2, block, keep):
data = series.values.astype(float).copy()
roll = _rolling_window(data, block)
roll = ma.masked_invalid(roll)
std = n1 * roll.std(axis=1)
mean = roll.mean(axis=1)
# Use the last value to fill-up.
std = np.r_[std, np.tile(std[-1], block - 1)]
mean = np.r_[mean, np.tile(mean[-1], block - 1)]
mask = np.abs(data - mean.filled(fill_value=np.NaN)) > std.filled(
fill_value=np.NaN
)
data[mask] = np.NaN
# Pass two recompute the mean and std without the flagged values from pass
# one and removed the flagged data.
roll = _rolling_window(data, block)
roll = ma.masked_invalid(roll)
std = n2 * roll.std(axis=1)
mean = roll.mean(axis=1)
# Use the last value to fill-up.
std = np.r_[std, np.tile(std[-1], block - 1)]
mean = np.r_[mean, np.tile(mean[-1], block - 1)]
values = series.values.astype(float)
mask = np.abs(values - mean.filled(fill_value=np.NaN)) > std.filled(
fill_value=np.NaN
)
clean = series.astype(float).copy()
clean[mask] = np.NaN
return clean | Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/processing.py#L126-L162 | [
"def _rolling_window(data, block):\n \"\"\"\n http://stackoverflow.com/questions/4936620/\n Using strides for an efficient moving average filter.\n\n \"\"\"\n shape = data.shape[:-1] + (data.shape[-1] - block + 1, block)\n strides = data.strides + (data.strides[-1],)\n return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)\n"
] | import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas_flavor import register_dataframe_method, register_series_method
def _rolling_window(data, block):
"""
http://stackoverflow.com/questions/4936620/
Using strides for an efficient moving average filter.
"""
shape = data.shape[:-1] + (data.shape[-1] - block + 1, block)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@register_series_method
@register_dataframe_method
def remove_above_water(df):
new_df = df.copy()
return new_df[new_df.index >= 0]
@register_series_method
@register_dataframe_method
def split(df):
"""Returns a tuple with down/up-cast."""
idx = df.index.argmax() + 1
down = df.iloc[:idx]
# Reverse index to orient it as a CTD cast.
up = df.iloc[idx:][::-1]
return down, up
@register_series_method
@register_dataframe_method
def lp_filter(df, sample_rate=24.0, time_constant=0.15):
"""
Filter a series with `time_constant` (use 0.15 s for pressure), and for
a signal of `sample_rate` in Hertz (24 Hz for 911+).
NOTE: 911+ systems do not require filter for temperature nor salinity.
Examples
--------
>>> from pathlib import Path
>>> import matplotlib.pyplot as plt
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> raw = ctd.from_cnv(data_path.joinpath('CTD-spiked-unfiltered.cnv.bz2'))
>>> prc = ctd.from_cnv(data_path.joinpath('CTD-spiked-filtered.cnv.bz2'))
>>> kw = {"sample_rate": 24.0, "time_constant": 0.15}
>>> original = prc.index.values
>>> unfiltered = raw.index.values
>>> filtered = raw.lp_filter(**kw).index.values
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(original, 'k', label='original')
>>> l2, = ax.plot(unfiltered, 'r', label='unfiltered')
>>> l3, = ax.plot(filtered, 'g', label='filtered')
>>> leg = ax.legend()
Notes
-----
https://scipy-cookbook.readthedocs.io/items/FIRFilter.html
"""
from scipy import signal
# Butter is closer to what SBE is doing with their cosine filter.
Wn = (1.0 / time_constant) / (sample_rate * 2.0)
b, a = signal.butter(2, Wn, "low")
new_df = df.copy()
new_df.index = signal.filtfilt(b, a, df.index.values)
return new_df
@register_series_method
@register_dataframe_method
def press_check(df):
"""
Remove pressure reversals from the index.
"""
new_df = df.copy()
press = new_df.copy().index.values
ref = press[0]
inversions = np.diff(np.r_[press, press[-1]]) < 0
mask = np.zeros_like(inversions)
for k, p in enumerate(inversions):
if p:
ref = press[k]
cut = press[k + 1 :] < ref
mask[k + 1 :][cut] = True
new_df[mask] = np.NaN
return new_df
@register_series_method
@register_dataframe_method
def bindata(df, delta=1.0, method="average"):
"""
Bin average the index (usually pressure) to a given interval (default
delta = 1).
"""
start = np.floor(df.index[0])
stop = np.ceil(df.index[-1])
new_index = np.arange(start, stop, delta)
binned = pd.cut(df.index, bins=new_index)
if method == "average":
new_df = df.groupby(binned).mean()
new_df.index = new_index[:-1]
elif method == "interpolate":
raise NotImplementedError(
"Bin-average via interpolation method is not Implemented yet."
)
else:
raise ValueError(
f"Expected method `average` or `interpolate`, but got {method}."
)
return new_df
@register_series_method
@register_dataframe_method
def despike(df, n1=2, n2=20, block=100, keep=0):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
if isinstance(df, pd.Series):
new_df = _despike(df, n1=n1, n2=n2, block=block, keep=keep)
else:
new_df = df.apply(_despike, n1=n1, n2=n2, block=block, keep=keep)
return new_df
def _smooth(series, window_len, window):
"""Smooth the data using a window with requested size."""
windows = {
"flat": np.ones,
"hanning": np.hanning,
"hamming": np.hamming,
"bartlett": np.bartlett,
"blackman": np.blackman,
}
data = series.values.copy()
if window_len < 3:
return pd.Series(data, index=series.index, name=series.name)
if window not in list(windows.keys()):
raise ValueError(
"""window must be one of 'flat', 'hanning',
'hamming', 'bartlett', 'blackman'"""
)
s = np.r_[
2 * data[0] - data[window_len:1:-1],
data,
2 * data[-1] - data[-1:-window_len:-1],
]
w = windows[window](window_len)
data = np.convolve(w / w.sum(), s, mode="same")
data = data[window_len - 1 : -window_len + 1]
return pd.Series(data, index=series.index, name=series.name)
@register_series_method
@register_dataframe_method
def smooth(df, window_len=11, window="hanning"):
"""Smooth the data using a window with requested size."""
if isinstance(df, pd.Series):
new_df = _smooth(df, window_len=window_len, window=window)
else:
new_df = df.apply(_smooth, window_len=window_len, window=window)
return new_df
def _movingaverage(series, window_size=48):
window = np.ones(int(window_size)) / float(window_size)
return pd.Series(np.convolve(series, window, "same"), index=series.index)
@register_series_method
@register_dataframe_method
def movingaverage(df, window_size=48):
if isinstance(df, pd.Series):
new_df = _movingaverage(df, window_size=window_size)
else:
new_df = df.apply(_movingaverage, window_size=window_size)
return new_df
|
pyoceans/python-ctd | ctd/processing.py | despike | python | def despike(df, n1=2, n2=20, block=100, keep=0):
if isinstance(df, pd.Series):
new_df = _despike(df, n1=n1, n2=n2, block=block, keep=keep)
else:
new_df = df.apply(_despike, n1=n1, n2=n2, block=block, keep=keep)
return new_df | Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/processing.py#L167-L177 | [
"def _despike(series, n1, n2, block, keep):\n \"\"\"\n Wild Edit Seabird-like function. Passes with Standard deviation\n `n1` and `n2` with window size `block`.\n\n \"\"\"\n\n data = series.values.astype(float).copy()\n roll = _rolling_window(data, block)\n roll = ma.masked_invalid(roll)\n std = n1 * roll.std(axis=1)\n mean = roll.mean(axis=1)\n # Use the last value to fill-up.\n std = np.r_[std, np.tile(std[-1], block - 1)]\n mean = np.r_[mean, np.tile(mean[-1], block - 1)]\n mask = np.abs(data - mean.filled(fill_value=np.NaN)) > std.filled(\n fill_value=np.NaN\n )\n data[mask] = np.NaN\n\n # Pass two recompute the mean and std without the flagged values from pass\n # one and removed the flagged data.\n roll = _rolling_window(data, block)\n roll = ma.masked_invalid(roll)\n std = n2 * roll.std(axis=1)\n mean = roll.mean(axis=1)\n # Use the last value to fill-up.\n std = np.r_[std, np.tile(std[-1], block - 1)]\n mean = np.r_[mean, np.tile(mean[-1], block - 1)]\n values = series.values.astype(float)\n mask = np.abs(values - mean.filled(fill_value=np.NaN)) > std.filled(\n fill_value=np.NaN\n )\n\n clean = series.astype(float).copy()\n clean[mask] = np.NaN\n return clean\n"
] | import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas_flavor import register_dataframe_method, register_series_method
def _rolling_window(data, block):
"""
http://stackoverflow.com/questions/4936620/
Using strides for an efficient moving average filter.
"""
shape = data.shape[:-1] + (data.shape[-1] - block + 1, block)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@register_series_method
@register_dataframe_method
def remove_above_water(df):
new_df = df.copy()
return new_df[new_df.index >= 0]
@register_series_method
@register_dataframe_method
def split(df):
    """Return a ``(downcast, upcast)`` tuple split at the deepest sample."""
    deepest = df.index.argmax() + 1
    downcast = df.iloc[:deepest]
    # Reverse the tail so the up-cast reads surface-ward like a CTD cast.
    upcast = df.iloc[deepest:][::-1]
    return downcast, upcast
@register_series_method
@register_dataframe_method
def lp_filter(df, sample_rate=24.0, time_constant=0.15):
    """
    Low-pass filter the index with the given ``time_constant`` (use 0.15 s
    for pressure), for a signal sampled at ``sample_rate`` Hz (24 Hz for a
    911+ system).

    NOTE: 911+ systems do not require filter for temperature nor salinity.

    Notes
    -----
    https://scipy-cookbook.readthedocs.io/items/FIRFilter.html
    """
    from scipy import signal

    # A 2nd-order Butterworth is closer to what SBE is doing with their
    # cosine filter.
    cutoff = (1.0 / time_constant) / (sample_rate * 2.0)
    num, den = signal.butter(2, cutoff, "low")
    filtered = df.copy()
    filtered.index = signal.filtfilt(num, den, df.index.values)
    return filtered
@register_series_method
@register_dataframe_method
def press_check(df):
    """
    Remove pressure reversals from the index.

    Walks the (pressure) index; once a reversal is detected, every later
    sample shallower than the reversal point is replaced with NaN.
    """
    new_df = df.copy()
    press = new_df.copy().index.values
    ref = press[0]
    # Append the last value so diff keeps the original length.
    inversions = np.diff(np.r_[press, press[-1]]) < 0
    mask = np.zeros_like(inversions)
    for k, p in enumerate(inversions):
        if p:
            ref = press[k]
            cut = press[k + 1 :] < ref
            mask[k + 1 :][cut] = True
    # Use np.nan: the np.NaN alias was removed in NumPy 2.0.
    new_df[mask] = np.nan
    return new_df
@register_series_method
@register_dataframe_method
def bindata(df, delta=1.0, method="average"):
    """
    Bin average the index (usually pressure) onto a regular grid with a
    given interval ``delta`` (default 1).

    Raises ``NotImplementedError`` for ``method="interpolate"`` and
    ``ValueError`` for any other unknown method.
    """
    start = np.floor(df.index[0])
    stop = np.ceil(df.index[-1])
    new_index = np.arange(start, stop, delta)
    binned = pd.cut(df.index, bins=new_index)
    if method == "average":
        new_df = df.groupby(binned).mean()
        new_df.index = new_index[:-1]
    elif method == "interpolate":
        raise NotImplementedError(
            "Bin-average via interpolation method is not Implemented yet."
        )
    else:
        raise ValueError(
            f"Expected method `average` or `interpolate`, but got {method}."
        )
    return new_df
def _despike(series, n1, n2, block, keep):
    """
    Wild Edit Seabird-like function. Passes with Standard deviation
    `n1` and `n2` with window size `block`.

    Pass one flags values more than ``n1`` rolling standard deviations from
    the rolling mean; pass two recomputes the statistics without the
    flagged values and masks values beyond ``n2`` deviations.
    ``keep`` is accepted for interface compatibility but is currently
    unused.
    """
    data = series.values.astype(float).copy()

    # Pass one: flag with n1 standard deviations.
    roll = _rolling_window(data, block)
    roll = ma.masked_invalid(roll)
    std = n1 * roll.std(axis=1)
    mean = roll.mean(axis=1)
    # Use the last value to fill-up so lengths match the series.
    # np.nan instead of the np.NaN alias (removed in NumPy 2.0).
    std = np.r_[std, np.tile(std[-1], block - 1)]
    mean = np.r_[mean, np.tile(mean[-1], block - 1)]
    mask = np.abs(data - mean.filled(fill_value=np.nan)) > std.filled(
        fill_value=np.nan
    )
    data[mask] = np.nan

    # Pass two: recompute the mean and std without the values flagged in
    # pass one, then mask against the original values with n2 deviations.
    roll = _rolling_window(data, block)
    roll = ma.masked_invalid(roll)
    std = n2 * roll.std(axis=1)
    mean = roll.mean(axis=1)
    std = np.r_[std, np.tile(std[-1], block - 1)]
    mean = np.r_[mean, np.tile(mean[-1], block - 1)]
    values = series.values.astype(float)
    mask = np.abs(values - mean.filled(fill_value=np.nan)) > std.filled(
        fill_value=np.nan
    )

    clean = series.astype(float).copy()
    clean[mask] = np.nan
    return clean
@register_series_method
@register_dataframe_method
def _smooth(series, window_len, window):
"""Smooth the data using a window with requested size."""
windows = {
"flat": np.ones,
"hanning": np.hanning,
"hamming": np.hamming,
"bartlett": np.bartlett,
"blackman": np.blackman,
}
data = series.values.copy()
if window_len < 3:
return pd.Series(data, index=series.index, name=series.name)
if window not in list(windows.keys()):
raise ValueError(
"""window must be one of 'flat', 'hanning',
'hamming', 'bartlett', 'blackman'"""
)
s = np.r_[
2 * data[0] - data[window_len:1:-1],
data,
2 * data[-1] - data[-1:-window_len:-1],
]
w = windows[window](window_len)
data = np.convolve(w / w.sum(), s, mode="same")
data = data[window_len - 1 : -window_len + 1]
return pd.Series(data, index=series.index, name=series.name)
@register_series_method
@register_dataframe_method
def smooth(df, window_len=11, window="hanning"):
    """Smooth the data using a window with requested size."""
    if isinstance(df, pd.Series):
        return _smooth(df, window_len=window_len, window=window)
    return df.apply(_smooth, window_len=window_len, window=window)
def _movingaverage(series, window_size=48):
window = np.ones(int(window_size)) / float(window_size)
return pd.Series(np.convolve(series, window, "same"), index=series.index)
@register_series_method
@register_dataframe_method
def movingaverage(df, window_size=48):
    """Apply a centered moving average to a Series or each DataFrame column."""
    if isinstance(df, pd.Series):
        return _movingaverage(df, window_size=window_size)
    return df.apply(_movingaverage, window_size=window_size)
|
pyoceans/python-ctd | ctd/processing.py | _smooth | python | def _smooth(series, window_len, window):
windows = {
"flat": np.ones,
"hanning": np.hanning,
"hamming": np.hamming,
"bartlett": np.bartlett,
"blackman": np.blackman,
}
data = series.values.copy()
if window_len < 3:
return pd.Series(data, index=series.index, name=series.name)
if window not in list(windows.keys()):
raise ValueError(
"""window must be one of 'flat', 'hanning',
'hamming', 'bartlett', 'blackman'"""
)
s = np.r_[
2 * data[0] - data[window_len:1:-1],
data,
2 * data[-1] - data[-1:-window_len:-1],
]
w = windows[window](window_len)
data = np.convolve(w / w.sum(), s, mode="same")
data = data[window_len - 1 : -window_len + 1]
return pd.Series(data, index=series.index, name=series.name) | Smooth the data using a window with requested size. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/processing.py#L180-L211 | null | import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas_flavor import register_dataframe_method, register_series_method
def _rolling_window(data, block):
"""
http://stackoverflow.com/questions/4936620/
Using strides for an efficient moving average filter.
"""
shape = data.shape[:-1] + (data.shape[-1] - block + 1, block)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@register_series_method
@register_dataframe_method
def remove_above_water(df):
new_df = df.copy()
return new_df[new_df.index >= 0]
@register_series_method
@register_dataframe_method
def split(df):
"""Returns a tuple with down/up-cast."""
idx = df.index.argmax() + 1
down = df.iloc[:idx]
# Reverse index to orient it as a CTD cast.
up = df.iloc[idx:][::-1]
return down, up
@register_series_method
@register_dataframe_method
def lp_filter(df, sample_rate=24.0, time_constant=0.15):
"""
Filter a series with `time_constant` (use 0.15 s for pressure), and for
a signal of `sample_rate` in Hertz (24 Hz for 911+).
NOTE: 911+ systems do not require filter for temperature nor salinity.
Examples
--------
>>> from pathlib import Path
>>> import matplotlib.pyplot as plt
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> raw = ctd.from_cnv(data_path.joinpath('CTD-spiked-unfiltered.cnv.bz2'))
>>> prc = ctd.from_cnv(data_path.joinpath('CTD-spiked-filtered.cnv.bz2'))
>>> kw = {"sample_rate": 24.0, "time_constant": 0.15}
>>> original = prc.index.values
>>> unfiltered = raw.index.values
>>> filtered = raw.lp_filter(**kw).index.values
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(original, 'k', label='original')
>>> l2, = ax.plot(unfiltered, 'r', label='unfiltered')
>>> l3, = ax.plot(filtered, 'g', label='filtered')
>>> leg = ax.legend()
Notes
-----
https://scipy-cookbook.readthedocs.io/items/FIRFilter.html
"""
from scipy import signal
# Butter is closer to what SBE is doing with their cosine filter.
Wn = (1.0 / time_constant) / (sample_rate * 2.0)
b, a = signal.butter(2, Wn, "low")
new_df = df.copy()
new_df.index = signal.filtfilt(b, a, df.index.values)
return new_df
@register_series_method
@register_dataframe_method
def press_check(df):
    """
    Remove pressure reversals from the index.

    Samples shallower than the most recent reversal point are set to NaN.
    """
    new_df = df.copy()
    press = new_df.copy().index.values
    ref = press[0]
    inversions = np.diff(np.r_[press, press[-1]]) < 0
    mask = np.zeros_like(inversions)
    for k, p in enumerate(inversions):
        if p:
            ref = press[k]
            cut = press[k + 1 :] < ref
            mask[k + 1 :][cut] = True
    # np.nan replaces the np.NaN alias, which NumPy 2.0 removed.
    new_df[mask] = np.nan
    return new_df
@register_series_method
@register_dataframe_method
def bindata(df, delta=1.0, method="average"):
    """
    Bin average the index (usually pressure) onto a regular grid with
    spacing ``delta`` (default 1).
    """
    lo = np.floor(df.index[0])
    hi = np.ceil(df.index[-1])
    edges = np.arange(lo, hi, delta)
    cats = pd.cut(df.index, bins=edges)
    if method == "average":
        out = df.groupby(cats).mean()
        out.index = edges[:-1]
        return out
    if method == "interpolate":
        raise NotImplementedError(
            "Bin-average via interpolation method is not Implemented yet."
        )
    raise ValueError(
        f"Expected method `average` or `interpolate`, but got {method}."
    )
def _despike(series, n1, n2, block, keep):
    """
    Wild Edit Seabird-like function. Passes with Standard deviation
    `n1` and `n2` with window size `block`.

    ``keep`` is unused but retained for interface compatibility.
    """
    data = series.values.astype(float).copy()

    # Pass one: flag values beyond n1 rolling standard deviations.
    roll = _rolling_window(data, block)
    roll = ma.masked_invalid(roll)
    std = n1 * roll.std(axis=1)
    mean = roll.mean(axis=1)
    # Pad with the last value so the statistics match the series length.
    std = np.r_[std, np.tile(std[-1], block - 1)]
    mean = np.r_[mean, np.tile(mean[-1], block - 1)]
    # np.nan replaces the np.NaN alias (removed in NumPy 2.0).
    mask = np.abs(data - mean.filled(fill_value=np.nan)) > std.filled(
        fill_value=np.nan
    )
    data[mask] = np.nan

    # Pass two: recompute without the pass-one outliers and mask the
    # original values beyond n2 deviations.
    roll = _rolling_window(data, block)
    roll = ma.masked_invalid(roll)
    std = n2 * roll.std(axis=1)
    mean = roll.mean(axis=1)
    std = np.r_[std, np.tile(std[-1], block - 1)]
    mean = np.r_[mean, np.tile(mean[-1], block - 1)]
    values = series.values.astype(float)
    mask = np.abs(values - mean.filled(fill_value=np.nan)) > std.filled(
        fill_value=np.nan
    )

    clean = series.astype(float).copy()
    clean[mask] = np.nan
    return clean
@register_series_method
@register_dataframe_method
def despike(df, n1=2, n2=20, block=100, keep=0):
    """
    Wild Edit Seabird-like despiking: two passes at ``n1`` and ``n2``
    standard deviations over a rolling window of size ``block``.
    """
    opts = {"n1": n1, "n2": n2, "block": block, "keep": keep}
    if isinstance(df, pd.Series):
        return _despike(df, **opts)
    return df.apply(_despike, **opts)
@register_series_method
@register_dataframe_method
def smooth(df, window_len=11, window="hanning"):
    """Smooth the data using a window with requested size."""
    kwargs = {"window_len": window_len, "window": window}
    if isinstance(df, pd.Series):
        return _smooth(df, **kwargs)
    return df.apply(_smooth, **kwargs)
def _movingaverage(series, window_size=48):
window = np.ones(int(window_size)) / float(window_size)
return pd.Series(np.convolve(series, window, "same"), index=series.index)
@register_series_method
@register_dataframe_method
def movingaverage(df, window_size=48):
    """Centered moving average of a Series or of each DataFrame column."""
    is_series = isinstance(df, pd.Series)
    return (
        _movingaverage(df, window_size=window_size)
        if is_series
        else df.apply(_movingaverage, window_size=window_size)
    )
return new_df
|
pyoceans/python-ctd | ctd/processing.py | smooth | python | def smooth(df, window_len=11, window="hanning"):
if isinstance(df, pd.Series):
new_df = _smooth(df, window_len=window_len, window=window)
else:
new_df = df.apply(_smooth, window_len=window_len, window=window)
return new_df | Smooth the data using a window with requested size. | train | https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/processing.py#L216-L222 | [
"def _smooth(series, window_len, window):\n \"\"\"Smooth the data using a window with requested size.\"\"\"\n\n windows = {\n \"flat\": np.ones,\n \"hanning\": np.hanning,\n \"hamming\": np.hamming,\n \"bartlett\": np.bartlett,\n \"blackman\": np.blackman,\n }\n data = series.values.copy()\n\n if window_len < 3:\n return pd.Series(data, index=series.index, name=series.name)\n\n if window not in list(windows.keys()):\n raise ValueError(\n \"\"\"window must be one of 'flat', 'hanning',\n 'hamming', 'bartlett', 'blackman'\"\"\"\n )\n\n s = np.r_[\n 2 * data[0] - data[window_len:1:-1],\n data,\n 2 * data[-1] - data[-1:-window_len:-1],\n ]\n\n w = windows[window](window_len)\n\n data = np.convolve(w / w.sum(), s, mode=\"same\")\n data = data[window_len - 1 : -window_len + 1]\n return pd.Series(data, index=series.index, name=series.name)\n"
] | import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas_flavor import register_dataframe_method, register_series_method
def _rolling_window(data, block):
"""
http://stackoverflow.com/questions/4936620/
Using strides for an efficient moving average filter.
"""
shape = data.shape[:-1] + (data.shape[-1] - block + 1, block)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@register_series_method
@register_dataframe_method
def remove_above_water(df):
new_df = df.copy()
return new_df[new_df.index >= 0]
@register_series_method
@register_dataframe_method
def split(df):
"""Returns a tuple with down/up-cast."""
idx = df.index.argmax() + 1
down = df.iloc[:idx]
# Reverse index to orient it as a CTD cast.
up = df.iloc[idx:][::-1]
return down, up
@register_series_method
@register_dataframe_method
def lp_filter(df, sample_rate=24.0, time_constant=0.15):
"""
Filter a series with `time_constant` (use 0.15 s for pressure), and for
a signal of `sample_rate` in Hertz (24 Hz for 911+).
NOTE: 911+ systems do not require filter for temperature nor salinity.
Examples
--------
>>> from pathlib import Path
>>> import matplotlib.pyplot as plt
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> raw = ctd.from_cnv(data_path.joinpath('CTD-spiked-unfiltered.cnv.bz2'))
>>> prc = ctd.from_cnv(data_path.joinpath('CTD-spiked-filtered.cnv.bz2'))
>>> kw = {"sample_rate": 24.0, "time_constant": 0.15}
>>> original = prc.index.values
>>> unfiltered = raw.index.values
>>> filtered = raw.lp_filter(**kw).index.values
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(original, 'k', label='original')
>>> l2, = ax.plot(unfiltered, 'r', label='unfiltered')
>>> l3, = ax.plot(filtered, 'g', label='filtered')
>>> leg = ax.legend()
Notes
-----
https://scipy-cookbook.readthedocs.io/items/FIRFilter.html
"""
from scipy import signal
# Butter is closer to what SBE is doing with their cosine filter.
Wn = (1.0 / time_constant) / (sample_rate * 2.0)
b, a = signal.butter(2, Wn, "low")
new_df = df.copy()
new_df.index = signal.filtfilt(b, a, df.index.values)
return new_df
@register_series_method
@register_dataframe_method
def press_check(df):
    """
    Remove pressure reversals from the index by NaN-ing samples shallower
    than the latest reversal point.
    """
    new_df = df.copy()
    press = new_df.copy().index.values
    ref = press[0]
    inversions = np.diff(np.r_[press, press[-1]]) < 0
    mask = np.zeros_like(inversions)
    for k, p in enumerate(inversions):
        if p:
            ref = press[k]
            cut = press[k + 1 :] < ref
            mask[k + 1 :][cut] = True
    # The np.NaN alias was removed in NumPy 2.0; use np.nan.
    new_df[mask] = np.nan
    return new_df
@register_series_method
@register_dataframe_method
def bindata(df, delta=1.0, method="average"):
    """
    Bin average the index (usually pressure) to a regular grid with the
    given interval ``delta`` (default 1).
    """
    grid = np.arange(np.floor(df.index[0]), np.ceil(df.index[-1]), delta)
    labels = pd.cut(df.index, bins=grid)
    if method == "interpolate":
        raise NotImplementedError(
            "Bin-average via interpolation method is not Implemented yet."
        )
    if method != "average":
        raise ValueError(
            f"Expected method `average` or `interpolate`, but got {method}."
        )
    averaged = df.groupby(labels).mean()
    averaged.index = grid[:-1]
    return averaged
def _despike(series, n1, n2, block, keep):
    """
    Wild Edit Seabird-like function. Passes with Standard deviation
    `n1` and `n2` with window size `block`.

    ``keep`` is unused but kept for interface compatibility.
    """
    data = series.values.astype(float).copy()

    # Pass one at n1 standard deviations.
    roll = _rolling_window(data, block)
    roll = ma.masked_invalid(roll)
    std = n1 * roll.std(axis=1)
    mean = roll.mean(axis=1)
    # Use the last value to fill-up; np.nan replaces the removed np.NaN
    # alias (NumPy 2.0).
    std = np.r_[std, np.tile(std[-1], block - 1)]
    mean = np.r_[mean, np.tile(mean[-1], block - 1)]
    mask = np.abs(data - mean.filled(fill_value=np.nan)) > std.filled(
        fill_value=np.nan
    )
    data[mask] = np.nan

    # Pass two: recompute mean/std without the flagged values, then mask
    # the original series at n2 standard deviations.
    roll = _rolling_window(data, block)
    roll = ma.masked_invalid(roll)
    std = n2 * roll.std(axis=1)
    mean = roll.mean(axis=1)
    std = np.r_[std, np.tile(std[-1], block - 1)]
    mean = np.r_[mean, np.tile(mean[-1], block - 1)]
    values = series.values.astype(float)
    mask = np.abs(values - mean.filled(fill_value=np.nan)) > std.filled(
        fill_value=np.nan
    )

    clean = series.astype(float).copy()
    clean[mask] = np.nan
    return clean
@register_series_method
@register_dataframe_method
def despike(df, n1=2, n2=20, block=100, keep=0):
    """
    Two-pass Seabird Wild Edit despiking with thresholds ``n1``/``n2``
    over a rolling window of size ``block``.
    """
    if isinstance(df, pd.Series):
        return _despike(df, n1=n1, n2=n2, block=block, keep=keep)
    return df.apply(_despike, n1=n1, n2=n2, block=block, keep=keep)
def _smooth(series, window_len, window):
"""Smooth the data using a window with requested size."""
windows = {
"flat": np.ones,
"hanning": np.hanning,
"hamming": np.hamming,
"bartlett": np.bartlett,
"blackman": np.blackman,
}
data = series.values.copy()
if window_len < 3:
return pd.Series(data, index=series.index, name=series.name)
if window not in list(windows.keys()):
raise ValueError(
"""window must be one of 'flat', 'hanning',
'hamming', 'bartlett', 'blackman'"""
)
s = np.r_[
2 * data[0] - data[window_len:1:-1],
data,
2 * data[-1] - data[-1:-window_len:-1],
]
w = windows[window](window_len)
data = np.convolve(w / w.sum(), s, mode="same")
data = data[window_len - 1 : -window_len + 1]
return pd.Series(data, index=series.index, name=series.name)
@register_series_method
@register_dataframe_method
def _movingaverage(series, window_size=48):
window = np.ones(int(window_size)) / float(window_size)
return pd.Series(np.convolve(series, window, "same"), index=series.index)
@register_series_method
@register_dataframe_method
def movingaverage(df, window_size=48):
    """Moving average of a Series, or column-wise for a DataFrame."""
    if not isinstance(df, pd.Series):
        return df.apply(_movingaverage, window_size=window_size)
    return _movingaverage(df, window_size=window_size)
|
kmadac/bitstamp-python-client | bitstamp/client.py | BaseClient._get | python | def _get(self, *args, **kwargs):
return self._request(requests.get, *args, **kwargs) | Make a GET request. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L35-L39 | [
"def _request(self, func, url, version=1, *args, **kwargs):\n \"\"\"\n Make a generic request, adding in any proxy defined by the instance.\n\n Raises a ``requests.HTTPError`` if the response status isn't 200, and\n raises a :class:`BitstampError` if the response contains a json encoded\n error message.\n \"\"\"\n return_json = kwargs.pop('return_json', False)\n url = self.api_url[version] + url\n response = func(url, *args, **kwargs)\n\n if 'proxies' not in kwargs:\n kwargs['proxies'] = self.proxydict\n\n # Check for error, raising an exception if appropriate.\n response.raise_for_status()\n\n try:\n json_response = response.json()\n except ValueError:\n json_response = None\n if isinstance(json_response, dict):\n error = json_response.get('error')\n if error:\n raise BitstampError(error)\n elif json_response.get('status') == \"error\":\n raise BitstampError(json_response.get('reason'))\n\n if return_json:\n if json_response is None:\n raise BitstampError(\n \"Could not decode json for: \" + response.text)\n return json_response\n\n return response\n"
] | class BaseClient(object):
"""
A base class for the API Client methods that handles interaction with
the requests library.
"""
api_url = {1: 'https://www.bitstamp.net/api/',
2: 'https://www.bitstamp.net/api/v2/'}
exception_on_error = True
def __init__(self, proxydict=None, *args, **kwargs):
self.proxydict = proxydict
def _post(self, *args, **kwargs):
    """
    Make a POST request, merging the subclass default payload into ``data``.
    """
    payload = self._default_data()
    payload.update(kwargs.get('data') or {})
    kwargs['data'] = payload
    return self._request(requests.post, *args, **kwargs)
def _default_data(self):
    """
    Default data for a POST request.

    Empty here; authenticated subclasses override this to add credentials.
    """
    defaults = {}
    return defaults
def _construct_url(self, url, base, quote):
    """
    Append the ``base``/``quote`` orderbook segment to ``url`` when given.
    """
    if base or quote:
        return url + base.lower() + quote.lower() + "/"
    return url
def _request(self, func, url, version=1, *args, **kwargs):
    """
    Make a generic request, adding in any proxy defined by the instance.

    Raises a ``requests.HTTPError`` if the response status isn't 200, and
    raises a :class:`BitstampError` if the response contains a json encoded
    error message.
    """
    return_json = kwargs.pop('return_json', False)
    url = self.api_url[version] + url

    # Apply the instance proxy configuration *before* issuing the request;
    # previously it was added to kwargs after the call and never took
    # effect.
    if 'proxies' not in kwargs:
        kwargs['proxies'] = self.proxydict

    response = func(url, *args, **kwargs)

    # Check for error, raising an exception if appropriate.
    response.raise_for_status()

    try:
        json_response = response.json()
    except ValueError:
        json_response = None
    if isinstance(json_response, dict):
        error = json_response.get('error')
        if error:
            raise BitstampError(error)
        elif json_response.get('status') == "error":
            raise BitstampError(json_response.get('reason'))

    if return_json:
        if json_response is None:
            raise BitstampError(
                "Could not decode json for: " + response.text)
        return json_response

    return response
|
kmadac/bitstamp-python-client | bitstamp/client.py | BaseClient._post | python | def _post(self, *args, **kwargs):
data = self._default_data()
data.update(kwargs.get('data') or {})
kwargs['data'] = data
return self._request(requests.post, *args, **kwargs) | Make a POST request. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L41-L48 | [
"def _request(self, func, url, version=1, *args, **kwargs):\n \"\"\"\n Make a generic request, adding in any proxy defined by the instance.\n\n Raises a ``requests.HTTPError`` if the response status isn't 200, and\n raises a :class:`BitstampError` if the response contains a json encoded\n error message.\n \"\"\"\n return_json = kwargs.pop('return_json', False)\n url = self.api_url[version] + url\n response = func(url, *args, **kwargs)\n\n if 'proxies' not in kwargs:\n kwargs['proxies'] = self.proxydict\n\n # Check for error, raising an exception if appropriate.\n response.raise_for_status()\n\n try:\n json_response = response.json()\n except ValueError:\n json_response = None\n if isinstance(json_response, dict):\n error = json_response.get('error')\n if error:\n raise BitstampError(error)\n elif json_response.get('status') == \"error\":\n raise BitstampError(json_response.get('reason'))\n\n if return_json:\n if json_response is None:\n raise BitstampError(\n \"Could not decode json for: \" + response.text)\n return json_response\n\n return response\n",
"def _default_data(self, *args, **kwargs):\n \"\"\"\n Generate a one-time signature and other data required to send a secure\n POST request to the Bitstamp API.\n \"\"\"\n data = super(Trading, self)._default_data(*args, **kwargs)\n data['key'] = self.key\n nonce = self.get_nonce()\n msg = str(nonce) + self.username + self.key\n\n signature = hmac.new(\n self.secret.encode('utf-8'), msg=msg.encode('utf-8'),\n digestmod=hashlib.sha256).hexdigest().upper()\n data['signature'] = signature\n data['nonce'] = nonce\n return data\n"
] | class BaseClient(object):
"""
A base class for the API Client methods that handles interaction with
the requests library.
"""
api_url = {1: 'https://www.bitstamp.net/api/',
2: 'https://www.bitstamp.net/api/v2/'}
exception_on_error = True
def __init__(self, proxydict=None, *args, **kwargs):
self.proxydict = proxydict
def _get(self, *args, **kwargs):
"""
Make a GET request.
"""
return self._request(requests.get, *args, **kwargs)
def _default_data(self):
"""
Default data for a POST request.
"""
return {}
def _construct_url(self, url, base, quote):
"""
Adds the orderbook to the url if base and quote are specified.
"""
if not base and not quote:
return url
else:
url = url + base.lower() + quote.lower() + "/"
return url
def _request(self, func, url, version=1, *args, **kwargs):
"""
Make a generic request, adding in any proxy defined by the instance.
Raises a ``requests.HTTPError`` if the response status isn't 200, and
raises a :class:`BitstampError` if the response contains a json encoded
error message.
"""
return_json = kwargs.pop('return_json', False)
url = self.api_url[version] + url
response = func(url, *args, **kwargs)
if 'proxies' not in kwargs:
kwargs['proxies'] = self.proxydict
# Check for error, raising an exception if appropriate.
response.raise_for_status()
try:
json_response = response.json()
except ValueError:
json_response = None
if isinstance(json_response, dict):
error = json_response.get('error')
if error:
raise BitstampError(error)
elif json_response.get('status') == "error":
raise BitstampError(json_response.get('reason'))
if return_json:
if json_response is None:
raise BitstampError(
"Could not decode json for: " + response.text)
return json_response
return response
|
kmadac/bitstamp-python-client | bitstamp/client.py | BaseClient._construct_url | python | def _construct_url(self, url, base, quote):
if not base and not quote:
return url
else:
url = url + base.lower() + quote.lower() + "/"
return url | Adds the orderbook to the url if base and quote are specified. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L56-L64 | null | class BaseClient(object):
"""
A base class for the API Client methods that handles interaction with
the requests library.
"""
api_url = {1: 'https://www.bitstamp.net/api/',
2: 'https://www.bitstamp.net/api/v2/'}
exception_on_error = True
def __init__(self, proxydict=None, *args, **kwargs):
self.proxydict = proxydict
def _get(self, *args, **kwargs):
"""
Make a GET request.
"""
return self._request(requests.get, *args, **kwargs)
def _post(self, *args, **kwargs):
"""
Make a POST request.
"""
data = self._default_data()
data.update(kwargs.get('data') or {})
kwargs['data'] = data
return self._request(requests.post, *args, **kwargs)
def _default_data(self):
"""
Default data for a POST request.
"""
return {}
def _request(self, func, url, version=1, *args, **kwargs):
"""
Make a generic request, adding in any proxy defined by the instance.
Raises a ``requests.HTTPError`` if the response status isn't 200, and
raises a :class:`BitstampError` if the response contains a json encoded
error message.
"""
return_json = kwargs.pop('return_json', False)
url = self.api_url[version] + url
response = func(url, *args, **kwargs)
if 'proxies' not in kwargs:
kwargs['proxies'] = self.proxydict
# Check for error, raising an exception if appropriate.
response.raise_for_status()
try:
json_response = response.json()
except ValueError:
json_response = None
if isinstance(json_response, dict):
error = json_response.get('error')
if error:
raise BitstampError(error)
elif json_response.get('status') == "error":
raise BitstampError(json_response.get('reason'))
if return_json:
if json_response is None:
raise BitstampError(
"Could not decode json for: " + response.text)
return json_response
return response
|
kmadac/bitstamp-python-client | bitstamp/client.py | BaseClient._request | python | def _request(self, func, url, version=1, *args, **kwargs):
return_json = kwargs.pop('return_json', False)
url = self.api_url[version] + url
response = func(url, *args, **kwargs)
if 'proxies' not in kwargs:
kwargs['proxies'] = self.proxydict
# Check for error, raising an exception if appropriate.
response.raise_for_status()
try:
json_response = response.json()
except ValueError:
json_response = None
if isinstance(json_response, dict):
error = json_response.get('error')
if error:
raise BitstampError(error)
elif json_response.get('status') == "error":
raise BitstampError(json_response.get('reason'))
if return_json:
if json_response is None:
raise BitstampError(
"Could not decode json for: " + response.text)
return json_response
return response | Make a generic request, adding in any proxy defined by the instance.
Raises a ``requests.HTTPError`` if the response status isn't 200, and
raises a :class:`BitstampError` if the response contains a json encoded
error message. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L66-L101 | null | class BaseClient(object):
"""
A base class for the API Client methods that handles interaction with
the requests library.
"""
api_url = {1: 'https://www.bitstamp.net/api/',
2: 'https://www.bitstamp.net/api/v2/'}
exception_on_error = True
def __init__(self, proxydict=None, *args, **kwargs):
self.proxydict = proxydict
def _get(self, *args, **kwargs):
"""
Make a GET request.
"""
return self._request(requests.get, *args, **kwargs)
def _post(self, *args, **kwargs):
"""
Make a POST request.
"""
data = self._default_data()
data.update(kwargs.get('data') or {})
kwargs['data'] = data
return self._request(requests.post, *args, **kwargs)
def _default_data(self):
"""
Default data for a POST request.
"""
return {}
def _construct_url(self, url, base, quote):
"""
Adds the orderbook to the url if base and quote are specified.
"""
if not base and not quote:
return url
else:
url = url + base.lower() + quote.lower() + "/"
return url
|
kmadac/bitstamp-python-client | bitstamp/client.py | Public.ticker | python | def ticker(self, base="btc", quote="usd"):
url = self._construct_url("ticker/", base, quote)
return self._get(url, return_json=True, version=2) | Returns dictionary. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L106-L111 | [
"def _get(self, *args, **kwargs):\n \"\"\"\n Make a GET request.\n \"\"\"\n return self._request(requests.get, *args, **kwargs)\n",
"def _construct_url(self, url, base, quote):\n \"\"\"\n Adds the orderbook to the url if base and quote are specified.\n \"\"\"\n if not base and not quote:\n return url\n else:\n url = url + base.lower() + quote.lower() + \"/\"\n return url\n"
] | class Public(BaseClient):
def ticker_hour(self, base="btc", quote="usd"):
    """
    Returns dictionary of the average ticker of the past hour.
    """
    endpoint = self._construct_url("ticker_hour/", base, quote)
    return self._get(endpoint, return_json=True, version=2)
def order_book(self, group=True, base="btc", quote="usd"):
    """
    Returns dictionary with "bids" and "asks": each is a list of open
    orders, each order a ``[price, amount]`` pair.
    """
    endpoint = self._construct_url("order_book/", base, quote)
    return self._get(
        endpoint, params={'group': group}, return_json=True, version=2
    )
def transactions(self, time=TransRange.HOUR, base="btc", quote="usd"):
    """
    Returns transactions for the last 'timedelta' seconds; ``time`` is one
    of the :class:`TransRange` values.
    """
    endpoint = self._construct_url("transactions/", base, quote)
    return self._get(
        endpoint, params={'time': time}, return_json=True, version=2
    )
def conversion_rate_usd_eur(self):
"""
Returns simple dictionary::
{'buy': 'buy conversion rate', 'sell': 'sell conversion rate'}
"""
return self._get("eur_usd/", return_json=True, version=1)
def trading_pairs_info(self):
"""
Returns list of dictionaries specifying details of each available trading pair::
{
'description':'Litecoin / U.S. dollar',
'name':'LTC/USD',
'url_symbol':'ltcusd',
'trading':'Enabled',
'minimum_order':'5.0 USD',
'counter_decimals':2,
'base_decimals':8
},
"""
return self._get("trading-pairs-info/", return_json=True, version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Public.order_book | python | def order_book(self, group=True, base="btc", quote="usd"):
params = {'group': group}
url = self._construct_url("order_book/", base, quote)
return self._get(url, params=params, return_json=True, version=2) | Returns dictionary with "bids" and "asks".
Each is a list of open orders and each order is represented as a list
of price and amount. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L120-L129 | [
"def _get(self, *args, **kwargs):\n \"\"\"\n Make a GET request.\n \"\"\"\n return self._request(requests.get, *args, **kwargs)\n",
"def _construct_url(self, url, base, quote):\n \"\"\"\n Adds the orderbook to the url if base and quote are specified.\n \"\"\"\n if not base and not quote:\n return url\n else:\n url = url + base.lower() + quote.lower() + \"/\"\n return url\n"
] | class Public(BaseClient):
def ticker(self, base="btc", quote="usd"):
"""
Returns dictionary.
"""
url = self._construct_url("ticker/", base, quote)
return self._get(url, return_json=True, version=2)
def ticker_hour(self, base="btc", quote="usd"):
"""
Returns dictionary of the average ticker of the past hour.
"""
url = self._construct_url("ticker_hour/", base, quote)
return self._get(url, return_json=True, version=2)
def transactions(self, time=TransRange.HOUR, base="btc", quote="usd"):
"""
Returns transactions for the last 'timedelta' seconds.
Parameter time is specified by one of two values of TransRange class.
"""
params = {'time': time}
url = self._construct_url("transactions/", base, quote)
return self._get(url, params=params, return_json=True, version=2)
def conversion_rate_usd_eur(self):
"""
Returns simple dictionary::
{'buy': 'buy conversion rate', 'sell': 'sell conversion rate'}
"""
return self._get("eur_usd/", return_json=True, version=1)
def trading_pairs_info(self):
"""
Returns list of dictionaries specifying details of each available trading pair::
{
'description':'Litecoin / U.S. dollar',
'name':'LTC/USD',
'url_symbol':'ltcusd',
'trading':'Enabled',
'minimum_order':'5.0 USD',
'counter_decimals':2,
'base_decimals':8
},
"""
return self._get("trading-pairs-info/", return_json=True, version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Public.transactions | python | def transactions(self, time=TransRange.HOUR, base="btc", quote="usd"):
params = {'time': time}
url = self._construct_url("transactions/", base, quote)
return self._get(url, params=params, return_json=True, version=2) | Returns transactions for the last 'timedelta' seconds.
Parameter time is specified by one of two values of TransRange class. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L131-L138 | [
"def _get(self, *args, **kwargs):\n \"\"\"\n Make a GET request.\n \"\"\"\n return self._request(requests.get, *args, **kwargs)\n",
"def _construct_url(self, url, base, quote):\n \"\"\"\n Adds the orderbook to the url if base and quote are specified.\n \"\"\"\n if not base and not quote:\n return url\n else:\n url = url + base.lower() + quote.lower() + \"/\"\n return url\n"
] | class Public(BaseClient):
def ticker(self, base="btc", quote="usd"):
"""
Returns dictionary.
"""
url = self._construct_url("ticker/", base, quote)
return self._get(url, return_json=True, version=2)
def ticker_hour(self, base="btc", quote="usd"):
"""
Returns dictionary of the average ticker of the past hour.
"""
url = self._construct_url("ticker_hour/", base, quote)
return self._get(url, return_json=True, version=2)
def order_book(self, group=True, base="btc", quote="usd"):
"""
Returns dictionary with "bids" and "asks".
Each is a list of open orders and each order is represented as a list
of price and amount.
"""
params = {'group': group}
url = self._construct_url("order_book/", base, quote)
return self._get(url, params=params, return_json=True, version=2)
def conversion_rate_usd_eur(self):
"""
Returns simple dictionary::
{'buy': 'buy conversion rate', 'sell': 'sell conversion rate'}
"""
return self._get("eur_usd/", return_json=True, version=1)
def trading_pairs_info(self):
"""
Returns list of dictionaries specifying details of each available trading pair::
{
'description':'Litecoin / U.S. dollar',
'name':'LTC/USD',
'url_symbol':'ltcusd',
'trading':'Enabled',
'minimum_order':'5.0 USD',
'counter_decimals':2,
'base_decimals':8
},
"""
return self._get("trading-pairs-info/", return_json=True, version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.get_nonce | python | def get_nonce(self):
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce | Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L177-L195 | null | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading._default_data | python | def _default_data(self, *args, **kwargs):
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data | Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L197-L212 | null | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.account_balance | python | def account_balance(self, base="btc", quote="usd"):
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2) | Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L223-L246 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n",
"def _construct_url(self, url, base, quote):\n \"\"\"\n Adds the orderbook to the url if base and quote are specified.\n \"\"\"\n if not base and not quote:\n return url\n else:\n url = url + base.lower() + quote.lower() + \"/\"\n return url\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.user_transactions | python | def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2) | Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L248-L269 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n",
"def _construct_url(self, url, base, quote):\n \"\"\"\n Adds the orderbook to the url if base and quote are specified.\n \"\"\"\n if not base and not quote:\n return url\n else:\n url = url + base.lower() + quote.lower() + \"/\"\n return url\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.order_status | python | def order_status(self, order_id):
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1) | Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, .... | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L279-L293 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.cancel_order | python | def cancel_order(self, order_id, version=1):
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version) | Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L295-L309 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
    """Withdraw `amount` of bitcoin to the wallet at `address`."""
    payload = {'amount': amount, 'address': address}
    return self._post("bitcoin_withdrawal/", data=payload,
                      return_json=True, version=1)
def bitcoin_deposit_address(self):
    """Return the account's bitcoin deposit address as a unicode string."""
    return self._post("bitcoin_deposit_address/",
                      return_json=True, version=1)
def unconfirmed_bitcoin_deposits(self):
    """Return a JSON list of unconfirmed bitcoin transactions.

    Each transaction is a dict with keys:
        amount        -- bitcoin amount
        address       -- deposit address used
        confirmations -- number of confirmations
    """
    return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
    """Withdraw `amount` of litecoin to the wallet at `address`."""
    payload = {'amount': amount, 'address': address}
    return self._post("ltc_withdrawal/", data=payload,
                      return_json=True, version=2)
def litecoin_deposit_address(self):
    """Return the account's litecoin deposit address as a unicode string."""
    return self._post("ltc_address/", return_json=True, version=2)
def ethereum_withdrawal(self, amount, address):
    """Withdraw `amount` of ether to the wallet at `address`."""
    payload = {'amount': amount, 'address': address}
    return self._post("eth_withdrawal/", data=payload,
                      return_json=True, version=2)
def ethereum_deposit_address(self):
    """Return the account's ethereum deposit address as a unicode string."""
    return self._post("eth_address/", return_json=True, version=2)
def ripple_withdrawal(self, amount, address, currency):
    """Withdraw `amount` of `currency` over Ripple to `address`.

    Returns True on success, otherwise `_expect_true` raises.

    NOTE(review): unlike the sibling withdrawal methods, this call passes
    no explicit `version=` to `_post` and relies on its default -- confirm
    that is intentional.
    """
    payload = {'amount': amount, 'address': address, 'currency': currency}
    resp = self._post("ripple_withdrawal/", data=payload, return_json=True)
    return self._expect_true(resp)
def ripple_deposit_address(self):
    """Return the account's ripple deposit address as a unicode string."""
    resp = self._post("ripple_address/", version=1, return_json=True)
    return resp["address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
    """
    Sends xrps to another xrp wallet specified by address. Returns withdrawal id.

    `destination_tag`, when given, is forwarded to the API so the receiving
    exchange/wallet can route the deposit.
    """
    data = {'amount': amount, 'address': address}
    # A destination tag of 0 is a valid tag on the XRP ledger, so test
    # explicitly for None instead of truthiness (the old `if destination_tag:`
    # silently dropped tag 0).
    if destination_tag is not None:
        data['destination_tag'] = destination_tag
    return self._post("xrp_withdrawal/", data=data, return_json=True,
                      version=2)["id"]
def xrp_deposit_address(self):
    """Return the XRP deposit address and destination tag as a dict.

    Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
    """
    return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
    """Withdraw `amount` of bitcoin cash to the wallet at `address`."""
    payload = {'amount': amount, 'address': address}
    return self._post("bch_withdrawal/", data=payload,
                      return_json=True, version=2)
def bch_deposit_address(self):
    """Return the account's bitcoin cash deposit address as a unicode string."""
    return self._post("bch_address/", return_json=True, version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
    """Transfer funds from a sub-account to the main account.

    Returns a dict with the transfer status. `subaccount` must be the
    numerical id of the sub-account, not its name; when omitted the API
    default applies.
    """
    payload = {'amount': amount, 'currency': currency}
    if subaccount is not None:
        payload['subAccount'] = subaccount
    return self._post("transfer-to-main/", data=payload,
                      return_json=True, version=2)
def transfer_from_main(self, amount, currency, subaccount):
    """Transfer funds from the main account to a sub-account.

    Returns a dict with the transfer status. `subaccount` must be the
    numerical id of the sub-account, not its name.
    """
    payload = {
        'amount': amount,
        'currency': currency,
        'subAccount': subaccount,
    }
    return self._post("transfer-from-main/", data=payload,
                      return_json=True, version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.buy_limit_order | python | def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2) | Order to buy amount of bitcoins for specified price. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L320-L328 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n",
"def _construct_url(self, url, base, quote):\n \"\"\"\n Adds the orderbook to the url if base and quote are specified.\n \"\"\"\n if not base and not quote:\n return url\n else:\n url = url + base.lower() + quote.lower() + \"/\"\n return url\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.buy_market_order | python | def buy_market_order(self, amount, base="btc", quote="usd"):
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2) | Order to buy amount of bitcoins for market price. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L330-L336 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n",
"def _construct_url(self, url, base, quote):\n \"\"\"\n Adds the orderbook to the url if base and quote are specified.\n \"\"\"\n if not base and not quote:\n return url\n else:\n url = url + base.lower() + quote.lower() + \"/\"\n return url\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.check_bitstamp_code | python | def check_bitstamp_code(self, code):
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1) | Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L356-L363 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.redeem_bitstamp_code | python | def redeem_bitstamp_code(self, code):
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1) | Returns JSON dictionary containing USD and BTC amount added to user's
account. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L365-L372 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.withdrawal_requests | python | def withdrawal_requests(self, timedelta = 86400):
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data) | Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L374-L383 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.litecoin_withdrawal | python | def litecoin_withdrawal(self, amount, address):
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2) | Send litecoins to another litecoin wallet specified by address. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L415-L421 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.ripple_withdrawal | python | def ripple_withdrawal(self, amount, address, currency):
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response) | Returns true if successful. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L445-L452 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n",
"def _expect_true(self, response):\n \"\"\"\n A shortcut that raises a :class:`BitstampError` if the response didn't\n just contain the text 'true'.\n \"\"\"\n if response.text == u'true':\n return True\n raise BitstampError(\"Unexpected response\")\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.xrp_withdrawal | python | def xrp_withdrawal(self, amount, address, destination_tag=None):
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"] | Sends xrps to another xrp wallet specified by address. Returns withdrawal id. | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L461-L470 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
|
kmadac/bitstamp-python-client | bitstamp/client.py | Trading.transfer_to_main | python | def transfer_to_main(self, amount, currency, subaccount=None):
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2) | Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name | train | https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L495-L505 | [
"def _post(self, *args, **kwargs):\n \"\"\"\n Make a POST request.\n \"\"\"\n data = self._default_data()\n data.update(kwargs.get('data') or {})\n kwargs['data'] = data\n return self._request(requests.post, *args, **kwargs)\n"
] | class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
This isn't a thread-safe function however, so you should only rely on a
single thread if you have a high level of concurrent API requests in
your application.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
There could be reasons to set base and quote to None (or False),
because the result then will contain the fees for all currency pairs
For backwards compatibility this can not be the default however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
    """Send ``amount`` ethers to the ether wallet at ``address``."""
    payload = {'amount': amount, 'address': address}
    return self._post("eth_withdrawal/", data=payload,
                      return_json=True, version=2)
def ethereum_deposit_address(self):
    """Return the ethereum deposit address as a unicode string."""
    return self._post("eth_address/", version=2, return_json=True)
def ripple_withdrawal(self, amount, address, currency):
    """Withdraw ripple; returns True if successful.

    NOTE(review): unlike the other endpoints this call passes no explicit
    ``version`` to ``_post`` — presumably it relies on the default;
    confirm against the API wrapper.
    """
    payload = {'amount': amount, 'address': address, 'currency': currency}
    resp = self._post("ripple_withdrawal/", data=payload, return_json=True)
    return self._expect_true(resp)
def ripple_deposit_address(self):
    """Return the ripple deposit address as a unicode string."""
    response = self._post("ripple_address/", version=1, return_json=True)
    return response["address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
    """Send XRP to the wallet at ``address``; returns the withdrawal id."""
    payload = {'amount': amount, 'address': address}
    if destination_tag:
        payload['destination_tag'] = destination_tag
    response = self._post("xrp_withdrawal/", data=payload,
                          return_json=True, version=2)
    return response["id"]
def xrp_deposit_address(self):
    """Return the ripple deposit address and destination tag as a dict.

    Example: {u'destination_tag': 53965834,
              u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
    """
    return self._post("xrp_address/", return_json=True, version=2)
def bch_withdrawal(self, amount, address):
    """Send ``amount`` bitcoin cash to the wallet at ``address``."""
    payload = {'amount': amount, 'address': address}
    return self._post("bch_withdrawal/", data=payload,
                      return_json=True, version=2)
def bch_deposit_address(self):
    """Return the bitcoin cash deposit address as a unicode string."""
    return self._post("bch_address/", version=2, return_json=True)
def transfer_from_main(self, amount, currency, subaccount):
    """Transfer funds from the main account to a sub account.

    ``subaccount`` must be the numerical id of the sub account, not
    its name.  Returns a dictionary with the status.
    """
    payload = {'amount': amount,
               'currency': currency,
               'subAccount': subaccount}
    return self._post("transfer-from-main/", data=payload,
                      return_json=True, version=2)
|
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD.check_payment | python | def check_payment(self, payment):
validation = ""
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if not isinstance(payment['mandate_date'], datetime.date):
validation += "MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['mandate_date'] = str(payment['mandate_date'])
if not isinstance(payment['collection_date'], datetime.date):
validation += "COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['collection_date'] = str(payment['collection_date'])
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation) | Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid parameters were
encountered. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L38-L61 | null | class SepaDD(SepaPaymentInitn):
"""
This class creates a Sepa Direct Debit XML File.
"""
root_el = "CstmrDrctDbtInitn"
def __init__(self, config, schema="pain.008.002.02", clean=True):
    """Initialise the direct-debit document.

    Direct debits default to the CORE instrument unless the caller
    configured another one (e.g. B2B).
    """
    config.setdefault("instrument", "CORE")
    super().__init__(config, schema, clean)
def check_config(self, config):
    """
    Validate that the configuration dict contains all required keys.

    @param config: The config dict.
    @return: True if valid.
    @raise Exception: listing every missing key otherwise.
    """
    required = ("name", "IBAN", "BIC", "batch", "creditor_id", "currency")
    missing = [key.upper() + "_MISSING " for key in required
               if key not in config]
    if missing:
        raise Exception("Config file did not validate. " + "".join(missing))
    return True
def add_payment(self, payment):
    """
    Function to add payments
    @param payment: The payment dict
    @raise exception: when payment is invalid
    """
    if self.clean:
        # Third-party import kept local so it is only needed when
        # cleaning is enabled: transliterate to ASCII and truncate to
        # the SEPA field limits (70 chars name, 140 chars description).
        from text_unidecode import unidecode

        payment['name'] = unidecode(payment['name'])[:70]
        payment['description'] = unidecode(payment['description'])[:140]

    # Validate the payment
    self.check_payment(payment)

    # Get the CstmrDrctDbtInitnNode
    if not self._config['batch']:
        # Start building the non batch payment: each payment gets its
        # own PmtInf block with exactly one transaction, so the
        # payment-level fields are filled here.
        PmtInf_nodes = self._create_PmtInf_node()
        PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
        PmtInf_nodes['PmtMtdNode'].text = "DD"
        PmtInf_nodes['BtchBookgNode'].text = "false"
        PmtInf_nodes['NbOfTxsNode'].text = "1"
        PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
            payment['amount'])
        PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
        PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
        PmtInf_nodes['SeqTpNode'].text = payment['type']
        PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
        PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
        PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
        if 'BIC' in self._config:
            PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']

        PmtInf_nodes['ChrgBrNode'].text = "SLEV"
        PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
        PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
        PmtInf_nodes['PrtryNode'].text = "SEPA"

    # The debtor's BIC is optional; _create_TX_node only creates the
    # BIC element when one was supplied.
    if 'BIC' in payment:
        bic = True
    else:
        bic = False

    TX_nodes = self._create_TX_node(bic)
    TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
    TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])

    TX_nodes['MndtIdNode'].text = payment['mandate_id']
    TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
    if bic:
        TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']

    TX_nodes['Nm_Dbtr_Node'].text = payment['name']
    TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
    TX_nodes['UstrdNode'].text = payment['description']
    # Generate an end-to-end id if the caller did not provide one.
    if not payment.get('endtoend_id', ''):
        payment['endtoend_id'] = make_id(self._config['name'])
    TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']

    if self._config['batch']:
        # Batch mode: transactions are collected per (type, date) key
        # and folded into shared PmtInf blocks at export time.
        self._add_batch(TX_nodes, payment)
    else:
        self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
    """
    Function to create the GroupHeader (GrpHdr) in the
    CstmrDrctDbtInit Node
    """
    # Retrieve the node to which we will append the group header.
    CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')

    # Create the header nodes.
    GrpHdr_node = ET.Element("GrpHdr")
    MsgId_node = ET.Element("MsgId")
    CreDtTm_node = ET.Element("CreDtTm")
    # NbOfTxs and CtrlSum are created empty here; presumably they are
    # filled in later when the totals are known — TODO confirm.
    NbOfTxs_node = ET.Element("NbOfTxs")
    CtrlSum_node = ET.Element("CtrlSum")
    InitgPty_node = ET.Element("InitgPty")
    Nm_node = ET.Element("Nm")
    SupId_node = ET.Element("Id")
    OrgId_node = ET.Element("OrgId")
    Othr_node = ET.Element("Othr")
    Id_node = ET.Element("Id")

    # Add data to some header nodes.
    MsgId_node.text = self.msg_id
    CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    Nm_node.text = self._config['name']
    Id_node.text = self._config['creditor_id']

    # Append the nodes (inside-out: leaf ids first, then the
    # initiating-party subtree, then the header itself).
    Othr_node.append(Id_node)
    OrgId_node.append(Othr_node)
    SupId_node.append(OrgId_node)
    InitgPty_node.append(Nm_node)
    InitgPty_node.append(SupId_node)
    GrpHdr_node.append(MsgId_node)
    GrpHdr_node.append(CreDtTm_node)
    GrpHdr_node.append(NbOfTxs_node)
    GrpHdr_node.append(CtrlSum_node)
    GrpHdr_node.append(InitgPty_node)

    # Append the header to its parent
    CstmrDrctDbtInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
    """
    Create the blank payment-information (PmtInf) elements as a dict.

    The BIC element for the creditor agent is only created when a BIC
    is present in the configuration.
    """
    # (dict key, XML tag) pairs in the original creation order.
    spec = [
        ("PmtInfNode", "PmtInf"), ("PmtInfIdNode", "PmtInfId"),
        ("PmtMtdNode", "PmtMtd"), ("BtchBookgNode", "BtchBookg"),
        ("NbOfTxsNode", "NbOfTxs"), ("CtrlSumNode", "CtrlSum"),
        ("PmtTpInfNode", "PmtTpInf"), ("SvcLvlNode", "SvcLvl"),
        ("Cd_SvcLvl_Node", "Cd"), ("LclInstrmNode", "LclInstrm"),
        ("Cd_LclInstrm_Node", "Cd"), ("SeqTpNode", "SeqTp"),
        ("ReqdColltnDtNode", "ReqdColltnDt"), ("CdtrNode", "Cdtr"),
        ("Nm_Cdtr_Node", "Nm"), ("CdtrAcctNode", "CdtrAcct"),
        ("Id_CdtrAcct_Node", "Id"), ("IBAN_CdtrAcct_Node", "IBAN"),
        ("CdtrAgtNode", "CdtrAgt"),
        ("FinInstnId_CdtrAgt_Node", "FinInstnId"),
    ]
    if 'BIC' in self._config:
        spec.append(("BIC_CdtrAgt_Node", "BIC"))
    spec += [
        ("ChrgBrNode", "ChrgBr"), ("CdtrSchmeIdNode", "CdtrSchmeId"),
        ("Nm_CdtrSchmeId_Node", "Nm"), ("Id_CdtrSchmeId_Node", "Id"),
        ("PrvtIdNode", "PrvtId"), ("OthrNode", "Othr"),
        ("Id_Othr_Node", "Id"), ("SchmeNmNode", "SchmeNm"),
        ("PrtryNode", "Prtry"),
    ]
    return {key: ET.Element(tag) for key, tag in spec}
def _create_TX_node(self, bic=True):
    """
    Create the blank transaction (DrctDbtTxInf) elements as a dict.

    The debtor-agent BIC element is only created when ``bic`` is True.
    """
    # (dict key, XML tag) pairs in the original creation order.
    spec = [
        ("DrctDbtTxInfNode", "DrctDbtTxInf"), ("PmtIdNode", "PmtId"),
        ("EndToEndIdNode", "EndToEndId"), ("InstdAmtNode", "InstdAmt"),
        ("DrctDbtTxNode", "DrctDbtTx"),
        ("MndtRltdInfNode", "MndtRltdInf"), ("MndtIdNode", "MndtId"),
        ("DtOfSgntrNode", "DtOfSgntr"), ("DbtrAgtNode", "DbtrAgt"),
        ("FinInstnId_DbtrAgt_Node", "FinInstnId"),
    ]
    if bic:
        spec.append(("BIC_DbtrAgt_Node", "BIC"))
    spec += [
        ("DbtrNode", "Dbtr"), ("Nm_Dbtr_Node", "Nm"),
        ("DbtrAcctNode", "DbtrAcct"), ("Id_DbtrAcct_Node", "Id"),
        ("IBAN_DbtrAcct_Node", "IBAN"), ("RmtInfNode", "RmtInf"),
        ("UstrdNode", "Ustrd"),
    ]
    return {key: ET.Element(tag) for key, tag in spec}
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
    """
    Method to add a transaction as non batch, will fold the transaction
    together with the payment info node and append to the main xml.
    """
    # Fold the payment-level (PmtInf) subtree.  The append order
    # below defines the element order in the output document, so it
    # must follow the schema's sequence.
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])

    PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
    PmtInf_nodes['LclInstrmNode'].append(PmtInf_nodes['Cd_LclInstrm_Node'])
    PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
    PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
    PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])

    PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])

    PmtInf_nodes['Id_CdtrAcct_Node'].append(
        PmtInf_nodes['IBAN_CdtrAcct_Node'])
    PmtInf_nodes['CdtrAcctNode'].append(PmtInf_nodes['Id_CdtrAcct_Node'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])

    # The creditor-agent subtree only exists when a BIC was configured
    # (_create_PmtInf_node skips the BIC element otherwise).
    if 'BIC' in self._config:
        PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
            PmtInf_nodes['BIC_CdtrAgt_Node'])
        PmtInf_nodes['CdtrAgtNode'].append(
            PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])

    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])

    # The creditor-scheme name element is only part of the
    # pain.008.001.02 schema variant.
    if self.schema == 'pain.008.001.02':
        PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
    PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
    PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
    PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
    PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
    PmtInf_nodes['Id_CdtrSchmeId_Node'].append(PmtInf_nodes['PrvtIdNode'])
    PmtInf_nodes['CdtrSchmeIdNode'].append(
        PmtInf_nodes['Id_CdtrSchmeId_Node'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])

    # Fold the transaction (DrctDbtTxInf) subtree.
    TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])

    TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
    TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
    TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])

    # Debtor-agent BIC is optional on the transaction level too.
    if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
        TX_nodes['FinInstnId_DbtrAgt_Node'].append(
            TX_nodes['BIC_DbtrAgt_Node'])
    TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])

    TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])

    TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
    TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])

    TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])

    # Attach the single transaction to its PmtInf block and that block
    # to the document root element.
    PmtInf_nodes['PmtInfNode'].append(TX_nodes['DrctDbtTxInfNode'])
    CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
    CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
    """
    Method to add a payment as a batch. The transaction details are already
    present. Will fold the nodes accordingly and the call the
    _add_to_batch_list function to store the batch.
    """
    # Fold only the transaction subtree; the shared PmtInf block is
    # built later in _finalize_batch. Append order defines the element
    # order in the output document.
    TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])

    TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
    TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
    TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])

    # Debtor-agent BIC is optional; include it only when it was filled.
    if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
        TX_nodes['FinInstnId_DbtrAgt_Node'].append(
            TX_nodes['BIC_DbtrAgt_Node'])
    TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])

    TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])

    TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
    TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])

    TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])

    # Store the folded transaction under its (type, collection date) batch.
    self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
    """
    Store a built transaction node under its batch.

    The batch key combines the sequence type and the collection date;
    the batch list and its running amount total are created on first
    use. The totals feed the per-batch CtrlSum in _finalize_batch.
    @param TX: dict of folded transaction nodes
    @param payment: the payment dict the nodes were filled from
    """
    batch_key = payment['type'] + "::" + payment['collection_date']
    # setdefault creates the batch on first use and appends either way,
    # replacing the original key-presence branching.
    self._batches.setdefault(batch_key, []).append(TX['DrctDbtTxInfNode'])
    # Keep a running amount total per batch.
    self._batch_totals[batch_key] = (
        self._batch_totals.get(batch_key, 0) + payment['amount'])
def _finalize_batch(self):
    """
    Method to finalize the batch, this will iterate over the _batches dict
    and create a PmtInf node for each batch. The correct information (from
    the batch_key and batch_totals) will be inserted and the batch
    transaction nodes will be folded. Finally, the batches will be added to
    the main XML.
    """
    for batch_meta, batch_nodes in self._batches.items():
        # batch_meta is "<sequence type>::<collection date>" as built
        # by _add_to_batch_list.
        batch_meta_split = batch_meta.split("::")

        PmtInf_nodes = self._create_PmtInf_node()
        PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
        PmtInf_nodes['PmtMtdNode'].text = "DD"
        PmtInf_nodes['BtchBookgNode'].text = "true"
        PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
        PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
        PmtInf_nodes['SeqTpNode'].text = batch_meta_split[0]
        PmtInf_nodes['ReqdColltnDtNode'].text = batch_meta_split[1]
        PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
        PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
        if 'BIC' in self._config:
            PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']

        PmtInf_nodes['ChrgBrNode'].text = "SLEV"
        PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
        PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
        PmtInf_nodes['PrtryNode'].text = "SEPA"
        # Transaction count and control sum come from the accumulated
        # batch data.
        PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
        PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])

        # Fold the PmtInf subtree; append order defines element order.
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])

        PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
        PmtInf_nodes['LclInstrmNode'].append(
            PmtInf_nodes['Cd_LclInstrm_Node'])
        PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
        PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
        PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])

        PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])

        PmtInf_nodes['Id_CdtrAcct_Node'].append(
            PmtInf_nodes['IBAN_CdtrAcct_Node'])
        PmtInf_nodes['CdtrAcctNode'].append(
            PmtInf_nodes['Id_CdtrAcct_Node'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])

        # The creditor-agent subtree only exists when a BIC was
        # configured (_create_PmtInf_node skips it otherwise).
        if 'BIC' in self._config:
            PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
                PmtInf_nodes['BIC_CdtrAgt_Node'])
            PmtInf_nodes['CdtrAgtNode'].append(
                PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
            PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])

        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])

        # The creditor-scheme name element is only part of the
        # pain.008.001.02 schema variant.
        if self.schema == 'pain.008.001.02':
            PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
        PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
        PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
        PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
        PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
        PmtInf_nodes['Id_CdtrSchmeId_Node'].append(
            PmtInf_nodes['PrvtIdNode'])
        PmtInf_nodes['CdtrSchmeIdNode'].append(
            PmtInf_nodes['Id_CdtrSchmeId_Node'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])

        # Attach every stored transaction of this batch, then the batch
        # itself to the document root element.
        for txnode in batch_nodes:
            PmtInf_nodes['PmtInfNode'].append(txnode)

        CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
        CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD.add_payment | python | def add_payment(self, payment):
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Validate the payment
self.check_payment(payment)
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount'])
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = payment['type']
PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['MndtIdNode'].text = payment['mandate_id']
TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
if bic:
TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Dbtr_Node'].text = payment['name']
TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if not payment.get('endtoend_id', ''):
payment['endtoend_id'] = make_id(self._config['name'])
TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes) | Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L63-L127 | [
"def int_to_decimal_str(integer):\n \"\"\"\n Helper to convert integers (representing cents) into decimal currency\n string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT\n ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.\n @param integer The amount in cents\n @return string The amount in currency with full stop decimal separator\n \"\"\"\n int_string = str(integer)\n if len(int_string) < 2:\n return \"0.\" + int_string.zfill(2)\n else:\n return int_string[:-2] + \".\" + int_string[-2:]\n",
"def make_id(name):\n \"\"\"\n Create a random id combined with the creditor name.\n @return string consisting of name (truncated at 22 chars), -,\n 12 char rand hex string.\n \"\"\"\n name = re.sub(r'[^a-zA-Z0-9]', '', name)\n r = get_rand_string(12)\n if len(name) > 22:\n name = name[:22]\n return name + \"-\" + r\n",
"def check_payment(self, payment):\n \"\"\"\n Check the payment for required fields and validity.\n @param payment: The payment dict\n @return: True if valid, error string if invalid paramaters where\n encountered.\n \"\"\"\n validation = \"\"\n\n if not isinstance(payment['amount'], int):\n validation += \"AMOUNT_NOT_INTEGER \"\n\n if not isinstance(payment['mandate_date'], datetime.date):\n validation += \"MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE\"\n payment['mandate_date'] = str(payment['mandate_date'])\n\n if not isinstance(payment['collection_date'], datetime.date):\n validation += \"COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE\"\n payment['collection_date'] = str(payment['collection_date'])\n\n if validation == \"\":\n return True\n else:\n raise Exception('Payment did not validate: ' + validation)\n",
"def _create_PmtInf_node(self):\n \"\"\"\n Method to create the blank payment information nodes as a dict.\n \"\"\"\n ED = dict() # ED is element dict\n ED['PmtInfNode'] = ET.Element(\"PmtInf\")\n ED['PmtInfIdNode'] = ET.Element(\"PmtInfId\")\n ED['PmtMtdNode'] = ET.Element(\"PmtMtd\")\n ED['BtchBookgNode'] = ET.Element(\"BtchBookg\")\n ED['NbOfTxsNode'] = ET.Element(\"NbOfTxs\")\n ED['CtrlSumNode'] = ET.Element(\"CtrlSum\")\n ED['PmtTpInfNode'] = ET.Element(\"PmtTpInf\")\n ED['SvcLvlNode'] = ET.Element(\"SvcLvl\")\n ED['Cd_SvcLvl_Node'] = ET.Element(\"Cd\")\n ED['LclInstrmNode'] = ET.Element(\"LclInstrm\")\n ED['Cd_LclInstrm_Node'] = ET.Element(\"Cd\")\n ED['SeqTpNode'] = ET.Element(\"SeqTp\")\n ED['ReqdColltnDtNode'] = ET.Element(\"ReqdColltnDt\")\n ED['CdtrNode'] = ET.Element(\"Cdtr\")\n ED['Nm_Cdtr_Node'] = ET.Element(\"Nm\")\n ED['CdtrAcctNode'] = ET.Element(\"CdtrAcct\")\n ED['Id_CdtrAcct_Node'] = ET.Element(\"Id\")\n ED['IBAN_CdtrAcct_Node'] = ET.Element(\"IBAN\")\n ED['CdtrAgtNode'] = ET.Element(\"CdtrAgt\")\n ED['FinInstnId_CdtrAgt_Node'] = ET.Element(\"FinInstnId\")\n if 'BIC' in self._config:\n ED['BIC_CdtrAgt_Node'] = ET.Element(\"BIC\")\n ED['ChrgBrNode'] = ET.Element(\"ChrgBr\")\n ED['CdtrSchmeIdNode'] = ET.Element(\"CdtrSchmeId\")\n ED['Nm_CdtrSchmeId_Node'] = ET.Element(\"Nm\")\n ED['Id_CdtrSchmeId_Node'] = ET.Element(\"Id\")\n ED['PrvtIdNode'] = ET.Element(\"PrvtId\")\n ED['OthrNode'] = ET.Element(\"Othr\")\n ED['Id_Othr_Node'] = ET.Element(\"Id\")\n ED['SchmeNmNode'] = ET.Element(\"SchmeNm\")\n ED['PrtryNode'] = ET.Element(\"Prtry\")\n return ED\n",
"def _create_TX_node(self, bic=True):\n \"\"\"\n Method to create the blank transaction nodes as a dict. If bic is True,\n the BIC node will also be created.\n \"\"\"\n ED = dict()\n ED['DrctDbtTxInfNode'] = ET.Element(\"DrctDbtTxInf\")\n ED['PmtIdNode'] = ET.Element(\"PmtId\")\n ED['EndToEndIdNode'] = ET.Element(\"EndToEndId\")\n ED['InstdAmtNode'] = ET.Element(\"InstdAmt\")\n ED['DrctDbtTxNode'] = ET.Element(\"DrctDbtTx\")\n ED['MndtRltdInfNode'] = ET.Element(\"MndtRltdInf\")\n ED['MndtIdNode'] = ET.Element(\"MndtId\")\n ED['DtOfSgntrNode'] = ET.Element(\"DtOfSgntr\")\n ED['DbtrAgtNode'] = ET.Element(\"DbtrAgt\")\n ED['FinInstnId_DbtrAgt_Node'] = ET.Element(\"FinInstnId\")\n if bic:\n ED['BIC_DbtrAgt_Node'] = ET.Element(\"BIC\")\n ED['DbtrNode'] = ET.Element(\"Dbtr\")\n ED['Nm_Dbtr_Node'] = ET.Element(\"Nm\")\n ED['DbtrAcctNode'] = ET.Element(\"DbtrAcct\")\n ED['Id_DbtrAcct_Node'] = ET.Element(\"Id\")\n ED['IBAN_DbtrAcct_Node'] = ET.Element(\"IBAN\")\n ED['RmtInfNode'] = ET.Element(\"RmtInf\")\n ED['UstrdNode'] = ET.Element(\"Ustrd\")\n return ED\n",
"def _add_non_batch(self, TX_nodes, PmtInf_nodes):\n \"\"\"\n Method to add a transaction as non batch, will fold the transaction\n together with the payment info node and append to the main xml.\n \"\"\"\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])\n\n PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])\n PmtInf_nodes['LclInstrmNode'].append(PmtInf_nodes['Cd_LclInstrm_Node'])\n PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])\n PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])\n PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])\n\n PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])\n\n PmtInf_nodes['Id_CdtrAcct_Node'].append(\n PmtInf_nodes['IBAN_CdtrAcct_Node'])\n PmtInf_nodes['CdtrAcctNode'].append(PmtInf_nodes['Id_CdtrAcct_Node'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])\n\n if 'BIC' in self._config:\n PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(\n PmtInf_nodes['BIC_CdtrAgt_Node'])\n PmtInf_nodes['CdtrAgtNode'].append(\n PmtInf_nodes['FinInstnId_CdtrAgt_Node'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])\n\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])\n\n if self.schema == 'pain.008.001.02':\n PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])\n PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])\n PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])\n 
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])\n PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])\n PmtInf_nodes['Id_CdtrSchmeId_Node'].append(PmtInf_nodes['PrvtIdNode'])\n PmtInf_nodes['CdtrSchmeIdNode'].append(\n PmtInf_nodes['Id_CdtrSchmeId_Node'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])\n\n TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])\n\n TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])\n TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])\n TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])\n\n if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:\n TX_nodes['FinInstnId_DbtrAgt_Node'].append(\n TX_nodes['BIC_DbtrAgt_Node'])\n TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])\n\n TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])\n\n TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])\n TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])\n\n TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])\n PmtInf_nodes['PmtInfNode'].append(TX_nodes['DrctDbtTxInfNode'])\n CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')\n CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])\n",
"def _add_batch(self, TX_nodes, payment):\n \"\"\"\n Method to add a payment as a batch. The transaction details are already\n present. Will fold the nodes accordingly and the call the\n _add_to_batch_list function to store the batch.\n \"\"\"\n TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])\n\n TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])\n TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])\n TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])\n\n if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:\n TX_nodes['FinInstnId_DbtrAgt_Node'].append(\n TX_nodes['BIC_DbtrAgt_Node'])\n TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])\n\n TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])\n\n TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])\n TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])\n\n TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])\n TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])\n self._add_to_batch_list(TX_nodes, payment)\n"
] | class SepaDD(SepaPaymentInitn):
"""
This class creates a Sepa Direct Debit XML File.
"""
root_el = "CstmrDrctDbtInitn"
def __init__(self, config, schema="pain.008.002.02", clean=True):
    # Direct debits default to the CORE instrument unless the caller
    # configured another one (e.g. B2B).
    if "instrument" not in config:
        config["instrument"] = "CORE"
    super().__init__(config, schema, clean)
def check_config(self, config):
    """
    Check the config dict for required fields and validity.

    @param config: The config dict.
    @return: True if all required keys are present.
    @raise Exception: naming every missing key otherwise.
    """
    validation = ""
    required = ["name", "IBAN", "BIC", "batch", "creditor_id", "currency"]

    # Collect every missing key so the error reports them all at once.
    for config_item in required:
        if config_item not in config:
            validation += config_item.upper() + "_MISSING "

    if not validation:
        return True
    else:
        raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
    """
    Check the payment for required fields and validity.

    Invalid date fields are additionally coerced to their string
    representation before the error is raised.

    @param payment: The payment dict
    @return: True if valid.
    @raise Exception: listing every failed check otherwise.
    """
    validation = ""

    if not isinstance(payment['amount'], int):
        validation += "AMOUNT_NOT_INTEGER "

    if not isinstance(payment['mandate_date'], datetime.date):
        # Trailing space keeps the codes separated when several
        # checks fail (previously they ran together into one token).
        validation += "MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE "
        payment['mandate_date'] = str(payment['mandate_date'])

    if not isinstance(payment['collection_date'], datetime.date):
        validation += "COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE "
        payment['collection_date'] = str(payment['collection_date'])

    if validation == "":
        return True
    else:
        raise Exception('Payment did not validate: ' + validation)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrDrctDbtInit Node
"""
# Retrieve the node to which we will append the group header.
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
SupId_node = ET.Element("Id")
OrgId_node = ET.Element("OrgId")
Othr_node = ET.Element("Othr")
Id_node = ET.Element("Id")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
Id_node.text = self._config['creditor_id']
# Append the nodes
Othr_node.append(Id_node)
OrgId_node.append(Othr_node)
SupId_node.append(OrgId_node)
InitgPty_node.append(Nm_node)
InitgPty_node.append(SupId_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrDrctDbtInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['LclInstrmNode'] = ET.Element("LclInstrm")
ED['Cd_LclInstrm_Node'] = ET.Element("Cd")
ED['SeqTpNode'] = ET.Element("SeqTp")
ED['ReqdColltnDtNode'] = ET.Element("ReqdColltnDt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
ED['CdtrSchmeIdNode'] = ET.Element("CdtrSchmeId")
ED['Nm_CdtrSchmeId_Node'] = ET.Element("Nm")
ED['Id_CdtrSchmeId_Node'] = ET.Element("Id")
ED['PrvtIdNode'] = ET.Element("PrvtId")
ED['OthrNode'] = ET.Element("Othr")
ED['Id_Othr_Node'] = ET.Element("Id")
ED['SchmeNmNode'] = ET.Element("SchmeNm")
ED['PrtryNode'] = ET.Element("Prtry")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['DrctDbtTxInfNode'] = ET.Element("DrctDbtTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEndIdNode'] = ET.Element("EndToEndId")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['DrctDbtTxNode'] = ET.Element("DrctDbtTx")
ED['MndtRltdInfNode'] = ET.Element("MndtRltdInf")
ED['MndtIdNode'] = ET.Element("MndtId")
ED['DtOfSgntrNode'] = ET.Element("DtOfSgntr")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
    """
    Fold one transaction together with its payment-information node and
    append the result directly to the main XML (non-batch mode).

    The append order below matters: it determines the element order in
    the serialised pain.008 document.
    """
    P = PmtInf_nodes
    pmt_inf = P['PmtInfNode']
    for key in ('PmtInfIdNode', 'PmtMtdNode', 'BtchBookgNode',
                'NbOfTxsNode', 'CtrlSumNode'):
        pmt_inf.append(P[key])

    # PmtTpInf: service level, local instrument and sequence type.
    P['SvcLvlNode'].append(P['Cd_SvcLvl_Node'])
    P['LclInstrmNode'].append(P['Cd_LclInstrm_Node'])
    for key in ('SvcLvlNode', 'LclInstrmNode', 'SeqTpNode'):
        P['PmtTpInfNode'].append(P[key])
    pmt_inf.append(P['PmtTpInfNode'])
    pmt_inf.append(P['ReqdColltnDtNode'])

    # Creditor name and account.
    P['CdtrNode'].append(P['Nm_Cdtr_Node'])
    pmt_inf.append(P['CdtrNode'])
    P['Id_CdtrAcct_Node'].append(P['IBAN_CdtrAcct_Node'])
    P['CdtrAcctNode'].append(P['Id_CdtrAcct_Node'])
    pmt_inf.append(P['CdtrAcctNode'])

    # Creditor agent; the BIC child only exists when one is configured.
    if 'BIC' in self._config:
        P['FinInstnId_CdtrAgt_Node'].append(P['BIC_CdtrAgt_Node'])
    P['CdtrAgtNode'].append(P['FinInstnId_CdtrAgt_Node'])
    pmt_inf.append(P['CdtrAgtNode'])
    pmt_inf.append(P['ChrgBrNode'])

    # Creditor scheme identification (Nm only for pain.008.001.02).
    if self.schema == 'pain.008.001.02':
        P['CdtrSchmeIdNode'].append(P['Nm_CdtrSchmeId_Node'])
    P['OthrNode'].append(P['Id_Othr_Node'])
    P['SchmeNmNode'].append(P['PrtryNode'])
    P['OthrNode'].append(P['SchmeNmNode'])
    P['PrvtIdNode'].append(P['OthrNode'])
    P['Id_CdtrSchmeId_Node'].append(P['PrvtIdNode'])
    P['CdtrSchmeIdNode'].append(P['Id_CdtrSchmeId_Node'])
    pmt_inf.append(P['CdtrSchmeIdNode'])

    # Transaction details.
    T = TX_nodes
    T['PmtIdNode'].append(T['EndToEndIdNode'])
    T['DrctDbtTxInfNode'].append(T['PmtIdNode'])
    T['DrctDbtTxInfNode'].append(T['InstdAmtNode'])
    T['MndtRltdInfNode'].append(T['MndtIdNode'])
    T['MndtRltdInfNode'].append(T['DtOfSgntrNode'])
    T['DrctDbtTxNode'].append(T['MndtRltdInfNode'])
    T['DrctDbtTxInfNode'].append(T['DrctDbtTxNode'])
    # Debtor agent BIC is optional and only attached when it has text.
    if 'BIC_DbtrAgt_Node' in T and T['BIC_DbtrAgt_Node'].text is not None:
        T['FinInstnId_DbtrAgt_Node'].append(T['BIC_DbtrAgt_Node'])
    T['DbtrAgtNode'].append(T['FinInstnId_DbtrAgt_Node'])
    T['DrctDbtTxInfNode'].append(T['DbtrAgtNode'])
    T['DbtrNode'].append(T['Nm_Dbtr_Node'])
    T['DrctDbtTxInfNode'].append(T['DbtrNode'])
    T['Id_DbtrAcct_Node'].append(T['IBAN_DbtrAcct_Node'])
    T['DbtrAcctNode'].append(T['Id_DbtrAcct_Node'])
    T['DrctDbtTxInfNode'].append(T['DbtrAcctNode'])
    T['RmtInfNode'].append(T['UstrdNode'])
    T['DrctDbtTxInfNode'].append(T['RmtInfNode'])

    pmt_inf.append(T['DrctDbtTxInfNode'])
    self._xml.find('CstmrDrctDbtInitn').append(pmt_inf)
def _add_batch(self, TX_nodes, payment):
    """
    Fold the (already filled-in) transaction nodes for batch mode and
    hand the result to _add_to_batch_list for storage; the enclosing
    PmtInf nodes are built later when the batches are finalized.
    """
    T = TX_nodes
    T['PmtIdNode'].append(T['EndToEndIdNode'])
    T['DrctDbtTxInfNode'].append(T['PmtIdNode'])
    T['DrctDbtTxInfNode'].append(T['InstdAmtNode'])

    # Mandate related information.
    T['MndtRltdInfNode'].append(T['MndtIdNode'])
    T['MndtRltdInfNode'].append(T['DtOfSgntrNode'])
    T['DrctDbtTxNode'].append(T['MndtRltdInfNode'])
    T['DrctDbtTxInfNode'].append(T['DrctDbtTxNode'])

    # Debtor agent; the BIC child is optional and only attached when set.
    if 'BIC_DbtrAgt_Node' in T and T['BIC_DbtrAgt_Node'].text is not None:
        T['FinInstnId_DbtrAgt_Node'].append(T['BIC_DbtrAgt_Node'])
    T['DbtrAgtNode'].append(T['FinInstnId_DbtrAgt_Node'])
    T['DrctDbtTxInfNode'].append(T['DbtrAgtNode'])

    # Debtor name and account.
    T['DbtrNode'].append(T['Nm_Dbtr_Node'])
    T['DrctDbtTxInfNode'].append(T['DbtrNode'])
    T['Id_DbtrAcct_Node'].append(T['IBAN_DbtrAcct_Node'])
    T['DbtrAcctNode'].append(T['Id_DbtrAcct_Node'])
    T['DrctDbtTxInfNode'].append(T['DbtrAcctNode'])

    # Unstructured remittance information.
    T['RmtInfNode'].append(T['UstrdNode'])
    T['DrctDbtTxInfNode'].append(T['RmtInfNode'])

    self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment['type'] + "::" + payment['collection_date']
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
    """
    Build one PmtInf node per collected batch and append them all to
    the main XML.

    The batch key encodes "<SeqTp>::<ReqdColltnDt>" and _batch_totals
    supplies the control sum; the transaction nodes stored per batch
    are attached at the end.  Append order matters for the serialised
    element order.
    """
    for batch_key, tx_nodes in self._batches.items():
        seq_type, collection_date = batch_key.split("::")
        P = self._create_PmtInf_node()

        # Fill in the batch-level values.
        P['PmtInfIdNode'].text = make_id(self._config['name'])
        P['PmtMtdNode'].text = "DD"
        P['BtchBookgNode'].text = "true"
        P['Cd_SvcLvl_Node'].text = "SEPA"
        P['Cd_LclInstrm_Node'].text = self._config['instrument']
        P['SeqTpNode'].text = seq_type
        P['ReqdColltnDtNode'].text = collection_date
        P['Nm_Cdtr_Node'].text = self._config['name']
        P['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
        if 'BIC' in self._config:
            P['BIC_CdtrAgt_Node'].text = self._config['BIC']
        P['ChrgBrNode'].text = "SLEV"
        P['Nm_CdtrSchmeId_Node'].text = self._config['name']
        P['Id_Othr_Node'].text = self._config['creditor_id']
        P['PrtryNode'].text = "SEPA"
        P['NbOfTxsNode'].text = str(len(tx_nodes))
        P['CtrlSumNode'].text = int_to_decimal_str(
            self._batch_totals[batch_key])

        # Fold the PmtInf structure together.
        pmt_inf = P['PmtInfNode']
        for key in ('PmtInfIdNode', 'PmtMtdNode', 'BtchBookgNode',
                    'NbOfTxsNode', 'CtrlSumNode'):
            pmt_inf.append(P[key])
        P['SvcLvlNode'].append(P['Cd_SvcLvl_Node'])
        P['LclInstrmNode'].append(P['Cd_LclInstrm_Node'])
        for key in ('SvcLvlNode', 'LclInstrmNode', 'SeqTpNode'):
            P['PmtTpInfNode'].append(P[key])
        pmt_inf.append(P['PmtTpInfNode'])
        pmt_inf.append(P['ReqdColltnDtNode'])

        # Creditor name and account.
        P['CdtrNode'].append(P['Nm_Cdtr_Node'])
        pmt_inf.append(P['CdtrNode'])
        P['Id_CdtrAcct_Node'].append(P['IBAN_CdtrAcct_Node'])
        P['CdtrAcctNode'].append(P['Id_CdtrAcct_Node'])
        pmt_inf.append(P['CdtrAcctNode'])

        # Creditor agent; the BIC child only exists when configured.
        if 'BIC' in self._config:
            P['FinInstnId_CdtrAgt_Node'].append(P['BIC_CdtrAgt_Node'])
        P['CdtrAgtNode'].append(P['FinInstnId_CdtrAgt_Node'])
        pmt_inf.append(P['CdtrAgtNode'])
        pmt_inf.append(P['ChrgBrNode'])

        # Creditor scheme identification (Nm only for pain.008.001.02).
        if self.schema == 'pain.008.001.02':
            P['CdtrSchmeIdNode'].append(P['Nm_CdtrSchmeId_Node'])
        P['OthrNode'].append(P['Id_Othr_Node'])
        P['SchmeNmNode'].append(P['PrtryNode'])
        P['OthrNode'].append(P['SchmeNmNode'])
        P['PrvtIdNode'].append(P['OthrNode'])
        P['Id_CdtrSchmeId_Node'].append(P['PrvtIdNode'])
        P['CdtrSchmeIdNode'].append(P['Id_CdtrSchmeId_Node'])
        pmt_inf.append(P['CdtrSchmeIdNode'])

        # Attach every transaction collected for this batch.
        for tx in tx_nodes:
            pmt_inf.append(tx)
        self._xml.find('CstmrDrctDbtInitn').append(pmt_inf)
|
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD._create_header | python | def _create_header(self):
# Retrieve the node to which we will append the group header.
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
SupId_node = ET.Element("Id")
OrgId_node = ET.Element("OrgId")
Othr_node = ET.Element("Othr")
Id_node = ET.Element("Id")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
Id_node.text = self._config['creditor_id']
# Append the nodes
Othr_node.append(Id_node)
OrgId_node.append(Othr_node)
SupId_node.append(OrgId_node)
InitgPty_node.append(Nm_node)
InitgPty_node.append(SupId_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrDrctDbtInitn_node.append(GrpHdr_node) | Function to create the GroupHeader (GrpHdr) in the
CstmrDrctDbtInit Node | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L129-L169 | null | class SepaDD(SepaPaymentInitn):
"""
This class creates a Sepa Direct Debit XML File.
"""
root_el = "CstmrDrctDbtInitn"
def __init__(self, config, schema="pain.008.002.02", clean=True):
if "instrument" not in config:
config["instrument"] = "CORE"
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "batch", "creditor_id", "currency"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if not isinstance(payment['mandate_date'], datetime.date):
validation += "MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['mandate_date'] = str(payment['mandate_date'])
if not isinstance(payment['collection_date'], datetime.date):
validation += "COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['collection_date'] = str(payment['collection_date'])
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
    """
    Add a single direct-debit payment to the document.

    @param payment: The payment dict.
    @raise Exception: when the payment does not validate.
    """
    if self.clean:
        from text_unidecode import unidecode
        # SEPA text fields are ASCII-restricted and length-limited.
        payment['name'] = unidecode(payment['name'])[:70]
        payment['description'] = unidecode(payment['description'])[:140]

    # Validate (and normalise the date fields of) the payment.
    self.check_payment(payment)

    batch_mode = self._config['batch']
    if not batch_mode:
        # Non-batch mode: build a dedicated PmtInf for this one payment.
        PmtInf_nodes = self._create_PmtInf_node()
        PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
        PmtInf_nodes['PmtMtdNode'].text = "DD"
        PmtInf_nodes['BtchBookgNode'].text = "false"
        PmtInf_nodes['NbOfTxsNode'].text = "1"
        PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
            payment['amount'])
        PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
        PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
        PmtInf_nodes['SeqTpNode'].text = payment['type']
        PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
        PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
        PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
        if 'BIC' in self._config:
            PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
        PmtInf_nodes['ChrgBrNode'].text = "SLEV"
        PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
        PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
        PmtInf_nodes['PrtryNode'].text = "SEPA"

    # Build the transaction nodes; BIC element only when one is given.
    has_bic = 'BIC' in payment
    TX_nodes = self._create_TX_node(has_bic)
    TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
    TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
    TX_nodes['MndtIdNode'].text = payment['mandate_id']
    TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
    if has_bic:
        TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']
    TX_nodes['Nm_Dbtr_Node'].text = payment['name']
    TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
    TX_nodes['UstrdNode'].text = payment['description']
    # Generate an end-to-end id when the caller supplied none (or "").
    if not payment.get('endtoend_id', ''):
        payment['endtoend_id'] = make_id(self._config['name'])
    TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']

    if batch_mode:
        self._add_batch(TX_nodes, payment)
    else:
        self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['LclInstrmNode'] = ET.Element("LclInstrm")
ED['Cd_LclInstrm_Node'] = ET.Element("Cd")
ED['SeqTpNode'] = ET.Element("SeqTp")
ED['ReqdColltnDtNode'] = ET.Element("ReqdColltnDt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
ED['CdtrSchmeIdNode'] = ET.Element("CdtrSchmeId")
ED['Nm_CdtrSchmeId_Node'] = ET.Element("Nm")
ED['Id_CdtrSchmeId_Node'] = ET.Element("Id")
ED['PrvtIdNode'] = ET.Element("PrvtId")
ED['OthrNode'] = ET.Element("Othr")
ED['Id_Othr_Node'] = ET.Element("Id")
ED['SchmeNmNode'] = ET.Element("SchmeNm")
ED['PrtryNode'] = ET.Element("Prtry")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['DrctDbtTxInfNode'] = ET.Element("DrctDbtTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEndIdNode'] = ET.Element("EndToEndId")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['DrctDbtTxNode'] = ET.Element("DrctDbtTx")
ED['MndtRltdInfNode'] = ET.Element("MndtRltdInf")
ED['MndtIdNode'] = ET.Element("MndtId")
ED['DtOfSgntrNode'] = ET.Element("DtOfSgntr")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_DbtrAgt_Node'].append(
TX_nodes['BIC_DbtrAgt_Node'])
TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['DrctDbtTxInfNode'])
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_DbtrAgt_Node'].append(
TX_nodes['BIC_DbtrAgt_Node'])
TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment['type'] + "::" + payment['collection_date']
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
batch_meta_split = batch_meta.split("::")
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = batch_meta_split[0]
PmtInf_nodes['ReqdColltnDtNode'].text = batch_meta_split[1]
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(
PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(
PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(
PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD._create_PmtInf_node | python | def _create_PmtInf_node(self):
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['LclInstrmNode'] = ET.Element("LclInstrm")
ED['Cd_LclInstrm_Node'] = ET.Element("Cd")
ED['SeqTpNode'] = ET.Element("SeqTp")
ED['ReqdColltnDtNode'] = ET.Element("ReqdColltnDt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
ED['CdtrSchmeIdNode'] = ET.Element("CdtrSchmeId")
ED['Nm_CdtrSchmeId_Node'] = ET.Element("Nm")
ED['Id_CdtrSchmeId_Node'] = ET.Element("Id")
ED['PrvtIdNode'] = ET.Element("PrvtId")
ED['OthrNode'] = ET.Element("Othr")
ED['Id_Othr_Node'] = ET.Element("Id")
ED['SchmeNmNode'] = ET.Element("SchmeNm")
ED['PrtryNode'] = ET.Element("Prtry")
return ED | Method to create the blank payment information nodes as a dict. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L171-L207 | null | class SepaDD(SepaPaymentInitn):
"""
This class creates a Sepa Direct Debit XML File.
"""
root_el = "CstmrDrctDbtInitn"
def __init__(self, config, schema="pain.008.002.02", clean=True):
if "instrument" not in config:
config["instrument"] = "CORE"
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "batch", "creditor_id", "currency"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if not isinstance(payment['mandate_date'], datetime.date):
validation += "MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['mandate_date'] = str(payment['mandate_date'])
if not isinstance(payment['collection_date'], datetime.date):
validation += "COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['collection_date'] = str(payment['collection_date'])
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Validate the payment
self.check_payment(payment)
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount'])
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = payment['type']
PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['MndtIdNode'].text = payment['mandate_id']
TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
if bic:
TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Dbtr_Node'].text = payment['name']
TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if not payment.get('endtoend_id', ''):
payment['endtoend_id'] = make_id(self._config['name'])
TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrDrctDbtInit Node
"""
# Retrieve the node to which we will append the group header.
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
SupId_node = ET.Element("Id")
OrgId_node = ET.Element("OrgId")
Othr_node = ET.Element("Othr")
Id_node = ET.Element("Id")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
Id_node.text = self._config['creditor_id']
# Append the nodes
Othr_node.append(Id_node)
OrgId_node.append(Othr_node)
SupId_node.append(OrgId_node)
InitgPty_node.append(Nm_node)
InitgPty_node.append(SupId_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrDrctDbtInitn_node.append(GrpHdr_node)
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['DrctDbtTxInfNode'] = ET.Element("DrctDbtTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEndIdNode'] = ET.Element("EndToEndId")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['DrctDbtTxNode'] = ET.Element("DrctDbtTx")
ED['MndtRltdInfNode'] = ET.Element("MndtRltdInf")
ED['MndtIdNode'] = ET.Element("MndtId")
ED['DtOfSgntrNode'] = ET.Element("DtOfSgntr")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_DbtrAgt_Node'].append(
TX_nodes['BIC_DbtrAgt_Node'])
TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['DrctDbtTxInfNode'])
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_DbtrAgt_Node'].append(
TX_nodes['BIC_DbtrAgt_Node'])
TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment['type'] + "::" + payment['collection_date']
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
batch_meta_split = batch_meta.split("::")
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = batch_meta_split[0]
PmtInf_nodes['ReqdColltnDtNode'].text = batch_meta_split[1]
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(
PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(
PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(
PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD._create_TX_node | python | def _create_TX_node(self, bic=True):
ED = dict()
ED['DrctDbtTxInfNode'] = ET.Element("DrctDbtTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEndIdNode'] = ET.Element("EndToEndId")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['DrctDbtTxNode'] = ET.Element("DrctDbtTx")
ED['MndtRltdInfNode'] = ET.Element("MndtRltdInf")
ED['MndtIdNode'] = ET.Element("MndtId")
ED['DtOfSgntrNode'] = ET.Element("DtOfSgntr")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED | Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L209-L234 | null | class SepaDD(SepaPaymentInitn):
"""
This class creates a Sepa Direct Debit XML File.
"""
root_el = "CstmrDrctDbtInitn"
def __init__(self, config, schema="pain.008.002.02", clean=True):
if "instrument" not in config:
config["instrument"] = "CORE"
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "batch", "creditor_id", "currency"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if not isinstance(payment['mandate_date'], datetime.date):
validation += "MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['mandate_date'] = str(payment['mandate_date'])
if not isinstance(payment['collection_date'], datetime.date):
validation += "COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['collection_date'] = str(payment['collection_date'])
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Validate the payment
self.check_payment(payment)
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount'])
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = payment['type']
PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['MndtIdNode'].text = payment['mandate_id']
TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
if bic:
TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Dbtr_Node'].text = payment['name']
TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if not payment.get('endtoend_id', ''):
payment['endtoend_id'] = make_id(self._config['name'])
TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrDrctDbtInit Node
"""
# Retrieve the node to which we will append the group header.
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
SupId_node = ET.Element("Id")
OrgId_node = ET.Element("OrgId")
Othr_node = ET.Element("Othr")
Id_node = ET.Element("Id")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
Id_node.text = self._config['creditor_id']
# Append the nodes
Othr_node.append(Id_node)
OrgId_node.append(Othr_node)
SupId_node.append(OrgId_node)
InitgPty_node.append(Nm_node)
InitgPty_node.append(SupId_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrDrctDbtInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['LclInstrmNode'] = ET.Element("LclInstrm")
ED['Cd_LclInstrm_Node'] = ET.Element("Cd")
ED['SeqTpNode'] = ET.Element("SeqTp")
ED['ReqdColltnDtNode'] = ET.Element("ReqdColltnDt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
ED['CdtrSchmeIdNode'] = ET.Element("CdtrSchmeId")
ED['Nm_CdtrSchmeId_Node'] = ET.Element("Nm")
ED['Id_CdtrSchmeId_Node'] = ET.Element("Id")
ED['PrvtIdNode'] = ET.Element("PrvtId")
ED['OthrNode'] = ET.Element("Othr")
ED['Id_Othr_Node'] = ET.Element("Id")
ED['SchmeNmNode'] = ET.Element("SchmeNm")
ED['PrtryNode'] = ET.Element("Prtry")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_DbtrAgt_Node'].append(
TX_nodes['BIC_DbtrAgt_Node'])
TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['DrctDbtTxInfNode'])
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_DbtrAgt_Node'].append(
TX_nodes['BIC_DbtrAgt_Node'])
TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment['type'] + "::" + payment['collection_date']
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
batch_meta_split = batch_meta.split("::")
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = batch_meta_split[0]
PmtInf_nodes['ReqdColltnDtNode'].text = batch_meta_split[1]
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(
PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(
PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(
PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD._add_non_batch | python | def _add_non_batch(self, TX_nodes, PmtInf_nodes):
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_DbtrAgt_Node'].append(
TX_nodes['BIC_DbtrAgt_Node'])
TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['DrctDbtTxInfNode'])
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode']) | Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L236-L309 | null | class SepaDD(SepaPaymentInitn):
"""
This class creates a Sepa Direct Debit XML File.
"""
root_el = "CstmrDrctDbtInitn"
def __init__(self, config, schema="pain.008.002.02", clean=True):
    """Create a SEPA direct-debit document builder.

    @param config: Configuration dict; see check_config for required keys.
    @param schema: pain.008 schema version used when rendering the XML.
    @param clean: When True, transliterate payment text fields to ASCII.
    """
    # Default to the CORE collection scheme when none was configured.
    if "instrument" not in config:
        config["instrument"] = "CORE"
    super().__init__(config, schema, clean)
def check_config(self, config):
    """
    Check the config dict for required fields and validity.
    @param config: The config dict.
    @return: True if valid.
    @raise Exception: naming every missing key when validation fails.
    """
    required = ("name", "IBAN", "BIC", "batch", "creditor_id", "currency")
    # Collect one "<KEY>_MISSING " fragment per absent key, in order.
    problems = "".join(
        key.upper() + "_MISSING " for key in required if key not in config
    )
    if problems:
        raise Exception("Config file did not validate. " + problems)
    return True
def check_payment(self, payment):
    """
    Check the payment dict for required fields and validity.

    Dates are normalised to ISO strings in place so that they can later
    be assigned to XML text nodes.

    @param payment: The payment dict.
    @return: True if valid.
    @raise Exception: listing every validation problem found.
    """
    validation = ""
    if not isinstance(payment['amount'], int):
        validation += "AMOUNT_NOT_INTEGER "
    if not isinstance(payment['mandate_date'], datetime.date):
        # Trailing space added so consecutive messages do not run together.
        validation += "MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE "
    payment['mandate_date'] = str(payment['mandate_date'])
    if not isinstance(payment['collection_date'], datetime.date):
        validation += "COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE "
    payment['collection_date'] = str(payment['collection_date'])
    if validation == "":
        return True
    else:
        raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
    """
    Add a direct-debit payment to the document.

    @param payment: The payment dict (name, IBAN, amount in cents,
        mandate_id, mandate_date, collection_date, type, description,
        optional BIC and endtoend_id).
    @raise exception: when payment is invalid
    """
    if self.clean:
        # Transliterate to ASCII and clamp lengths (70 for the name,
        # 140 for the remittance description).
        from text_unidecode import unidecode
        payment['name'] = unidecode(payment['name'])[:70]
        payment['description'] = unidecode(payment['description'])[:140]
    # Validate the payment
    self.check_payment(payment)
    # Get the CstmrDrctDbtInitnNode
    if not self._config['batch']:
        # Start building the non batch payment
        PmtInf_nodes = self._create_PmtInf_node()
        PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
        PmtInf_nodes['PmtMtdNode'].text = "DD"
        PmtInf_nodes['BtchBookgNode'].text = "false"
        PmtInf_nodes['NbOfTxsNode'].text = "1"
        PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
            payment['amount'])
        PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
        PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
        PmtInf_nodes['SeqTpNode'].text = payment['type']
        PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
        PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
        PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
        if 'BIC' in self._config:
            PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
        PmtInf_nodes['ChrgBrNode'].text = "SLEV"
        PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
        PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
        PmtInf_nodes['PrtryNode'].text = "SEPA"
    # Only emit the debtor-agent BIC when the payment supplies one.
    if 'BIC' in payment:
        bic = True
    else:
        bic = False
    TX_nodes = self._create_TX_node(bic)
    TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
    TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
    TX_nodes['MndtIdNode'].text = payment['mandate_id']
    TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
    if bic:
        TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']
    TX_nodes['Nm_Dbtr_Node'].text = payment['name']
    TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
    TX_nodes['UstrdNode'].text = payment['description']
    if not payment.get('endtoend_id', ''):
        # Generate a unique end-to-end id when the caller supplies none.
        payment['endtoend_id'] = make_id(self._config['name'])
    TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']
    if self._config['batch']:
        self._add_batch(TX_nodes, payment)
    else:
        self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
    """
    Function to create the GroupHeader (GrpHdr) in the
    CstmrDrctDbtInit Node
    """
    # Retrieve the node to which we will append the group header.
    CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
    # Create the header nodes.
    GrpHdr_node = ET.Element("GrpHdr")
    MsgId_node = ET.Element("MsgId")
    CreDtTm_node = ET.Element("CreDtTm")
    NbOfTxs_node = ET.Element("NbOfTxs")
    CtrlSum_node = ET.Element("CtrlSum")
    InitgPty_node = ET.Element("InitgPty")
    Nm_node = ET.Element("Nm")
    SupId_node = ET.Element("Id")
    OrgId_node = ET.Element("OrgId")
    Othr_node = ET.Element("Othr")
    Id_node = ET.Element("Id")
    # Add data to some header nodes.
    MsgId_node.text = self.msg_id
    CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    Nm_node.text = self._config['name']
    Id_node.text = self._config['creditor_id']
    # NOTE(review): NbOfTxs and CtrlSum are left empty here; presumably
    # they are filled in when the document is finalized -- confirm in
    # the base class.
    # Append the nodes
    Othr_node.append(Id_node)
    OrgId_node.append(Othr_node)
    SupId_node.append(OrgId_node)
    InitgPty_node.append(Nm_node)
    InitgPty_node.append(SupId_node)
    GrpHdr_node.append(MsgId_node)
    GrpHdr_node.append(CreDtTm_node)
    GrpHdr_node.append(NbOfTxs_node)
    GrpHdr_node.append(CtrlSum_node)
    GrpHdr_node.append(InitgPty_node)
    # Append the header to its parent
    CstmrDrctDbtInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['LclInstrmNode'] = ET.Element("LclInstrm")
ED['Cd_LclInstrm_Node'] = ET.Element("Cd")
ED['SeqTpNode'] = ET.Element("SeqTp")
ED['ReqdColltnDtNode'] = ET.Element("ReqdColltnDt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
ED['CdtrSchmeIdNode'] = ET.Element("CdtrSchmeId")
ED['Nm_CdtrSchmeId_Node'] = ET.Element("Nm")
ED['Id_CdtrSchmeId_Node'] = ET.Element("Id")
ED['PrvtIdNode'] = ET.Element("PrvtId")
ED['OthrNode'] = ET.Element("Othr")
ED['Id_Othr_Node'] = ET.Element("Id")
ED['SchmeNmNode'] = ET.Element("SchmeNm")
ED['PrtryNode'] = ET.Element("Prtry")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['DrctDbtTxInfNode'] = ET.Element("DrctDbtTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEndIdNode'] = ET.Element("EndToEndId")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['DrctDbtTxNode'] = ET.Element("DrctDbtTx")
ED['MndtRltdInfNode'] = ET.Element("MndtRltdInf")
ED['MndtIdNode'] = ET.Element("MndtId")
ED['DtOfSgntrNode'] = ET.Element("DtOfSgntr")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_batch(self, TX_nodes, payment):
    """
    Method to add a payment as a batch. The transaction details are already
    present. Will fold the nodes accordingly and then call the
    _add_to_batch_list function to store the batch.
    """
    # Fold order determines the element order inside DrctDbtTxInf.
    TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
    TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
    TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
    TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
    # Only emit the debtor-agent BIC when one was actually supplied.
    if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
        TX_nodes['FinInstnId_DbtrAgt_Node'].append(
            TX_nodes['BIC_DbtrAgt_Node'])
    TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
    TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
    TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
    TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
    TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
    self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment['type'] + "::" + payment['collection_date']
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
    """
    Method to finalize the batch, this will iterate over the _batches dict
    and create a PmtInf node for each batch. The correct information (from
    the batch_key and batch_totals) will be inserted and the batch
    transaction nodes will be folded. Finally, the batches will be added to
    the main XML.
    """
    for batch_meta, batch_nodes in self._batches.items():
        # batch_meta is "<sequence type>::<collection date>"; see
        # _add_to_batch_list.
        batch_meta_split = batch_meta.split("::")
        PmtInf_nodes = self._create_PmtInf_node()
        PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
        PmtInf_nodes['PmtMtdNode'].text = "DD"
        PmtInf_nodes['BtchBookgNode'].text = "true"
        PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
        PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
        PmtInf_nodes['SeqTpNode'].text = batch_meta_split[0]
        PmtInf_nodes['ReqdColltnDtNode'].text = batch_meta_split[1]
        PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
        PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
        if 'BIC' in self._config:
            PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
        PmtInf_nodes['ChrgBrNode'].text = "SLEV"
        PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
        PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
        PmtInf_nodes['PrtryNode'].text = "SEPA"
        # Count and control sum come from the accumulated batch.
        PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
        PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
        # Fold the nodes; append order determines the XML element order.
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
        PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
        PmtInf_nodes['LclInstrmNode'].append(
            PmtInf_nodes['Cd_LclInstrm_Node'])
        PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
        PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
        PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
        PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
        PmtInf_nodes['Id_CdtrAcct_Node'].append(
            PmtInf_nodes['IBAN_CdtrAcct_Node'])
        PmtInf_nodes['CdtrAcctNode'].append(
            PmtInf_nodes['Id_CdtrAcct_Node'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
        # The BIC element only exists when configured (see
        # _create_PmtInf_node), so guard the append.
        if 'BIC' in self._config:
            PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
                PmtInf_nodes['BIC_CdtrAgt_Node'])
        PmtInf_nodes['CdtrAgtNode'].append(
            PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
        if self.schema == 'pain.008.001.02':
            # This schema version additionally carries the creditor name
            # inside CdtrSchmeId.
            PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
        PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
        PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
        PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
        PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
        PmtInf_nodes['Id_CdtrSchmeId_Node'].append(
            PmtInf_nodes['PrvtIdNode'])
        PmtInf_nodes['CdtrSchmeIdNode'].append(
            PmtInf_nodes['Id_CdtrSchmeId_Node'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
        # Attach the accumulated transactions, then add the batch to the
        # document root.
        for txnode in batch_nodes:
            PmtInf_nodes['PmtInfNode'].append(txnode)
        CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
        CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD._finalize_batch | python | def _finalize_batch(self):
for batch_meta, batch_nodes in self._batches.items():
batch_meta_split = batch_meta.split("::")
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = batch_meta_split[0]
PmtInf_nodes['ReqdColltnDtNode'].text = batch_meta_split[1]
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(
PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(
PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(
PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode']) | Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L362-L443 | null | class SepaDD(SepaPaymentInitn):
"""
This class creates a Sepa Direct Debit XML File.
"""
root_el = "CstmrDrctDbtInitn"
def __init__(self, config, schema="pain.008.002.02", clean=True):
    """Initialise the direct-debit builder.

    @param config: Configuration dict; validated by check_config.
    @param schema: pain.008 schema version to generate.
    @param clean: When True, transliterate payment text fields to ASCII.
    """
    # Fall back to the CORE direct-debit instrument by default.
    if "instrument" not in config:
        config["instrument"] = "CORE"
    super().__init__(config, schema, clean)
def check_config(self, config):
    """
    Check the config dict for required fields and validity.
    @param config: The config dict.
    @return: True when every required key is present.
    @raise Exception: naming each missing key otherwise.
    """
    problems = ""
    for key in ("name", "IBAN", "BIC", "batch", "creditor_id", "currency"):
        if key not in config:
            problems += key.upper() + "_MISSING "
    if problems:
        raise Exception("Config file did not validate. " + problems)
    return True
def check_payment(self, payment):
    """
    Check the payment dict for required fields and validity.

    Dates are normalised to ISO strings in place so that they can later
    be assigned to XML text nodes.

    @param payment: The payment dict.
    @return: True if valid.
    @raise Exception: listing every validation problem found.
    """
    validation = ""
    if not isinstance(payment['amount'], int):
        validation += "AMOUNT_NOT_INTEGER "
    if not isinstance(payment['mandate_date'], datetime.date):
        # Trailing space added so consecutive messages do not run together.
        validation += "MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE "
    payment['mandate_date'] = str(payment['mandate_date'])
    if not isinstance(payment['collection_date'], datetime.date):
        validation += "COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE "
    payment['collection_date'] = str(payment['collection_date'])
    if validation == "":
        return True
    else:
        raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
    """
    Add a direct-debit payment to the document.

    @param payment: The payment dict (name, IBAN, amount in cents,
        mandate_id, mandate_date, collection_date, type, description,
        optional BIC and endtoend_id).
    @raise exception: when payment is invalid
    """
    if self.clean:
        # Transliterate to ASCII and clamp lengths (70 for the name,
        # 140 for the remittance description).
        from text_unidecode import unidecode
        payment['name'] = unidecode(payment['name'])[:70]
        payment['description'] = unidecode(payment['description'])[:140]
    # Validate the payment
    self.check_payment(payment)
    # Get the CstmrDrctDbtInitnNode
    if not self._config['batch']:
        # Start building the non batch payment
        PmtInf_nodes = self._create_PmtInf_node()
        PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
        PmtInf_nodes['PmtMtdNode'].text = "DD"
        PmtInf_nodes['BtchBookgNode'].text = "false"
        PmtInf_nodes['NbOfTxsNode'].text = "1"
        PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
            payment['amount'])
        PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
        PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
        PmtInf_nodes['SeqTpNode'].text = payment['type']
        PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
        PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
        PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
        if 'BIC' in self._config:
            PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
        PmtInf_nodes['ChrgBrNode'].text = "SLEV"
        PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
        PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
        PmtInf_nodes['PrtryNode'].text = "SEPA"
    # Only emit the debtor-agent BIC when the payment supplies one.
    if 'BIC' in payment:
        bic = True
    else:
        bic = False
    TX_nodes = self._create_TX_node(bic)
    TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
    TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
    TX_nodes['MndtIdNode'].text = payment['mandate_id']
    TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
    if bic:
        TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']
    TX_nodes['Nm_Dbtr_Node'].text = payment['name']
    TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
    TX_nodes['UstrdNode'].text = payment['description']
    if not payment.get('endtoend_id', ''):
        # Generate a unique end-to-end id when the caller supplies none.
        payment['endtoend_id'] = make_id(self._config['name'])
    TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']
    if self._config['batch']:
        self._add_batch(TX_nodes, payment)
    else:
        self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
    """
    Function to create the GroupHeader (GrpHdr) in the
    CstmrDrctDbtInit Node
    """
    # Retrieve the node to which we will append the group header.
    CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
    # Create the header nodes.
    GrpHdr_node = ET.Element("GrpHdr")
    MsgId_node = ET.Element("MsgId")
    CreDtTm_node = ET.Element("CreDtTm")
    NbOfTxs_node = ET.Element("NbOfTxs")
    CtrlSum_node = ET.Element("CtrlSum")
    InitgPty_node = ET.Element("InitgPty")
    Nm_node = ET.Element("Nm")
    SupId_node = ET.Element("Id")
    OrgId_node = ET.Element("OrgId")
    Othr_node = ET.Element("Othr")
    Id_node = ET.Element("Id")
    # Add data to some header nodes.
    MsgId_node.text = self.msg_id
    CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    Nm_node.text = self._config['name']
    Id_node.text = self._config['creditor_id']
    # NOTE(review): NbOfTxs and CtrlSum stay empty here; presumably
    # filled in at finalization time -- confirm in the base class.
    # Append the nodes
    Othr_node.append(Id_node)
    OrgId_node.append(Othr_node)
    SupId_node.append(OrgId_node)
    InitgPty_node.append(Nm_node)
    InitgPty_node.append(SupId_node)
    GrpHdr_node.append(MsgId_node)
    GrpHdr_node.append(CreDtTm_node)
    GrpHdr_node.append(NbOfTxs_node)
    GrpHdr_node.append(CtrlSum_node)
    GrpHdr_node.append(InitgPty_node)
    # Append the header to its parent
    CstmrDrctDbtInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['LclInstrmNode'] = ET.Element("LclInstrm")
ED['Cd_LclInstrm_Node'] = ET.Element("Cd")
ED['SeqTpNode'] = ET.Element("SeqTp")
ED['ReqdColltnDtNode'] = ET.Element("ReqdColltnDt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
ED['CdtrSchmeIdNode'] = ET.Element("CdtrSchmeId")
ED['Nm_CdtrSchmeId_Node'] = ET.Element("Nm")
ED['Id_CdtrSchmeId_Node'] = ET.Element("Id")
ED['PrvtIdNode'] = ET.Element("PrvtId")
ED['OthrNode'] = ET.Element("Othr")
ED['Id_Othr_Node'] = ET.Element("Id")
ED['SchmeNmNode'] = ET.Element("SchmeNm")
ED['PrtryNode'] = ET.Element("Prtry")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['DrctDbtTxInfNode'] = ET.Element("DrctDbtTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEndIdNode'] = ET.Element("EndToEndId")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['DrctDbtTxNode'] = ET.Element("DrctDbtTx")
ED['MndtRltdInfNode'] = ET.Element("MndtRltdInf")
ED['MndtIdNode'] = ET.Element("MndtId")
ED['DtOfSgntrNode'] = ET.Element("DtOfSgntr")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
    """
    Method to add a transaction as non batch, will fold the transaction
    together with the payment info node and append to the main xml.
    """
    # PmtInf level: fold order determines the XML element order.
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
    PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
    PmtInf_nodes['LclInstrmNode'].append(PmtInf_nodes['Cd_LclInstrm_Node'])
    PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
    PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
    PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
    PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
    PmtInf_nodes['Id_CdtrAcct_Node'].append(
        PmtInf_nodes['IBAN_CdtrAcct_Node'])
    PmtInf_nodes['CdtrAcctNode'].append(PmtInf_nodes['Id_CdtrAcct_Node'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
    # The BIC element only exists when configured (see
    # _create_PmtInf_node), so guard the append.
    if 'BIC' in self._config:
        PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
            PmtInf_nodes['BIC_CdtrAgt_Node'])
    PmtInf_nodes['CdtrAgtNode'].append(
        PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
    if self.schema == 'pain.008.001.02':
        # Only this schema version carries the creditor name here.
        PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
    PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
    PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
    PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
    PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
    PmtInf_nodes['Id_CdtrSchmeId_Node'].append(PmtInf_nodes['PrvtIdNode'])
    PmtInf_nodes['CdtrSchmeIdNode'].append(
        PmtInf_nodes['Id_CdtrSchmeId_Node'])
    PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
    # Transaction level.
    TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
    TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
    TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
    TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
    # Only emit the debtor-agent BIC when one was actually supplied.
    if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
        TX_nodes['FinInstnId_DbtrAgt_Node'].append(
            TX_nodes['BIC_DbtrAgt_Node'])
    TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
    TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
    TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
    TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
    TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
    # Attach transaction to payment info, then to the document root.
    PmtInf_nodes['PmtInfNode'].append(TX_nodes['DrctDbtTxInfNode'])
    CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
    CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
    """
    Method to add a payment as a batch. The transaction details are already
    present. Will fold the nodes accordingly and then call the
    _add_to_batch_list function to store the batch.
    """
    # Fold order determines the element order inside DrctDbtTxInf.
    TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
    TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
    TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
    TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
    # Only emit the debtor-agent BIC when one was actually supplied.
    if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
        TX_nodes['FinInstnId_DbtrAgt_Node'].append(
            TX_nodes['BIC_DbtrAgt_Node'])
    TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
    TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
    TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
    TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
    TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
    TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
    self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment['type'] + "::" + payment['collection_date']
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['DrctDbtTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
|
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer.check_config | python | def check_config(self, config):
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation) | Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid parameters were
encountered. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L17-L34 | null | class SepaTransfer(SepaPaymentInitn):
"""
This class creates a Sepa transfer XML File.
"""
root_el = "CstmrCdtTrfInitn"
def __init__(self, config, schema="pain.001.001.03", clean=True):
    """Create a SEPA credit-transfer document builder.

    @param config: Configuration dict (name, IBAN, BIC, currency, batch).
    @param schema: pain.001 schema version used when rendering the XML.
    @param clean: When True, transliterate payment text fields to ASCII.
    """
    super().__init__(config, schema, clean)
def check_payment(self, payment):
    """
    Check the payment dict for required fields and validity.

    A valid execution_date is normalised to an ISO string in place so it
    can later be assigned to an XML text node.

    @param payment: The payment dict.
    @return: True if valid.
    @raise Exception: listing every validation problem found.
    """
    validation = ""
    required = ["name", "IBAN", "BIC", "amount", "description"]
    for config_item in required:
        if config_item not in payment:
            validation += config_item.upper() + "_MISSING "
    # Guard the lookup: a missing amount previously raised KeyError
    # before the validation error could be reported.
    if "amount" in payment and not isinstance(payment['amount'], int):
        validation += "AMOUNT_NOT_INTEGER "
    if 'execution_date' in payment:
        if not isinstance(payment['execution_date'], datetime.date):
            # Do NOT call .isoformat() on an invalid value -- that used
            # to raise AttributeError instead of a validation error.
            validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE "
        else:
            payment['execution_date'] = payment['execution_date'].isoformat()
    if validation == "":
        return True
    else:
        raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
    """
    Add a credit-transfer payment to the document.

    @param payment: The payment dict (name, IBAN, BIC, amount in cents,
        description, optional execution_date and endtoend_id).
    @raise exception: when payment is invalid
    """
    # Validate the payment
    self.check_payment(payment)
    if self.clean:
        # Transliterate to ASCII and clamp lengths (70 for the name,
        # 140 for the remittance description).
        from text_unidecode import unidecode
        payment['name'] = unidecode(payment['name'])[:70]
        payment['description'] = unidecode(payment['description'])[:140]
    # Get the CstmrDrctDbtInitnNode
    if not self._config['batch']:
        # Start building the non batch payment
        PmtInf_nodes = self._create_PmtInf_node()
        PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
        PmtInf_nodes['PmtMtdNode'].text = "TRF"
        PmtInf_nodes['BtchBookgNode'].text = "false"
        PmtInf_nodes['NbOfTxsNode'].text = "1"
        PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
            payment['amount']
        )
        PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
        if 'execution_date' in payment:
            PmtInf_nodes['ReqdExctnDtNode'].text = payment['execution_date']
        else:
            # Drop the element entirely when no execution date was given.
            del PmtInf_nodes['ReqdExctnDtNode']
        PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
        PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
        if 'BIC' in self._config:
            PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
        PmtInf_nodes['ChrgBrNode'].text = "SLEV"
    # Only emit the creditor-agent BIC when the payment supplies one.
    if 'BIC' in payment:
        bic = True
    else:
        bic = False
    TX_nodes = self._create_TX_node(bic)
    TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
    TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
    TX_nodes['EndToEnd_PmtId_Node'].text = payment.get('endtoend_id', 'NOTPROVIDED')
    if bic:
        TX_nodes['BIC_CdtrAgt_Node'].text = payment['BIC']
    TX_nodes['Nm_Cdtr_Node'].text = payment['name']
    TX_nodes['IBAN_CdtrAcct_Node'].text = payment['IBAN']
    TX_nodes['UstrdNode'].text = payment['description']
    if self._config['batch']:
        self._add_batch(TX_nodes, payment)
    else:
        self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['CdtTrfTxInfNode'] = ET.Element("CdtTrfTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEnd_PmtId_Node'] = ET.Element("EndToEndId")
ED['AmtNode'] = ET.Element("Amt")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer.check_payment | python | def check_payment(self, payment):
validation = ""
required = ["name", "IBAN", "BIC", "amount", "description"]
for config_item in required:
if config_item not in payment:
validation += config_item.upper() + "_MISSING "
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if 'execution_date' in payment:
if not isinstance(payment['execution_date'], datetime.date):
validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['execution_date'] = payment['execution_date'].isoformat()
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation) | Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid paramaters where
encountered. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L36-L61 | null | class SepaTransfer(SepaPaymentInitn):
"""
This class creates a Sepa transfer XML File.
"""
root_el = "CstmrCdtTrfInitn"
def __init__(self, config, schema="pain.001.001.03", clean=True):
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
# Validate the payment
self.check_payment(payment)
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount']
)
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if 'execution_date' in payment:
PmtInf_nodes['ReqdExctnDtNode'].text = payment['execution_date']
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['EndToEnd_PmtId_Node'].text = payment.get('endtoend_id', 'NOTPROVIDED')
if bic:
TX_nodes['BIC_CdtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Cdtr_Node'].text = payment['name']
TX_nodes['IBAN_CdtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['CdtTrfTxInfNode'] = ET.Element("CdtTrfTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEnd_PmtId_Node'] = ET.Element("EndToEndId")
ED['AmtNode'] = ET.Element("Amt")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._create_header | python | def _create_header(self):
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node) | Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L122-L153 | null | class SepaTransfer(SepaPaymentInitn):
"""
This class creates a Sepa transfer XML File.
"""
root_el = "CstmrCdtTrfInitn"
def __init__(self, config, schema="pain.001.001.03", clean=True):
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "amount", "description"]
for config_item in required:
if config_item not in payment:
validation += config_item.upper() + "_MISSING "
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if 'execution_date' in payment:
if not isinstance(payment['execution_date'], datetime.date):
validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['execution_date'] = payment['execution_date'].isoformat()
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
# Validate the payment
self.check_payment(payment)
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount']
)
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if 'execution_date' in payment:
PmtInf_nodes['ReqdExctnDtNode'].text = payment['execution_date']
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['EndToEnd_PmtId_Node'].text = payment.get('endtoend_id', 'NOTPROVIDED')
if bic:
TX_nodes['BIC_CdtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Cdtr_Node'].text = payment['name']
TX_nodes['IBAN_CdtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['CdtTrfTxInfNode'] = ET.Element("CdtTrfTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEnd_PmtId_Node'] = ET.Element("EndToEndId")
ED['AmtNode'] = ET.Element("Amt")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._create_PmtInf_node | python | def _create_PmtInf_node(self):
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED | Method to create the blank payment information nodes as a dict. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L155-L181 | null | class SepaTransfer(SepaPaymentInitn):
"""
This class creates a Sepa transfer XML File.
"""
root_el = "CstmrCdtTrfInitn"
def __init__(self, config, schema="pain.001.001.03", clean=True):
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "amount", "description"]
for config_item in required:
if config_item not in payment:
validation += config_item.upper() + "_MISSING "
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if 'execution_date' in payment:
if not isinstance(payment['execution_date'], datetime.date):
validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['execution_date'] = payment['execution_date'].isoformat()
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
# Validate the payment
self.check_payment(payment)
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount']
)
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if 'execution_date' in payment:
PmtInf_nodes['ReqdExctnDtNode'].text = payment['execution_date']
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['EndToEnd_PmtId_Node'].text = payment.get('endtoend_id', 'NOTPROVIDED')
if bic:
TX_nodes['BIC_CdtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Cdtr_Node'].text = payment['name']
TX_nodes['IBAN_CdtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node)
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['CdtTrfTxInfNode'] = ET.Element("CdtTrfTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEnd_PmtId_Node'] = ET.Element("EndToEndId")
ED['AmtNode'] = ET.Element("Amt")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._add_non_batch | python | def _add_non_batch(self, TX_nodes, PmtInf_nodes):
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode']) | Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L207-L260 | null | class SepaTransfer(SepaPaymentInitn):
"""
This class creates a Sepa transfer XML File.
"""
root_el = "CstmrCdtTrfInitn"
def __init__(self, config, schema="pain.001.001.03", clean=True):
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "amount", "description"]
for config_item in required:
if config_item not in payment:
validation += config_item.upper() + "_MISSING "
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if 'execution_date' in payment:
if not isinstance(payment['execution_date'], datetime.date):
validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['execution_date'] = payment['execution_date'].isoformat()
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
# Validate the payment
self.check_payment(payment)
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount']
)
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if 'execution_date' in payment:
PmtInf_nodes['ReqdExctnDtNode'].text = payment['execution_date']
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['EndToEnd_PmtId_Node'].text = payment.get('endtoend_id', 'NOTPROVIDED')
if bic:
TX_nodes['BIC_CdtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Cdtr_Node'].text = payment['name']
TX_nodes['IBAN_CdtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['CdtTrfTxInfNode'] = ET.Element("CdtTrfTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEnd_PmtId_Node'] = ET.Element("EndToEndId")
ED['AmtNode'] = ET.Element("Amt")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._add_batch | python | def _add_batch(self, TX_nodes, payment):
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment) | Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L262-L288 | null | class SepaTransfer(SepaPaymentInitn):
"""
This class creates a Sepa transfer XML File.
"""
root_el = "CstmrCdtTrfInitn"
def __init__(self, config, schema="pain.001.001.03", clean=True):
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "amount", "description"]
for config_item in required:
if config_item not in payment:
validation += config_item.upper() + "_MISSING "
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if 'execution_date' in payment:
if not isinstance(payment['execution_date'], datetime.date):
validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['execution_date'] = payment['execution_date'].isoformat()
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
# Validate the payment
self.check_payment(payment)
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount']
)
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if 'execution_date' in payment:
PmtInf_nodes['ReqdExctnDtNode'].text = payment['execution_date']
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['EndToEnd_PmtId_Node'].text = payment.get('endtoend_id', 'NOTPROVIDED')
if bic:
TX_nodes['BIC_CdtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Cdtr_Node'].text = payment['name']
TX_nodes['IBAN_CdtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['CdtTrfTxInfNode'] = ET.Element("CdtTrfTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEnd_PmtId_Node'] = ET.Element("EndToEndId")
ED['AmtNode'] = ET.Element("Amt")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._add_to_batch_list | python | def _add_to_batch_list(self, TX, payment):
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount'] | Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L290-L307 | null | class SepaTransfer(SepaPaymentInitn):
"""
This class creates a Sepa transfer XML File.
"""
root_el = "CstmrCdtTrfInitn"
def __init__(self, config, schema="pain.001.001.03", clean=True):
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "amount", "description"]
for config_item in required:
if config_item not in payment:
validation += config_item.upper() + "_MISSING "
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if 'execution_date' in payment:
if not isinstance(payment['execution_date'], datetime.date):
validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['execution_date'] = payment['execution_date'].isoformat()
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
# Validate the payment
self.check_payment(payment)
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount']
)
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if 'execution_date' in payment:
PmtInf_nodes['ReqdExctnDtNode'].text = payment['execution_date']
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['EndToEnd_PmtId_Node'].text = payment.get('endtoend_id', 'NOTPROVIDED')
if bic:
TX_nodes['BIC_CdtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Cdtr_Node'].text = payment['name']
TX_nodes['IBAN_CdtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['CdtTrfTxInfNode'] = ET.Element("CdtTrfTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEnd_PmtId_Node'] = ET.Element("EndToEndId")
ED['AmtNode'] = ET.Element("Amt")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
|
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._finalize_batch | python | def _finalize_batch(self):
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode']) | Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L309-L369 | null | class SepaTransfer(SepaPaymentInitn):
"""
This class creates a Sepa transfer XML File.
"""
root_el = "CstmrCdtTrfInitn"
def __init__(self, config, schema="pain.001.001.03", clean=True):
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid paramaters where
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "amount", "description"]
for config_item in required:
if config_item not in payment:
validation += config_item.upper() + "_MISSING "
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if 'execution_date' in payment:
if not isinstance(payment['execution_date'], datetime.date):
validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['execution_date'] = payment['execution_date'].isoformat()
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
# Validate the payment
self.check_payment(payment)
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount']
)
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if 'execution_date' in payment:
PmtInf_nodes['ReqdExctnDtNode'].text = payment['execution_date']
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['EndToEnd_PmtId_Node'].text = payment.get('endtoend_id', 'NOTPROVIDED')
if bic:
TX_nodes['BIC_CdtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Cdtr_Node'].text = payment['name']
TX_nodes['IBAN_CdtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['CdtTrfTxInfNode'] = ET.Element("CdtTrfTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEnd_PmtId_Node'] = ET.Element("EndToEndId")
ED['AmtNode'] = ET.Element("Amt")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and the call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existant. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
|
raphaelm/python-sepaxml | sepaxml/shared.py | SepaPaymentInitn._prepare_document | python | def _prepare_document(self):
self._xml = ET.Element("Document")
self._xml.set("xmlns",
"urn:iso:std:iso:20022:tech:xsd:" + self.schema)
self._xml.set("xmlns:xsi",
"http://www.w3.org/2001/XMLSchema-instance")
ET.register_namespace("",
"urn:iso:std:iso:20022:tech:xsd:" + self.schema)
ET.register_namespace("xsi",
"http://www.w3.org/2001/XMLSchema-instance")
n = ET.Element(self.root_el)
self._xml.append(n) | Build the main document node and set xml namespaces. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/shared.py#L36-L50 | null | class SepaPaymentInitn:
def __init__(self, config, schema, clean=True):
"""
Constructor. Checks the config, prepares the document and
builds the header.
@param param: The config dict.
@raise exception: When the config file is invalid.
"""
self._config = None # Will contain the config file.
self._xml = None # Will contain the final XML file.
self._batches = OrderedDict() # Will contain the SEPA batches.
self._batch_totals = OrderedDict() # Will contain the total amount to debit per batch for checksum total.
self.schema = schema
self.msg_id = make_msg_id()
self.clean = clean
config_result = self.check_config(config)
if config_result:
self._config = config
if self.clean:
from text_unidecode import unidecode
self._config['name'] = unidecode(self._config['name'])[:70]
self._prepare_document()
self._create_header()
def _create_header(self):
raise NotImplementedError()
def _finalize_batch(self):
raise NotImplementedError()
def export(self, validate=True):
"""
Method to output the xml as string. It will finalize the batches and
then calculate the checksums (amount sum and transaction count),
fill these into the group header and output the XML.
"""
self._finalize_batch()
ctrl_sum_total = 0
nb_of_txs_total = 0
for ctrl_sum in self._xml.iter('CtrlSum'):
if ctrl_sum.text is None:
continue
ctrl_sum_total += decimal_str_to_int(ctrl_sum.text)
for nb_of_txs in self._xml.iter('NbOfTxs'):
if nb_of_txs.text is None:
continue
nb_of_txs_total += int(nb_of_txs.text)
n = self._xml.find(self.root_el)
GrpHdr_node = n.find('GrpHdr')
CtrlSum_node = GrpHdr_node.find('CtrlSum')
NbOfTxs_node = GrpHdr_node.find('NbOfTxs')
CtrlSum_node.text = int_to_decimal_str(ctrl_sum_total)
NbOfTxs_node.text = str(nb_of_txs_total)
# Prepending the XML version is hacky, but cElementTree only offers this
# automatically if you write to a file, which we don't necessarily want.
out = b"<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + ET.tostring(
self._xml, "utf-8")
if validate and not is_valid_xml(out, self.schema):
raise ValidationError(
"The output SEPA file contains validation errors. This is likely due to an illegal value in one of "
"your input fields."
)
return out
|
raphaelm/python-sepaxml | sepaxml/shared.py | SepaPaymentInitn.export | python | def export(self, validate=True):
self._finalize_batch()
ctrl_sum_total = 0
nb_of_txs_total = 0
for ctrl_sum in self._xml.iter('CtrlSum'):
if ctrl_sum.text is None:
continue
ctrl_sum_total += decimal_str_to_int(ctrl_sum.text)
for nb_of_txs in self._xml.iter('NbOfTxs'):
if nb_of_txs.text is None:
continue
nb_of_txs_total += int(nb_of_txs.text)
n = self._xml.find(self.root_el)
GrpHdr_node = n.find('GrpHdr')
CtrlSum_node = GrpHdr_node.find('CtrlSum')
NbOfTxs_node = GrpHdr_node.find('NbOfTxs')
CtrlSum_node.text = int_to_decimal_str(ctrl_sum_total)
NbOfTxs_node.text = str(nb_of_txs_total)
# Prepending the XML version is hacky, but cElementTree only offers this
# automatically if you write to a file, which we don't necessarily want.
out = b"<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + ET.tostring(
self._xml, "utf-8")
if validate and not is_valid_xml(out, self.schema):
raise ValidationError(
"The output SEPA file contains validation errors. This is likely due to an illegal value in one of "
"your input fields."
)
return out | Method to output the xml as string. It will finalize the batches and
then calculate the checksums (amount sum and transaction count),
fill these into the group header and output the XML. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/shared.py#L58-L95 | [
"def int_to_decimal_str(integer):\n \"\"\"\n Helper to convert integers (representing cents) into decimal currency\n string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT\n ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.\n @param integer The amount in cents\n @return string The amount in currency with full stop decimal separator\n \"\"\"\n int_string = str(integer)\n if len(int_string) < 2:\n return \"0.\" + int_string.zfill(2)\n else:\n return int_string[:-2] + \".\" + int_string[-2:]\n",
"def decimal_str_to_int(decimal_string):\n \"\"\"\n Helper to decimal currency string into integers (cents).\n WARNING: DO NOT TRY TO DO THIS BY CONVERSION AND MULTIPLICATION,\n FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.\n @param string The amount in currency with full stop decimal separator\n @return integer The amount in cents\n \"\"\"\n int_string = decimal_string.replace('.', '')\n int_string = int_string.lstrip('0')\n return int(int_string)\n",
"def is_valid_xml(xmlout, schema):\n import xmlschema # xmlschema does some weird monkeypatching in etree, if we import it globally, things fail\n my_schema = xmlschema.XMLSchema(os.path.join(os.path.dirname(__file__), 'schemas', schema + '.xsd'))\n return my_schema.is_valid(xmlout.decode())\n",
"def _finalize_batch(self):\n \"\"\"\n Method to finalize the batch, this will iterate over the _batches dict\n and create a PmtInf node for each batch. The correct information (from\n the batch_key and batch_totals) will be inserted and the batch\n transaction nodes will be folded. Finally, the batches will be added to\n the main XML.\n \"\"\"\n for batch_meta, batch_nodes in self._batches.items():\n batch_meta_split = batch_meta.split(\"::\")\n PmtInf_nodes = self._create_PmtInf_node()\n PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])\n PmtInf_nodes['PmtMtdNode'].text = \"DD\"\n PmtInf_nodes['BtchBookgNode'].text = \"true\"\n PmtInf_nodes['Cd_SvcLvl_Node'].text = \"SEPA\"\n PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']\n PmtInf_nodes['SeqTpNode'].text = batch_meta_split[0]\n PmtInf_nodes['ReqdColltnDtNode'].text = batch_meta_split[1]\n PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']\n PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']\n\n if 'BIC' in self._config:\n PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']\n\n PmtInf_nodes['ChrgBrNode'].text = \"SLEV\"\n PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']\n PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']\n PmtInf_nodes['PrtryNode'].text = \"SEPA\"\n\n PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))\n PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])\n\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])\n\n PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])\n PmtInf_nodes['LclInstrmNode'].append(\n PmtInf_nodes['Cd_LclInstrm_Node'])\n 
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])\n PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])\n PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])\n\n PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])\n\n PmtInf_nodes['Id_CdtrAcct_Node'].append(\n PmtInf_nodes['IBAN_CdtrAcct_Node'])\n PmtInf_nodes['CdtrAcctNode'].append(\n PmtInf_nodes['Id_CdtrAcct_Node'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])\n\n if 'BIC' in self._config:\n PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(\n PmtInf_nodes['BIC_CdtrAgt_Node'])\n PmtInf_nodes['CdtrAgtNode'].append(\n PmtInf_nodes['FinInstnId_CdtrAgt_Node'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])\n\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])\n\n if self.schema == 'pain.008.001.02':\n PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])\n PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])\n PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])\n PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])\n PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])\n PmtInf_nodes['Id_CdtrSchmeId_Node'].append(\n PmtInf_nodes['PrvtIdNode'])\n PmtInf_nodes['CdtrSchmeIdNode'].append(\n PmtInf_nodes['Id_CdtrSchmeId_Node'])\n PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])\n\n for txnode in batch_nodes:\n PmtInf_nodes['PmtInfNode'].append(txnode)\n\n CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')\n CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode'])\n",
"def _finalize_batch(self):\n raise NotImplementedError()\n"
] | class SepaPaymentInitn:
def __init__(self, config, schema, clean=True):
"""
Constructor. Checks the config, prepares the document and
builds the header.
@param param: The config dict.
@raise exception: When the config file is invalid.
"""
self._config = None # Will contain the config file.
self._xml = None # Will contain the final XML file.
self._batches = OrderedDict() # Will contain the SEPA batches.
self._batch_totals = OrderedDict() # Will contain the total amount to debit per batch for checksum total.
self.schema = schema
self.msg_id = make_msg_id()
self.clean = clean
config_result = self.check_config(config)
if config_result:
self._config = config
if self.clean:
from text_unidecode import unidecode
self._config['name'] = unidecode(self._config['name'])[:70]
self._prepare_document()
self._create_header()
def _prepare_document(self):
"""
Build the main document node and set xml namespaces.
"""
self._xml = ET.Element("Document")
self._xml.set("xmlns",
"urn:iso:std:iso:20022:tech:xsd:" + self.schema)
self._xml.set("xmlns:xsi",
"http://www.w3.org/2001/XMLSchema-instance")
ET.register_namespace("",
"urn:iso:std:iso:20022:tech:xsd:" + self.schema)
ET.register_namespace("xsi",
"http://www.w3.org/2001/XMLSchema-instance")
n = ET.Element(self.root_el)
self._xml.append(n)
def _create_header(self):
raise NotImplementedError()
def _finalize_batch(self):
raise NotImplementedError()
|
raphaelm/python-sepaxml | sepaxml/utils.py | get_rand_string | python | def get_rand_string(length=12, allowed_chars='0123456789abcdef'):
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s" % (
random.getstate(),
time.time())).encode('utf-8')
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)]) | Returns a securely generated random string. Taken from the Django project
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L16-L36 | null | import hashlib
import random
import re
import time
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def make_msg_id():
"""
Create a semi random message id, by using 12 char random hex string and
a timestamp.
@return: string consisting of timestamp, -, random value
"""
random_string = get_rand_string(12)
timestamp = time.strftime("%Y%m%d%I%M%S")
msg_id = timestamp + "-" + random_string
return msg_id
def make_id(name):
"""
Create a random id combined with the creditor name.
@return string consisting of name (truncated at 22 chars), -,
12 char rand hex string.
"""
name = re.sub(r'[^a-zA-Z0-9]', '', name)
r = get_rand_string(12)
if len(name) > 22:
name = name[:22]
return name + "-" + r
def int_to_decimal_str(integer):
"""
Helper to convert integers (representing cents) into decimal currency
string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT
ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param integer The amount in cents
@return string The amount in currency with full stop decimal separator
"""
int_string = str(integer)
if len(int_string) < 2:
return "0." + int_string.zfill(2)
else:
return int_string[:-2] + "." + int_string[-2:]
def decimal_str_to_int(decimal_string):
"""
Helper to decimal currency string into integers (cents).
WARNING: DO NOT TRY TO DO THIS BY CONVERSION AND MULTIPLICATION,
FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param string The amount in currency with full stop decimal separator
@return integer The amount in cents
"""
int_string = decimal_string.replace('.', '')
int_string = int_string.lstrip('0')
return int(int_string)
|
raphaelm/python-sepaxml | sepaxml/utils.py | make_msg_id | python | def make_msg_id():
random_string = get_rand_string(12)
timestamp = time.strftime("%Y%m%d%I%M%S")
msg_id = timestamp + "-" + random_string
return msg_id | Create a semi random message id, by using 12 char random hex string and
a timestamp.
@return: string consisting of timestamp, -, random value | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L39-L48 | [
"def get_rand_string(length=12, allowed_chars='0123456789abcdef'):\n \"\"\"\n Returns a securely generated random string. Taken from the Django project\n\n The default length of 12 with the a-z, A-Z, 0-9 character set returns\n a 71-bit value. log_2((26+26+10)^12) =~ 71 bits\n \"\"\"\n if not using_sysrandom:\n # This is ugly, and a hack, but it makes things better than\n # the alternative of predictability. This re-seeds the PRNG\n # using a value that is hard for an attacker to predict, every\n # time a random string is required. This may change the\n # properties of the chosen random sequence slightly, but this\n # is better than absolute predictability.\n random.seed(\n hashlib.sha256(\n (\"%s%s\" % (\n random.getstate(),\n time.time())).encode('utf-8')\n ).digest())\n return ''.join([random.choice(allowed_chars) for i in range(length)])\n"
] | import hashlib
import random
import re
import time
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def get_rand_string(length=12, allowed_chars='0123456789abcdef'):
"""
Returns a securely generated random string. Taken from the Django project
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s" % (
random.getstate(),
time.time())).encode('utf-8')
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)])
def make_id(name):
"""
Create a random id combined with the creditor name.
@return string consisting of name (truncated at 22 chars), -,
12 char rand hex string.
"""
name = re.sub(r'[^a-zA-Z0-9]', '', name)
r = get_rand_string(12)
if len(name) > 22:
name = name[:22]
return name + "-" + r
def int_to_decimal_str(integer):
"""
Helper to convert integers (representing cents) into decimal currency
string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT
ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param integer The amount in cents
@return string The amount in currency with full stop decimal separator
"""
int_string = str(integer)
if len(int_string) < 2:
return "0." + int_string.zfill(2)
else:
return int_string[:-2] + "." + int_string[-2:]
def decimal_str_to_int(decimal_string):
"""
Helper to decimal currency string into integers (cents).
WARNING: DO NOT TRY TO DO THIS BY CONVERSION AND MULTIPLICATION,
FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param string The amount in currency with full stop decimal separator
@return integer The amount in cents
"""
int_string = decimal_string.replace('.', '')
int_string = int_string.lstrip('0')
return int(int_string)
|
raphaelm/python-sepaxml | sepaxml/utils.py | make_id | python | def make_id(name):
name = re.sub(r'[^a-zA-Z0-9]', '', name)
r = get_rand_string(12)
if len(name) > 22:
name = name[:22]
return name + "-" + r | Create a random id combined with the creditor name.
@return string consisting of name (truncated at 22 chars), -,
12 char rand hex string. | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L51-L61 | [
"def get_rand_string(length=12, allowed_chars='0123456789abcdef'):\n \"\"\"\n Returns a securely generated random string. Taken from the Django project\n\n The default length of 12 with the a-z, A-Z, 0-9 character set returns\n a 71-bit value. log_2((26+26+10)^12) =~ 71 bits\n \"\"\"\n if not using_sysrandom:\n # This is ugly, and a hack, but it makes things better than\n # the alternative of predictability. This re-seeds the PRNG\n # using a value that is hard for an attacker to predict, every\n # time a random string is required. This may change the\n # properties of the chosen random sequence slightly, but this\n # is better than absolute predictability.\n random.seed(\n hashlib.sha256(\n (\"%s%s\" % (\n random.getstate(),\n time.time())).encode('utf-8')\n ).digest())\n return ''.join([random.choice(allowed_chars) for i in range(length)])\n"
] | import hashlib
import random
import re
import time
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def get_rand_string(length=12, allowed_chars='0123456789abcdef'):
"""
Returns a securely generated random string. Taken from the Django project
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s" % (
random.getstate(),
time.time())).encode('utf-8')
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)])
def make_msg_id():
"""
Create a semi random message id, by using 12 char random hex string and
a timestamp.
@return: string consisting of timestamp, -, random value
"""
random_string = get_rand_string(12)
timestamp = time.strftime("%Y%m%d%I%M%S")
msg_id = timestamp + "-" + random_string
return msg_id
def int_to_decimal_str(integer):
"""
Helper to convert integers (representing cents) into decimal currency
string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT
ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param integer The amount in cents
@return string The amount in currency with full stop decimal separator
"""
int_string = str(integer)
if len(int_string) < 2:
return "0." + int_string.zfill(2)
else:
return int_string[:-2] + "." + int_string[-2:]
def decimal_str_to_int(decimal_string):
"""
Helper to decimal currency string into integers (cents).
WARNING: DO NOT TRY TO DO THIS BY CONVERSION AND MULTIPLICATION,
FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param string The amount in currency with full stop decimal separator
@return integer The amount in cents
"""
int_string = decimal_string.replace('.', '')
int_string = int_string.lstrip('0')
return int(int_string)
|
raphaelm/python-sepaxml | sepaxml/utils.py | int_to_decimal_str | python | def int_to_decimal_str(integer):
int_string = str(integer)
if len(int_string) < 2:
return "0." + int_string.zfill(2)
else:
return int_string[:-2] + "." + int_string[-2:] | Helper to convert integers (representing cents) into decimal currency
string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT
ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param integer The amount in cents
@return string The amount in currency with full stop decimal separator | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L64-L76 | null | import hashlib
import random
import re
import time
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def get_rand_string(length=12, allowed_chars='0123456789abcdef'):
"""
Returns a securely generated random string. Taken from the Django project
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s" % (
random.getstate(),
time.time())).encode('utf-8')
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)])
def make_msg_id():
"""
Create a semi random message id, by using 12 char random hex string and
a timestamp.
@return: string consisting of timestamp, -, random value
"""
random_string = get_rand_string(12)
timestamp = time.strftime("%Y%m%d%I%M%S")
msg_id = timestamp + "-" + random_string
return msg_id
def make_id(name):
"""
Create a random id combined with the creditor name.
@return string consisting of name (truncated at 22 chars), -,
12 char rand hex string.
"""
name = re.sub(r'[^a-zA-Z0-9]', '', name)
r = get_rand_string(12)
if len(name) > 22:
name = name[:22]
return name + "-" + r
def decimal_str_to_int(decimal_string):
"""
Helper to decimal currency string into integers (cents).
WARNING: DO NOT TRY TO DO THIS BY CONVERSION AND MULTIPLICATION,
FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param string The amount in currency with full stop decimal separator
@return integer The amount in cents
"""
int_string = decimal_string.replace('.', '')
int_string = int_string.lstrip('0')
return int(int_string)
|
raphaelm/python-sepaxml | sepaxml/utils.py | decimal_str_to_int | python | def decimal_str_to_int(decimal_string):
int_string = decimal_string.replace('.', '')
int_string = int_string.lstrip('0')
return int(int_string) | Helper to decimal currency string into integers (cents).
WARNING: DO NOT TRY TO DO THIS BY CONVERSION AND MULTIPLICATION,
FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param string The amount in currency with full stop decimal separator
@return integer The amount in cents | train | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L79-L89 | null | import hashlib
import random
import re
import time
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def get_rand_string(length=12, allowed_chars='0123456789abcdef'):
"""
Returns a securely generated random string. Taken from the Django project
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s" % (
random.getstate(),
time.time())).encode('utf-8')
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)])
def make_msg_id():
"""
Create a semi random message id, by using 12 char random hex string and
a timestamp.
@return: string consisting of timestamp, -, random value
"""
random_string = get_rand_string(12)
timestamp = time.strftime("%Y%m%d%I%M%S")
msg_id = timestamp + "-" + random_string
return msg_id
def make_id(name):
"""
Create a random id combined with the creditor name.
@return string consisting of name (truncated at 22 chars), -,
12 char rand hex string.
"""
name = re.sub(r'[^a-zA-Z0-9]', '', name)
r = get_rand_string(12)
if len(name) > 22:
name = name[:22]
return name + "-" + r
def int_to_decimal_str(integer):
"""
Helper to convert integers (representing cents) into decimal currency
string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT
ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param integer The amount in cents
@return string The amount in currency with full stop decimal separator
"""
int_string = str(integer)
if len(int_string) < 2:
return "0." + int_string.zfill(2)
else:
return int_string[:-2] + "." + int_string[-2:]
|
clld/clldutils | src/clldutils/jsonlib.py | parse | python | def parse(d):
res = {}
for k, v in iteritems(d):
if isinstance(v, string_types) and DATETIME_ISO_FORMAT.match(v):
v = dateutil.parser.parse(v)
res[k] = v
return res | Convert iso formatted timestamps found as values in the dict d to datetime objects.
:return: A shallow copy of d with converted timestamps. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/jsonlib.py#L18-L28 | null | from __future__ import unicode_literals
import re
import json
import contextlib
from datetime import date, datetime
from collections import OrderedDict
from six import PY3, string_types, iteritems
import dateutil.parser
from clldutils._compat import pathlib
DATETIME_ISO_FORMAT = re.compile('[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+')
def format(value):
if isinstance(value, (date, datetime)):
return value.isoformat()
return value
def dump(obj, path, **kw):
"""Python 2 + 3 compatible version of json.dump.
:param obj: The object to be dumped.
:param path: The path of the JSON file to be written.
:param kw: Keyword parameters are passed to json.dump
"""
open_kw = {'mode': 'w'}
if PY3: # pragma: no cover
open_kw['encoding'] = 'utf-8'
# avoid indented lines ending with ", " on PY2
if kw.get('indent') and kw.get('separators') is None:
kw['separators'] = (',', ': ')
with open(str(path), **open_kw) as fp:
return json.dump(obj, fp, **kw)
def load(path, **kw):
"""python 2 + 3 compatible version of json.load.
:param kw: Keyword parameters are passed to json.load
:return: The python object read from path.
"""
_kw = {}
if PY3: # pragma: no cover
_kw['encoding'] = 'utf-8'
with open(str(path), **_kw) as fp:
return json.load(fp, **kw)
@contextlib.contextmanager
def update(path, default=None, load_kw=None, **kw):
path = pathlib.Path(path)
if not path.exists():
if default is None:
raise ValueError('path does not exist')
res = default
else:
res = load(path, **(load_kw or {}))
yield res
dump(res, path, **kw)
def update_ordered(path, **kw):
return update(path, default=OrderedDict(), load_kw=dict(object_pairs_hook=OrderedDict), **kw)
|
clld/clldutils | src/clldutils/jsonlib.py | dump | python | def dump(obj, path, **kw):
open_kw = {'mode': 'w'}
if PY3: # pragma: no cover
open_kw['encoding'] = 'utf-8'
# avoid indented lines ending with ", " on PY2
if kw.get('indent') and kw.get('separators') is None:
kw['separators'] = (',', ': ')
with open(str(path), **open_kw) as fp:
return json.dump(obj, fp, **kw) | Python 2 + 3 compatible version of json.dump.
:param obj: The object to be dumped.
:param path: The path of the JSON file to be written.
:param kw: Keyword parameters are passed to json.dump | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/jsonlib.py#L37-L53 | null | from __future__ import unicode_literals
import re
import json
import contextlib
from datetime import date, datetime
from collections import OrderedDict
from six import PY3, string_types, iteritems
import dateutil.parser
from clldutils._compat import pathlib
DATETIME_ISO_FORMAT = re.compile('[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+')
def parse(d):
"""Convert iso formatted timestamps found as values in the dict d to datetime objects.
:return: A shallow copy of d with converted timestamps.
"""
res = {}
for k, v in iteritems(d):
if isinstance(v, string_types) and DATETIME_ISO_FORMAT.match(v):
v = dateutil.parser.parse(v)
res[k] = v
return res
def format(value):
if isinstance(value, (date, datetime)):
return value.isoformat()
return value
def load(path, **kw):
"""python 2 + 3 compatible version of json.load.
:param kw: Keyword parameters are passed to json.load
:return: The python object read from path.
"""
_kw = {}
if PY3: # pragma: no cover
_kw['encoding'] = 'utf-8'
with open(str(path), **_kw) as fp:
return json.load(fp, **kw)
@contextlib.contextmanager
def update(path, default=None, load_kw=None, **kw):
path = pathlib.Path(path)
if not path.exists():
if default is None:
raise ValueError('path does not exist')
res = default
else:
res = load(path, **(load_kw or {}))
yield res
dump(res, path, **kw)
def update_ordered(path, **kw):
return update(path, default=OrderedDict(), load_kw=dict(object_pairs_hook=OrderedDict), **kw)
|
clld/clldutils | src/clldutils/jsonlib.py | load | python | def load(path, **kw):
_kw = {}
if PY3: # pragma: no cover
_kw['encoding'] = 'utf-8'
with open(str(path), **_kw) as fp:
return json.load(fp, **kw) | python 2 + 3 compatible version of json.load.
:param kw: Keyword parameters are passed to json.load
:return: The python object read from path. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/jsonlib.py#L56-L66 | null | from __future__ import unicode_literals
import re
import json
import contextlib
from datetime import date, datetime
from collections import OrderedDict
from six import PY3, string_types, iteritems
import dateutil.parser
from clldutils._compat import pathlib
DATETIME_ISO_FORMAT = re.compile('[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+')
def parse(d):
"""Convert iso formatted timestamps found as values in the dict d to datetime objects.
:return: A shallow copy of d with converted timestamps.
"""
res = {}
for k, v in iteritems(d):
if isinstance(v, string_types) and DATETIME_ISO_FORMAT.match(v):
v = dateutil.parser.parse(v)
res[k] = v
return res
def format(value):
if isinstance(value, (date, datetime)):
return value.isoformat()
return value
def dump(obj, path, **kw):
"""Python 2 + 3 compatible version of json.dump.
:param obj: The object to be dumped.
:param path: The path of the JSON file to be written.
:param kw: Keyword parameters are passed to json.dump
"""
open_kw = {'mode': 'w'}
if PY3: # pragma: no cover
open_kw['encoding'] = 'utf-8'
# avoid indented lines ending with ", " on PY2
if kw.get('indent') and kw.get('separators') is None:
kw['separators'] = (',', ': ')
with open(str(path), **open_kw) as fp:
return json.dump(obj, fp, **kw)
@contextlib.contextmanager
def update(path, default=None, load_kw=None, **kw):
path = pathlib.Path(path)
if not path.exists():
if default is None:
raise ValueError('path does not exist')
res = default
else:
res = load(path, **(load_kw or {}))
yield res
dump(res, path, **kw)
def update_ordered(path, **kw):
return update(path, default=OrderedDict(), load_kw=dict(object_pairs_hook=OrderedDict), **kw)
|
clld/clldutils | src/clldutils/text.py | strip_brackets | python | def strip_brackets(text, brackets=None):
res = []
for c, type_ in _tokens(text, brackets=brackets):
if type_ == TextType.text:
res.append(c)
return ''.join(res).strip() | Strip brackets and what is inside brackets from text.
.. note::
If the text contains only one opening bracket, the rest of the text
will be ignored. This is a feature, not a bug, as we want to avoid that
this function raises errors too easily. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/text.py#L56-L68 | [
"def _tokens(text, brackets=None):\n if brackets is None:\n brackets = BRACKETS\n stack = []\n for c in text:\n if c in brackets:\n stack.append(brackets[c])\n yield c, TextType.open\n elif stack and c == stack[-1]:\n stack.pop()\n yield c, TextType.close\n elif not stack:\n yield c, TextType.text\n else:\n yield c, TextType.context\n"
] | # coding: utf8
from __future__ import unicode_literals
import re
import textwrap
from clldutils.misc import nfilter
# Brackets are pairs of single characters (<start-token>, <end-token>):
BRACKETS = {
"(": ")",
"{": "}",
"[": "]",
"(": ")",
"【": "】",
"『": "』",
"«": "»",
"⁽": "⁾",
"₍": "₎"
}
# To make it possible to detect compiled regex patterns, we store their type.
# See also http://stackoverflow.com/a/6102100
PATTERN_TYPE = type(re.compile('a'))
# A string of all unicode characters regarded as whitespace (by python's re module \s):
# See also http://stackoverflow.com/a/37903645
WHITESPACE = \
'\t\n\x0b\x0c\r\x1c\x1d\x1e\x1f \x85\xa0\u1680\u2000\u2001\u2002\u2003\u2004\u2005' \
'\u2006\u2007\u2008\u2009\u200a\u2028\u2029\u202f\u205f\u3000'
class TextType(object):
text = 1 # token outside of brackets
open = 2 # start-token of a bracket
context = 3 # non-bracket token inside brackets
close = 4 # end-token of a bracket
def _tokens(text, brackets=None):
if brackets is None:
brackets = BRACKETS
stack = []
for c in text:
if c in brackets:
stack.append(brackets[c])
yield c, TextType.open
elif stack and c == stack[-1]:
stack.pop()
yield c, TextType.close
elif not stack:
yield c, TextType.text
else:
yield c, TextType.context
def split_text_with_context(text, separators=WHITESPACE, brackets=None):
"""Splits text at separators outside of brackets.
:param text:
:param separators: An iterable of single character tokens.
:param brackets:
:return: A `list` of non-empty chunks.
.. note:: This function leaves content in brackets in the chunks.
"""
res, chunk = [], []
for c, type_ in _tokens(text, brackets=brackets):
if type_ == TextType.text and c in separators:
res.append(''.join(chunk).strip())
chunk = []
else:
chunk.append(c)
res.append(''.join(chunk).strip())
return nfilter(res)
def split_text(text, separators=re.compile('\s'), brackets=None, strip=False):
"""Split text along the separators unless they appear within brackets.
:param separators: An iterable single characters or a compiled regex pattern.
:param brackets: `dict` mapping start tokens to end tokens of what is to be \
recognized as brackets.
.. note:: This function will also strip content within brackets.
"""
if not isinstance(separators, PATTERN_TYPE):
separators = re.compile(
'[{0}]'.format(''.join('\{0}'.format(c) for c in separators)))
return nfilter(
s.strip() if strip else s for s in
separators.split(strip_brackets(text, brackets=brackets)))
def strip_chars(chars, sequence):
"""Strip the specified chars from anywhere in the text.
:param chars: An iterable of single character tokens to be stripped out.
:param sequence: An iterable of single character tokens.
:return: Text string concatenating all tokens in sequence which were not stripped.
"""
return ''.join(s for s in sequence if s not in chars)
def truncate_with_ellipsis(t, ellipsis='\u2026', width=40, **kw):
return t if len(t) <= width else textwrap.wrap(t, width=width, **kw)[0] + ellipsis
|
clld/clldutils | src/clldutils/text.py | split_text_with_context | python | def split_text_with_context(text, separators=WHITESPACE, brackets=None):
res, chunk = [], []
for c, type_ in _tokens(text, brackets=brackets):
if type_ == TextType.text and c in separators:
res.append(''.join(chunk).strip())
chunk = []
else:
chunk.append(c)
res.append(''.join(chunk).strip())
return nfilter(res) | Splits text at separators outside of brackets.
:param text:
:param separators: An iterable of single character tokens.
:param brackets:
:return: A `list` of non-empty chunks.
.. note:: This function leaves content in brackets in the chunks. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/text.py#L71-L89 | [
"def nfilter(seq):\n \"\"\"Replacement for python 2's filter(None, seq).\n\n :return: a list filtered from seq containing only truthy items.\n \"\"\"\n return [e for e in seq if e]\n",
"def _tokens(text, brackets=None):\n if brackets is None:\n brackets = BRACKETS\n stack = []\n for c in text:\n if c in brackets:\n stack.append(brackets[c])\n yield c, TextType.open\n elif stack and c == stack[-1]:\n stack.pop()\n yield c, TextType.close\n elif not stack:\n yield c, TextType.text\n else:\n yield c, TextType.context\n"
] | # coding: utf8
from __future__ import unicode_literals
import re
import textwrap
from clldutils.misc import nfilter
# Brackets are pairs of single characters (<start-token>, <end-token>):
BRACKETS = {
"(": ")",
"{": "}",
"[": "]",
"(": ")",
"【": "】",
"『": "』",
"«": "»",
"⁽": "⁾",
"₍": "₎"
}
# To make it possible to detect compiled regex patterns, we store their type.
# See also http://stackoverflow.com/a/6102100
PATTERN_TYPE = type(re.compile('a'))
# A string of all unicode characters regarded as whitespace (by python's re module \s):
# See also http://stackoverflow.com/a/37903645
WHITESPACE = \
'\t\n\x0b\x0c\r\x1c\x1d\x1e\x1f \x85\xa0\u1680\u2000\u2001\u2002\u2003\u2004\u2005' \
'\u2006\u2007\u2008\u2009\u200a\u2028\u2029\u202f\u205f\u3000'
class TextType(object):
text = 1 # token outside of brackets
open = 2 # start-token of a bracket
context = 3 # non-bracket token inside brackets
close = 4 # end-token of a bracket
def _tokens(text, brackets=None):
if brackets is None:
brackets = BRACKETS
stack = []
for c in text:
if c in brackets:
stack.append(brackets[c])
yield c, TextType.open
elif stack and c == stack[-1]:
stack.pop()
yield c, TextType.close
elif not stack:
yield c, TextType.text
else:
yield c, TextType.context
def strip_brackets(text, brackets=None):
"""Strip brackets and what is inside brackets from text.
.. note::
If the text contains only one opening bracket, the rest of the text
will be ignored. This is a feature, not a bug, as we want to avoid that
this function raises errors too easily.
"""
res = []
for c, type_ in _tokens(text, brackets=brackets):
if type_ == TextType.text:
res.append(c)
return ''.join(res).strip()
def split_text(text, separators=re.compile('\s'), brackets=None, strip=False):
"""Split text along the separators unless they appear within brackets.
:param separators: An iterable single characters or a compiled regex pattern.
:param brackets: `dict` mapping start tokens to end tokens of what is to be \
recognized as brackets.
.. note:: This function will also strip content within brackets.
"""
if not isinstance(separators, PATTERN_TYPE):
separators = re.compile(
'[{0}]'.format(''.join('\{0}'.format(c) for c in separators)))
return nfilter(
s.strip() if strip else s for s in
separators.split(strip_brackets(text, brackets=brackets)))
def strip_chars(chars, sequence):
"""Strip the specified chars from anywhere in the text.
:param chars: An iterable of single character tokens to be stripped out.
:param sequence: An iterable of single character tokens.
:return: Text string concatenating all tokens in sequence which were not stripped.
"""
return ''.join(s for s in sequence if s not in chars)
def truncate_with_ellipsis(t, ellipsis='\u2026', width=40, **kw):
return t if len(t) <= width else textwrap.wrap(t, width=width, **kw)[0] + ellipsis
|
clld/clldutils | src/clldutils/text.py | split_text | python | def split_text(text, separators=re.compile('\s'), brackets=None, strip=False):
if not isinstance(separators, PATTERN_TYPE):
separators = re.compile(
'[{0}]'.format(''.join('\{0}'.format(c) for c in separators)))
return nfilter(
s.strip() if strip else s for s in
separators.split(strip_brackets(text, brackets=brackets))) | Split text along the separators unless they appear within brackets.
:param separators: An iterable single characters or a compiled regex pattern.
:param brackets: `dict` mapping start tokens to end tokens of what is to be \
recognized as brackets.
.. note:: This function will also strip content within brackets. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/text.py#L92-L107 | [
"def nfilter(seq):\n \"\"\"Replacement for python 2's filter(None, seq).\n\n :return: a list filtered from seq containing only truthy items.\n \"\"\"\n return [e for e in seq if e]\n",
"def strip_brackets(text, brackets=None):\n \"\"\"Strip brackets and what is inside brackets from text.\n\n .. note::\n If the text contains only one opening bracket, the rest of the text\n will be ignored. This is a feature, not a bug, as we want to avoid that\n this function raises errors too easily.\n \"\"\"\n res = []\n for c, type_ in _tokens(text, brackets=brackets):\n if type_ == TextType.text:\n res.append(c)\n return ''.join(res).strip()\n"
] | # coding: utf8
from __future__ import unicode_literals
import re
import textwrap
from clldutils.misc import nfilter
# Brackets are pairs of single characters (<start-token>, <end-token>):
BRACKETS = {
"(": ")",
"{": "}",
"[": "]",
"(": ")",
"【": "】",
"『": "』",
"«": "»",
"⁽": "⁾",
"₍": "₎"
}
# To make it possible to detect compiled regex patterns, we store their type.
# See also http://stackoverflow.com/a/6102100
PATTERN_TYPE = type(re.compile('a'))
# A string of all unicode characters regarded as whitespace (by python's re module \s):
# See also http://stackoverflow.com/a/37903645
WHITESPACE = \
'\t\n\x0b\x0c\r\x1c\x1d\x1e\x1f \x85\xa0\u1680\u2000\u2001\u2002\u2003\u2004\u2005' \
'\u2006\u2007\u2008\u2009\u200a\u2028\u2029\u202f\u205f\u3000'
class TextType(object):
text = 1 # token outside of brackets
open = 2 # start-token of a bracket
context = 3 # non-bracket token inside brackets
close = 4 # end-token of a bracket
def _tokens(text, brackets=None):
if brackets is None:
brackets = BRACKETS
stack = []
for c in text:
if c in brackets:
stack.append(brackets[c])
yield c, TextType.open
elif stack and c == stack[-1]:
stack.pop()
yield c, TextType.close
elif not stack:
yield c, TextType.text
else:
yield c, TextType.context
def strip_brackets(text, brackets=None):
"""Strip brackets and what is inside brackets from text.
.. note::
If the text contains only one opening bracket, the rest of the text
will be ignored. This is a feature, not a bug, as we want to avoid that
this function raises errors too easily.
"""
res = []
for c, type_ in _tokens(text, brackets=brackets):
if type_ == TextType.text:
res.append(c)
return ''.join(res).strip()
def split_text_with_context(text, separators=WHITESPACE, brackets=None):
"""Splits text at separators outside of brackets.
:param text:
:param separators: An iterable of single character tokens.
:param brackets:
:return: A `list` of non-empty chunks.
.. note:: This function leaves content in brackets in the chunks.
"""
res, chunk = [], []
for c, type_ in _tokens(text, brackets=brackets):
if type_ == TextType.text and c in separators:
res.append(''.join(chunk).strip())
chunk = []
else:
chunk.append(c)
res.append(''.join(chunk).strip())
return nfilter(res)
def strip_chars(chars, sequence):
"""Strip the specified chars from anywhere in the text.
:param chars: An iterable of single character tokens to be stripped out.
:param sequence: An iterable of single character tokens.
:return: Text string concatenating all tokens in sequence which were not stripped.
"""
return ''.join(s for s in sequence if s not in chars)
def truncate_with_ellipsis(t, ellipsis='\u2026', width=40, **kw):
return t if len(t) <= width else textwrap.wrap(t, width=width, **kw)[0] + ellipsis
|
clld/clldutils | src/clldutils/sfm.py | marker_split | python | def marker_split(block):
marker = None
value = []
for line in block.split('\n'):
line = line.strip()
if line.startswith('\\_'):
continue # we simply ignore SFM header fields
match = MARKER_PATTERN.match(line)
if match:
if marker:
yield marker, '\n'.join(value)
marker = match.group('marker')
value = [line[match.end():]]
else:
value.append(line)
if marker:
yield marker, ('\n'.join(value)).strip() | Yield marker, value pairs from a text block (i.e. a list of lines).
:param block: text block consisting of \n separated lines as it will be the case for \
files read using "rU" mode.
:return: generator of (marker, value) pairs. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/sfm.py#L30-L53 | null | """Functionality to handle SIL Standard Format (SFM) files
#
# FIXME: seealso link for SFM spec!
#
This format is used natively for Toolbox. Applications which can export in a SFM format
include
- ELAN FIXME: link!
- Flex FIXME: link!
This implementation supports
- multiline values
- custom entry separator
"""
from __future__ import unicode_literals
import io
import re
import collections
from clldutils.misc import UnicodeMixin
MARKER_PATTERN = re.compile('\\\\(?P<marker>[A-Za-z1-3][A-Za-z_]*[0-9]*)(\s+|$)')
FIELD_SPLITTER_PATTERN = re.compile(';\s+')
class Entry(list, UnicodeMixin):
"""We store entries in SFM files as lists of (marker, value) pairs."""
@classmethod
def from_string(cls, block, keep_empty=False):
entry = cls()
for marker, value in marker_split(block.strip()):
value = value.strip()
if value or keep_empty:
entry.append((marker, value))
return entry
def markers(self):
return collections.Counter(k for k, _ in self)
def get(self, key, default=None):
"""Retrieve the first value for a marker or None."""
for k, v in self:
if k == key:
return v
return default
def getall(self, key):
"""Retrieve all values for a marker."""
return [v for k, v in self if k == key]
def __unicode__(self):
lines = []
for key, value in self:
lines.append('%s %s' % (key, value))
return '\n'.join('\\' + l for l in lines)
def parse(filename, encoding, entry_sep, entry_prefix, keep_empty=False):
with io.open(str(filename), 'r', encoding=encoding, newline=None) as fp:
content = fp.read()
for block in content.split(entry_sep):
if block.strip():
block = entry_prefix + block
else:
continue # pragma: no cover
yield [(k, v.strip())
for k, v in marker_split(block.strip()) if v.strip() or keep_empty]
class SFM(list):
"""A list of Entries
Simple usage to normalize a sfm file:
>>> sfm = SFM.from_file(fname, marker_map={'lexeme': 'lx'})
>>> sfm.write(fname)
"""
@classmethod
def from_file(cls, filename, **kw):
sfm = cls()
sfm.read(filename, **kw)
return sfm
def read(self,
filename,
encoding='utf-8',
marker_map=None,
entry_impl=Entry,
entry_sep='\n\n',
entry_prefix=None,
keep_empty=False):
"""Extend the list by parsing new entries from a file.
:param filename:
:param encoding:
:param marker_map: A dict used to map marker names.
:param entry_impl: Subclass of Entry or None
:param entry_sep:
:param entry_prefix:
"""
marker_map = marker_map or {}
for entry in parse(
filename,
encoding,
entry_sep,
entry_prefix or entry_sep,
keep_empty=keep_empty):
if entry:
self.append(entry_impl([(marker_map.get(k, k), v) for k, v in entry]))
def visit(self, visitor):
for i, entry in enumerate(self):
self[i] = visitor(entry) or entry
def write(self, filename, encoding='utf-8'):
"""Write the list of entries to a file.
:param filename:
:param encoding:
:return:
"""
with io.open(str(filename), 'w', encoding=encoding) as fp:
for entry in self:
fp.write(entry.__unicode__())
fp.write('\n\n')
|
clld/clldutils | src/clldutils/sfm.py | Entry.get | python | def get(self, key, default=None):
for k, v in self:
if k == key:
return v
return default | Retrieve the first value for a marker or None. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/sfm.py#L71-L76 | null | class Entry(list, UnicodeMixin):
"""We store entries in SFM files as lists of (marker, value) pairs."""
@classmethod
def from_string(cls, block, keep_empty=False):
entry = cls()
for marker, value in marker_split(block.strip()):
value = value.strip()
if value or keep_empty:
entry.append((marker, value))
return entry
def markers(self):
return collections.Counter(k for k, _ in self)
def getall(self, key):
"""Retrieve all values for a marker."""
return [v for k, v in self if k == key]
def __unicode__(self):
lines = []
for key, value in self:
lines.append('%s %s' % (key, value))
return '\n'.join('\\' + l for l in lines)
|
clld/clldutils | src/clldutils/sfm.py | SFM.read | python | def read(self,
filename,
encoding='utf-8',
marker_map=None,
entry_impl=Entry,
entry_sep='\n\n',
entry_prefix=None,
keep_empty=False):
marker_map = marker_map or {}
for entry in parse(
filename,
encoding,
entry_sep,
entry_prefix or entry_sep,
keep_empty=keep_empty):
if entry:
self.append(entry_impl([(marker_map.get(k, k), v) for k, v in entry])) | Extend the list by parsing new entries from a file.
:param filename:
:param encoding:
:param marker_map: A dict used to map marker names.
:param entry_impl: Subclass of Entry or None
:param entry_sep:
:param entry_prefix: | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/sfm.py#L117-L142 | [
"def parse(filename, encoding, entry_sep, entry_prefix, keep_empty=False):\n with io.open(str(filename), 'r', encoding=encoding, newline=None) as fp:\n content = fp.read()\n\n for block in content.split(entry_sep):\n if block.strip():\n block = entry_prefix + block\n else:\n continue # pragma: no cover\n yield [(k, v.strip())\n for k, v in marker_split(block.strip()) if v.strip() or keep_empty]\n"
] | class SFM(list):
"""A list of Entries
Simple usage to normalize a sfm file:
>>> sfm = SFM.from_file(fname, marker_map={'lexeme': 'lx'})
>>> sfm.write(fname)
"""
@classmethod
def from_file(cls, filename, **kw):
sfm = cls()
sfm.read(filename, **kw)
return sfm
def visit(self, visitor):
for i, entry in enumerate(self):
self[i] = visitor(entry) or entry
def write(self, filename, encoding='utf-8'):
"""Write the list of entries to a file.
:param filename:
:param encoding:
:return:
"""
with io.open(str(filename), 'w', encoding=encoding) as fp:
for entry in self:
fp.write(entry.__unicode__())
fp.write('\n\n')
|
clld/clldutils | src/clldutils/sfm.py | SFM.write | python | def write(self, filename, encoding='utf-8'):
with io.open(str(filename), 'w', encoding=encoding) as fp:
for entry in self:
fp.write(entry.__unicode__())
fp.write('\n\n') | Write the list of entries to a file.
:param filename:
:param encoding:
:return: | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/sfm.py#L148-L158 | null | class SFM(list):
"""A list of Entries
Simple usage to normalize a sfm file:
>>> sfm = SFM.from_file(fname, marker_map={'lexeme': 'lx'})
>>> sfm.write(fname)
"""
@classmethod
def from_file(cls, filename, **kw):
sfm = cls()
sfm.read(filename, **kw)
return sfm
def read(self,
filename,
encoding='utf-8',
marker_map=None,
entry_impl=Entry,
entry_sep='\n\n',
entry_prefix=None,
keep_empty=False):
"""Extend the list by parsing new entries from a file.
:param filename:
:param encoding:
:param marker_map: A dict used to map marker names.
:param entry_impl: Subclass of Entry or None
:param entry_sep:
:param entry_prefix:
"""
marker_map = marker_map or {}
for entry in parse(
filename,
encoding,
entry_sep,
entry_prefix or entry_sep,
keep_empty=keep_empty):
if entry:
self.append(entry_impl([(marker_map.get(k, k), v) for k, v in entry]))
def visit(self, visitor):
for i, entry in enumerate(self):
self[i] = visitor(entry) or entry
|
clld/clldutils | src/clldutils/clilib.py | confirm | python | def confirm(question, default=True):
valid = {"": default, "yes": True, "y": True, "no": False, "n": False}
while 1:
choice = input(question + (" [Y/n] " if default else " [y/N] ")).lower()
if choice in valid:
return valid[choice]
print("Please respond with 'y' or 'n' ") | Ask a yes/no question interactively.
:param question: The text of the question to ask.
:returns: True if the answer was "yes", False otherwise. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/clilib.py#L113-L124 | null | from __future__ import unicode_literals, print_function, division
import argparse
import logging
from collections import OrderedDict
from six.moves import input
from clldutils.loglib import Logging, get_colorlog
class ParserError(Exception):
pass
# Global registry for commands.
# Note: This registry is global so it can only be used for one ArgumentParser instance.
# Otherwise, different ArgumentParsers will share the same sub-commands which will rarely
# be intended.
_COMMANDS = []
class Command(object):
def __init__(self, func, name=None, usage=None):
self.func = func
self.name = name or func.__name__
self.usage = usage
@property
def doc(self):
return self.usage or self.func.__doc__
def __call__(self, args):
return self.func(args)
def command(name=None, usage=None):
def wrap(f):
_COMMANDS.append(Command(f, name=name, usage=usage))
return f
return wrap
def _attr(obj, attr):
return getattr(obj, attr, getattr(obj, '__{0}__'.format(attr), None))
class ArgumentParser(argparse.ArgumentParser):
"""A command line argument parser supporting sub-commands in a simple way.
Sub-commands can be registered in one of two ways:
- Passing functions as positional arguments into `ArgumentParser.__init__`.
- Decorating functions with the `command` decorator.
"""
def __init__(self, pkg_name, *commands, **kw):
commands = commands or _COMMANDS
kw.setdefault(
'description', "Main command line interface of the %s package." % pkg_name)
kw.setdefault(
'epilog', "Use '%(prog)s help <cmd>' to get help about individual commands.")
super(ArgumentParser, self).__init__(**kw)
self.commands = OrderedDict((_attr(cmd, 'name'), cmd) for cmd in commands)
self.pkg_name = pkg_name
self.add_argument("--verbosity", help="increase output verbosity")
self.add_argument('command', help=' | '.join(self.commands))
self.add_argument('args', nargs=argparse.REMAINDER)
def main(self, args=None, catch_all=False, parsed_args=None):
args = parsed_args or self.parse_args(args=args)
if args.command == 'help' and len(args.args):
# As help text for individual commands we simply re-use the docstrings of the
# callables registered for the command:
print(_attr(self.commands[args.args[0]], 'doc'))
else:
if args.command not in self.commands:
print('invalid command')
self.print_help()
return 64
try:
self.commands[args.command](args)
except ParserError as e:
print(e)
print(_attr(self.commands[args.command], 'doc'))
return 64
except Exception as e:
if catch_all:
print(e)
return 1
raise
return 0
class ArgumentParserWithLogging(ArgumentParser):
def __init__(self, pkg_name, *commands, **kw):
super(ArgumentParserWithLogging, self).__init__(pkg_name, *commands, **kw)
self.add_argument('--log', default=get_colorlog(pkg_name), help=argparse.SUPPRESS)
self.add_argument(
'--log-level',
default=logging.INFO,
help='log level [ERROR|WARN|INFO|DEBUG]',
type=lambda x: getattr(logging, x))
def main(self, args=None, catch_all=False, parsed_args=None):
args = parsed_args or self.parse_args(args=args)
with Logging(args.log, level=args.log_level):
return super(ArgumentParserWithLogging, self).main(
catch_all=catch_all, parsed_args=args)
|
clld/clldutils | src/clldutils/inifile.py | INI.gettext | python | def gettext(self, section, option, whitespace_preserving_prefix='.'):
lines = []
for line in self.get(section, option, fallback='').splitlines():
if re.match(re.escape(whitespace_preserving_prefix) + '\s+', line):
line = line[len(whitespace_preserving_prefix):]
lines.append(line)
return '\n'.join(lines) | While configparser supports multiline values, it does this at the expense of
stripping leading whitespace for each line in such a value. Sometimes we want
to preserve such whitespace, e.g. to be able to put markdown with nested lists
into INI files. We support this be introducing a special prefix, which is
prepended to lines starting with whitespace in `settext` and stripped in
`gettext`. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/inifile.py#L44-L58 | null | class INI(configparser.ConfigParser):
@staticmethod
def format_list(l):
return ''.join('\n' + item for item in l)
@classmethod
def from_file(cls, fname, encoding='utf-8', **kw):
obj = cls(**kw)
obj.read(str(fname), encoding=encoding)
return obj
def write_string(self, **kw):
res = StringIO()
res.write('# -*- coding: utf-8 -*-\n')
super(INI, self).write(res, **kw)
return res.getvalue()
def set(self, section, option, value=None):
if value is None:
return
if not self.has_section(section):
self.add_section(section)
if isinstance(value, (list, tuple)):
value = self.format_list(value)
elif not isinstance(value, string_types):
value = '%s' % value
super(INI, self).set(section, option, value)
def getlist(self, section, option):
return self.get(section, option, fallback='').strip().splitlines()
def settext(self, section, option, value, whitespace_preserving_prefix='.'):
lines = []
for line in value.splitlines():
if re.match('\s+', line):
line = whitespace_preserving_prefix + line
lines.append(line)
self.set(section, option, '\n'.join(lines))
def write(self, fname, **kw):
with io.open(str(fname), 'w', encoding='utf-8') as fp:
fp.write(self.write_string(**kw))
|
clld/clldutils | src/clldutils/misc.py | data_url | python | def data_url(content, mimetype=None):
if isinstance(content, pathlib.Path):
if not mimetype:
mimetype = guess_type(content.name)[0]
with content.open('rb') as fp:
content = fp.read()
else:
if isinstance(content, text_type):
content = content.encode('utf8')
return "data:{0};base64,{1}".format(
mimetype or 'application/octet-stream', b64encode(content).decode()) | Returns content encoded as base64 Data URI.
:param content: bytes or str or Path
:param mimetype: mimetype for
:return: str object (consisting only of ASCII, though)
.. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L24-L43 | null | """Generic utility functions."""
from __future__ import unicode_literals, division, absolute_import
import re
import string
import unicodedata
from mimetypes import guess_type
from base64 import b64encode
import warnings
from clldutils._compat import pathlib
from six import PY3, string_types, text_type, binary_type, iteritems
__all__ = [
'data_url', 'log_or_raise', 'nfilter', 'to_binary', 'dict_merged', 'NoDefault', 'NO_DEFAULT',
'xmlchars', 'format_size', 'UnicodeMixin', 'slug', 'encoded', 'lazyproperty',
# Deprecated:
'cached_property',
]
def log_or_raise(msg, log=None, level='warn', exception_cls=ValueError):
if log:
getattr(log, level)(msg)
else:
raise exception_cls(msg)
def nfilter(seq):
"""Replacement for python 2's filter(None, seq).
:return: a list filtered from seq containing only truthy items.
"""
return [e for e in seq if e]
def to_binary(s, encoding='utf8'):
"""Portable cast function.
In python 2 the ``str`` function which is used to coerce objects to bytes does not
accept an encoding argument, whereas python 3's ``bytes`` function requires one.
:param s: object to be converted to binary_type
:return: binary_type instance, representing s.
"""
if PY3: # pragma: no cover
return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding)
return binary_type(s) # pragma: no cover
def dict_merged(d, _filter=None, **kw):
"""Update dictionary d with the items passed as kw if the value passes _filter."""
def f(s):
if _filter:
return _filter(s)
return s is not None
d = d or {}
for k, v in iteritems(kw):
if f(v):
d[k] = v
return d
class NoDefault(object):
def __repr__(self):
return '<NoDefault>'
#: A singleton which can be used to distinguish no-argument-passed from None passed as
#: argument in callables with optional arguments.
NO_DEFAULT = NoDefault()
def xmlchars(text):
"""Not all of UTF-8 is considered valid character data in XML ...
Thus, this function can be used to remove illegal characters from ``text``.
"""
invalid = list(range(0x9))
invalid.extend([0xb, 0xc])
invalid.extend(range(0xe, 0x20))
return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text)
def format_size(num):
"""Format byte-sizes.
.. seealso:: http://stackoverflow.com/a/1094933
"""
for x in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
class UnicodeMixin(object):
"""Portable label mixin."""
def __unicode__(self):
"""a human readable label for the object."""
return '%s' % self # pragma: no cover
if PY3:
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__()
else: # pragma: no cover
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__().encode('utf-8')
def slug(s, remove_whitespace=True, lowercase=True):
"""Condensed version of s, containing only lowercase alphanumeric characters."""
res = ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
if lowercase:
res = res.lower()
for c in string.punctuation:
res = res.replace(c, '')
res = re.sub('\s+', '' if remove_whitespace else ' ', res)
res = res.encode('ascii', 'ignore').decode('ascii')
assert re.match('[ A-Za-z0-9]*$', res)
return res
def encoded(string, encoding='utf-8'):
"""Cast string to binary_type.
:param string: six.binary_type or six.text_type
:param encoding: encoding which the object is forced to
:return: six.binary_type
"""
assert isinstance(string, string_types) or isinstance(string, binary_type)
if isinstance(string, text_type):
return string.encode(encoding)
try:
# make sure the string can be decoded in the specified encoding ...
string.decode(encoding)
return string
except UnicodeDecodeError:
# ... if not use latin1 as best guess to decode the string before encoding as
# specified.
return string.decode('latin1').encode(encoding)
# NOTE: this can probably replace cached_property below in most cases (consider deprecation)
# - no call overhead after caching (cached attribute is just a plain instance attribute)
# - no additional dict for caching (just delete the instance attribute to expire manually)
# - no AttributeError when trying to access the attribute on the class
# - no parenthesis for usage
class lazyproperty(object):
"""Non-data descriptor caching the computed result as instance attribute.
>>> class Spam(object):
... @lazyproperty
... def eggs(self):
... return 'spamspamspam'
>>> spam=Spam(); spam.eggs
'spamspamspam'
>>> spam.eggs='eggseggseggs'; spam.eggs
'eggseggseggs'
>>> Spam().eggs
'spamspamspam'
>>> Spam.eggs # doctest: +ELLIPSIS
<...lazyproperty object at 0x...>
"""
def __init__(self, fget):
self.fget = fget
for attr in ('__module__', '__name__', '__doc__'):
setattr(self, attr, getattr(fget, attr))
def __get__(self, instance, owner):
if instance is None:
return self
result = instance.__dict__[self.__name__] = self.fget(instance)
return result
class cached_property(object):
"""Decorator for read-only properties evaluated only once.
It can be used to create a cached property like this::
import random
# the class containing the property must be a new-style class
class MyClass(object):
# create property whose value is cached
@cached_property()
def randint(self):
# will only be evaluated once.
return random.randint(0, 100)
The value is cached in the '_cache' attribute of the object instance that
has the property getter method wrapped by this decorator. The '_cache'
attribute value is a dictionary which has a key for every property of the
object which is wrapped by this decorator. Each entry in the cache is
created only when the property is accessed for the first time and is the last
computed property value.
To expire a cached property value manually just do::
del instance._cache[<property name>]
inspired by the recipe by Christopher Arndt in the PythonDecoratorLibrary
"""
def __call__(self, fget):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"Use of deprecated decorator cached_property! Use lazyproperty instead.",
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
self.fget = fget
self.__doc__ = fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
return self
def __get__(self, inst, owner):
if not hasattr(inst, '_cache'):
inst._cache = {}
if self.__name__ not in inst._cache:
inst._cache[self.__name__] = self.fget(inst)
return inst._cache[self.__name__]
|
clld/clldutils | src/clldutils/misc.py | to_binary | python | def to_binary(s, encoding='utf8'):
if PY3: # pragma: no cover
return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding)
return binary_type(s) | Portable cast function.
In python 2 the ``str`` function which is used to coerce objects to bytes does not
accept an encoding argument, whereas python 3's ``bytes`` function requires one.
:param s: object to be converted to binary_type
:return: binary_type instance, representing s. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L61-L72 | null | """Generic utility functions."""
from __future__ import unicode_literals, division, absolute_import
import re
import string
import unicodedata
from mimetypes import guess_type
from base64 import b64encode
import warnings
from clldutils._compat import pathlib
from six import PY3, string_types, text_type, binary_type, iteritems
__all__ = [
'data_url', 'log_or_raise', 'nfilter', 'to_binary', 'dict_merged', 'NoDefault', 'NO_DEFAULT',
'xmlchars', 'format_size', 'UnicodeMixin', 'slug', 'encoded', 'lazyproperty',
# Deprecated:
'cached_property',
]
def data_url(content, mimetype=None):
"""
Returns content encoded as base64 Data URI.
:param content: bytes or str or Path
:param mimetype: mimetype for
:return: str object (consisting only of ASCII, though)
.. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme
"""
if isinstance(content, pathlib.Path):
if not mimetype:
mimetype = guess_type(content.name)[0]
with content.open('rb') as fp:
content = fp.read()
else:
if isinstance(content, text_type):
content = content.encode('utf8')
return "data:{0};base64,{1}".format(
mimetype or 'application/octet-stream', b64encode(content).decode())
def log_or_raise(msg, log=None, level='warn', exception_cls=ValueError):
if log:
getattr(log, level)(msg)
else:
raise exception_cls(msg)
def nfilter(seq):
"""Replacement for python 2's filter(None, seq).
:return: a list filtered from seq containing only truthy items.
"""
return [e for e in seq if e]
# pragma: no cover
def dict_merged(d, _filter=None, **kw):
"""Update dictionary d with the items passed as kw if the value passes _filter."""
def f(s):
if _filter:
return _filter(s)
return s is not None
d = d or {}
for k, v in iteritems(kw):
if f(v):
d[k] = v
return d
class NoDefault(object):
def __repr__(self):
return '<NoDefault>'
#: A singleton which can be used to distinguish no-argument-passed from None passed as
#: argument in callables with optional arguments.
NO_DEFAULT = NoDefault()
def xmlchars(text):
"""Not all of UTF-8 is considered valid character data in XML ...
Thus, this function can be used to remove illegal characters from ``text``.
"""
invalid = list(range(0x9))
invalid.extend([0xb, 0xc])
invalid.extend(range(0xe, 0x20))
return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text)
def format_size(num):
"""Format byte-sizes.
.. seealso:: http://stackoverflow.com/a/1094933
"""
for x in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
class UnicodeMixin(object):
"""Portable label mixin."""
def __unicode__(self):
"""a human readable label for the object."""
return '%s' % self # pragma: no cover
if PY3:
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__()
else: # pragma: no cover
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__().encode('utf-8')
def slug(s, remove_whitespace=True, lowercase=True):
"""Condensed version of s, containing only lowercase alphanumeric characters."""
res = ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
if lowercase:
res = res.lower()
for c in string.punctuation:
res = res.replace(c, '')
res = re.sub('\s+', '' if remove_whitespace else ' ', res)
res = res.encode('ascii', 'ignore').decode('ascii')
assert re.match('[ A-Za-z0-9]*$', res)
return res
def encoded(string, encoding='utf-8'):
"""Cast string to binary_type.
:param string: six.binary_type or six.text_type
:param encoding: encoding which the object is forced to
:return: six.binary_type
"""
assert isinstance(string, string_types) or isinstance(string, binary_type)
if isinstance(string, text_type):
return string.encode(encoding)
try:
# make sure the string can be decoded in the specified encoding ...
string.decode(encoding)
return string
except UnicodeDecodeError:
# ... if not use latin1 as best guess to decode the string before encoding as
# specified.
return string.decode('latin1').encode(encoding)
# NOTE: this can probably replace cached_property below in most cases (consider deprecation)
# - no call overhead after caching (cached attribute is just a plain instance attribute)
# - no additional dict for caching (just delete the instance attribute to expire manually)
# - no AttributeError when trying to access the attribute on the class
# - no parenthesis for usage
class lazyproperty(object):
"""Non-data descriptor caching the computed result as instance attribute.
>>> class Spam(object):
... @lazyproperty
... def eggs(self):
... return 'spamspamspam'
>>> spam=Spam(); spam.eggs
'spamspamspam'
>>> spam.eggs='eggseggseggs'; spam.eggs
'eggseggseggs'
>>> Spam().eggs
'spamspamspam'
>>> Spam.eggs # doctest: +ELLIPSIS
<...lazyproperty object at 0x...>
"""
def __init__(self, fget):
self.fget = fget
for attr in ('__module__', '__name__', '__doc__'):
setattr(self, attr, getattr(fget, attr))
def __get__(self, instance, owner):
if instance is None:
return self
result = instance.__dict__[self.__name__] = self.fget(instance)
return result
class cached_property(object):
"""Decorator for read-only properties evaluated only once.
It can be used to create a cached property like this::
import random
# the class containing the property must be a new-style class
class MyClass(object):
# create property whose value is cached
@cached_property()
def randint(self):
# will only be evaluated once.
return random.randint(0, 100)
The value is cached in the '_cache' attribute of the object instance that
has the property getter method wrapped by this decorator. The '_cache'
attribute value is a dictionary which has a key for every property of the
object which is wrapped by this decorator. Each entry in the cache is
created only when the property is accessed for the first time and is the last
computed property value.
To expire a cached property value manually just do::
del instance._cache[<property name>]
inspired by the recipe by Christopher Arndt in the PythonDecoratorLibrary
"""
def __call__(self, fget):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"Use of deprecated decorator cached_property! Use lazyproperty instead.",
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
self.fget = fget
self.__doc__ = fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
return self
def __get__(self, inst, owner):
if not hasattr(inst, '_cache'):
inst._cache = {}
if self.__name__ not in inst._cache:
inst._cache[self.__name__] = self.fget(inst)
return inst._cache[self.__name__]
|
clld/clldutils | src/clldutils/misc.py | dict_merged | python | def dict_merged(d, _filter=None, **kw):
def f(s):
if _filter:
return _filter(s)
return s is not None
d = d or {}
for k, v in iteritems(kw):
if f(v):
d[k] = v
return d | Update dictionary d with the items passed as kw if the value passes _filter. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L75-L85 | [
"def f(s):\n if _filter:\n return _filter(s)\n return s is not None\n"
] | """Generic utility functions."""
from __future__ import unicode_literals, division, absolute_import
import re
import string
import unicodedata
from mimetypes import guess_type
from base64 import b64encode
import warnings
from clldutils._compat import pathlib
from six import PY3, string_types, text_type, binary_type, iteritems
__all__ = [
'data_url', 'log_or_raise', 'nfilter', 'to_binary', 'dict_merged', 'NoDefault', 'NO_DEFAULT',
'xmlchars', 'format_size', 'UnicodeMixin', 'slug', 'encoded', 'lazyproperty',
# Deprecated:
'cached_property',
]
def data_url(content, mimetype=None):
"""
Returns content encoded as base64 Data URI.
:param content: bytes or str or Path
:param mimetype: mimetype for
:return: str object (consisting only of ASCII, though)
.. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme
"""
if isinstance(content, pathlib.Path):
if not mimetype:
mimetype = guess_type(content.name)[0]
with content.open('rb') as fp:
content = fp.read()
else:
if isinstance(content, text_type):
content = content.encode('utf8')
return "data:{0};base64,{1}".format(
mimetype or 'application/octet-stream', b64encode(content).decode())
def log_or_raise(msg, log=None, level='warn', exception_cls=ValueError):
if log:
getattr(log, level)(msg)
else:
raise exception_cls(msg)
def nfilter(seq):
"""Replacement for python 2's filter(None, seq).
:return: a list filtered from seq containing only truthy items.
"""
return [e for e in seq if e]
def to_binary(s, encoding='utf8'):
"""Portable cast function.
In python 2 the ``str`` function which is used to coerce objects to bytes does not
accept an encoding argument, whereas python 3's ``bytes`` function requires one.
:param s: object to be converted to binary_type
:return: binary_type instance, representing s.
"""
if PY3: # pragma: no cover
return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding)
return binary_type(s) # pragma: no cover
class NoDefault(object):
def __repr__(self):
return '<NoDefault>'
#: A singleton which can be used to distinguish no-argument-passed from None passed as
#: argument in callables with optional arguments.
NO_DEFAULT = NoDefault()
def xmlchars(text):
"""Not all of UTF-8 is considered valid character data in XML ...
Thus, this function can be used to remove illegal characters from ``text``.
"""
invalid = list(range(0x9))
invalid.extend([0xb, 0xc])
invalid.extend(range(0xe, 0x20))
return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text)
def format_size(num):
"""Format byte-sizes.
.. seealso:: http://stackoverflow.com/a/1094933
"""
for x in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
class UnicodeMixin(object):
"""Portable label mixin."""
def __unicode__(self):
"""a human readable label for the object."""
return '%s' % self # pragma: no cover
if PY3:
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__()
else: # pragma: no cover
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__().encode('utf-8')
def slug(s, remove_whitespace=True, lowercase=True):
"""Condensed version of s, containing only lowercase alphanumeric characters."""
res = ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
if lowercase:
res = res.lower()
for c in string.punctuation:
res = res.replace(c, '')
res = re.sub('\s+', '' if remove_whitespace else ' ', res)
res = res.encode('ascii', 'ignore').decode('ascii')
assert re.match('[ A-Za-z0-9]*$', res)
return res
def encoded(string, encoding='utf-8'):
"""Cast string to binary_type.
:param string: six.binary_type or six.text_type
:param encoding: encoding which the object is forced to
:return: six.binary_type
"""
assert isinstance(string, string_types) or isinstance(string, binary_type)
if isinstance(string, text_type):
return string.encode(encoding)
try:
# make sure the string can be decoded in the specified encoding ...
string.decode(encoding)
return string
except UnicodeDecodeError:
# ... if not use latin1 as best guess to decode the string before encoding as
# specified.
return string.decode('latin1').encode(encoding)
# NOTE: this can probably replace cached_property below in most cases (consider deprecation)
# - no call overhead after caching (cached attribute is just a plain instance attribute)
# - no additional dict for caching (just delete the instance attribute to expire manually)
# - no AttributeError when trying to access the attribute on the class
# - no parenthesis for usage
class lazyproperty(object):
"""Non-data descriptor caching the computed result as instance attribute.
>>> class Spam(object):
... @lazyproperty
... def eggs(self):
... return 'spamspamspam'
>>> spam=Spam(); spam.eggs
'spamspamspam'
>>> spam.eggs='eggseggseggs'; spam.eggs
'eggseggseggs'
>>> Spam().eggs
'spamspamspam'
>>> Spam.eggs # doctest: +ELLIPSIS
<...lazyproperty object at 0x...>
"""
def __init__(self, fget):
self.fget = fget
for attr in ('__module__', '__name__', '__doc__'):
setattr(self, attr, getattr(fget, attr))
def __get__(self, instance, owner):
if instance is None:
return self
result = instance.__dict__[self.__name__] = self.fget(instance)
return result
class cached_property(object):
"""Decorator for read-only properties evaluated only once.
It can be used to create a cached property like this::
import random
# the class containing the property must be a new-style class
class MyClass(object):
# create property whose value is cached
@cached_property()
def randint(self):
# will only be evaluated once.
return random.randint(0, 100)
The value is cached in the '_cache' attribute of the object instance that
has the property getter method wrapped by this decorator. The '_cache'
attribute value is a dictionary which has a key for every property of the
object which is wrapped by this decorator. Each entry in the cache is
created only when the property is accessed for the first time and is the last
computed property value.
To expire a cached property value manually just do::
del instance._cache[<property name>]
inspired by the recipe by Christopher Arndt in the PythonDecoratorLibrary
"""
def __call__(self, fget):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"Use of deprecated decorator cached_property! Use lazyproperty instead.",
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
self.fget = fget
self.__doc__ = fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
return self
def __get__(self, inst, owner):
if not hasattr(inst, '_cache'):
inst._cache = {}
if self.__name__ not in inst._cache:
inst._cache[self.__name__] = self.fget(inst)
return inst._cache[self.__name__]
|
clld/clldutils | src/clldutils/misc.py | xmlchars | python | def xmlchars(text):
invalid = list(range(0x9))
invalid.extend([0xb, 0xc])
invalid.extend(range(0xe, 0x20))
return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text) | Not all of UTF-8 is considered valid character data in XML ...
Thus, this function can be used to remove illegal characters from ``text``. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L99-L107 | null | """Generic utility functions."""
from __future__ import unicode_literals, division, absolute_import
import re
import string
import unicodedata
from mimetypes import guess_type
from base64 import b64encode
import warnings
from clldutils._compat import pathlib
from six import PY3, string_types, text_type, binary_type, iteritems
__all__ = [
'data_url', 'log_or_raise', 'nfilter', 'to_binary', 'dict_merged', 'NoDefault', 'NO_DEFAULT',
'xmlchars', 'format_size', 'UnicodeMixin', 'slug', 'encoded', 'lazyproperty',
# Deprecated:
'cached_property',
]
def data_url(content, mimetype=None):
"""
Returns content encoded as base64 Data URI.
:param content: bytes or str or Path
:param mimetype: mimetype for
:return: str object (consisting only of ASCII, though)
.. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme
"""
if isinstance(content, pathlib.Path):
if not mimetype:
mimetype = guess_type(content.name)[0]
with content.open('rb') as fp:
content = fp.read()
else:
if isinstance(content, text_type):
content = content.encode('utf8')
return "data:{0};base64,{1}".format(
mimetype or 'application/octet-stream', b64encode(content).decode())
def log_or_raise(msg, log=None, level='warn', exception_cls=ValueError):
if log:
getattr(log, level)(msg)
else:
raise exception_cls(msg)
def nfilter(seq):
"""Replacement for python 2's filter(None, seq).
:return: a list filtered from seq containing only truthy items.
"""
return [e for e in seq if e]
def to_binary(s, encoding='utf8'):
"""Portable cast function.
In python 2 the ``str`` function which is used to coerce objects to bytes does not
accept an encoding argument, whereas python 3's ``bytes`` function requires one.
:param s: object to be converted to binary_type
:return: binary_type instance, representing s.
"""
if PY3: # pragma: no cover
return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding)
return binary_type(s) # pragma: no cover
def dict_merged(d, _filter=None, **kw):
"""Update dictionary d with the items passed as kw if the value passes _filter."""
def f(s):
if _filter:
return _filter(s)
return s is not None
d = d or {}
for k, v in iteritems(kw):
if f(v):
d[k] = v
return d
class NoDefault(object):
def __repr__(self):
return '<NoDefault>'
#: A singleton which can be used to distinguish no-argument-passed from None passed as
#: argument in callables with optional arguments.
NO_DEFAULT = NoDefault()
def format_size(num):
"""Format byte-sizes.
.. seealso:: http://stackoverflow.com/a/1094933
"""
for x in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
class UnicodeMixin(object):
"""Portable label mixin."""
def __unicode__(self):
"""a human readable label for the object."""
return '%s' % self # pragma: no cover
if PY3:
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__()
else: # pragma: no cover
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__().encode('utf-8')
def slug(s, remove_whitespace=True, lowercase=True):
"""Condensed version of s, containing only lowercase alphanumeric characters."""
res = ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
if lowercase:
res = res.lower()
for c in string.punctuation:
res = res.replace(c, '')
res = re.sub('\s+', '' if remove_whitespace else ' ', res)
res = res.encode('ascii', 'ignore').decode('ascii')
assert re.match('[ A-Za-z0-9]*$', res)
return res
def encoded(string, encoding='utf-8'):
"""Cast string to binary_type.
:param string: six.binary_type or six.text_type
:param encoding: encoding which the object is forced to
:return: six.binary_type
"""
assert isinstance(string, string_types) or isinstance(string, binary_type)
if isinstance(string, text_type):
return string.encode(encoding)
try:
# make sure the string can be decoded in the specified encoding ...
string.decode(encoding)
return string
except UnicodeDecodeError:
# ... if not use latin1 as best guess to decode the string before encoding as
# specified.
return string.decode('latin1').encode(encoding)
# NOTE: this can probably replace cached_property below in most cases (consider deprecation)
# - no call overhead after caching (cached attribute is just a plain instance attribute)
# - no additional dict for caching (just delete the instance attribute to expire manually)
# - no AttributeError when trying to access the attribute on the class
# - no parenthesis for usage
class lazyproperty(object):
"""Non-data descriptor caching the computed result as instance attribute.
>>> class Spam(object):
... @lazyproperty
... def eggs(self):
... return 'spamspamspam'
>>> spam=Spam(); spam.eggs
'spamspamspam'
>>> spam.eggs='eggseggseggs'; spam.eggs
'eggseggseggs'
>>> Spam().eggs
'spamspamspam'
>>> Spam.eggs # doctest: +ELLIPSIS
<...lazyproperty object at 0x...>
"""
def __init__(self, fget):
self.fget = fget
for attr in ('__module__', '__name__', '__doc__'):
setattr(self, attr, getattr(fget, attr))
def __get__(self, instance, owner):
if instance is None:
return self
result = instance.__dict__[self.__name__] = self.fget(instance)
return result
class cached_property(object):
"""Decorator for read-only properties evaluated only once.
It can be used to create a cached property like this::
import random
# the class containing the property must be a new-style class
class MyClass(object):
# create property whose value is cached
@cached_property()
def randint(self):
# will only be evaluated once.
return random.randint(0, 100)
The value is cached in the '_cache' attribute of the object instance that
has the property getter method wrapped by this decorator. The '_cache'
attribute value is a dictionary which has a key for every property of the
object which is wrapped by this decorator. Each entry in the cache is
created only when the property is accessed for the first time and is the last
computed property value.
To expire a cached property value manually just do::
del instance._cache[<property name>]
inspired by the recipe by Christopher Arndt in the PythonDecoratorLibrary
"""
def __call__(self, fget):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"Use of deprecated decorator cached_property! Use lazyproperty instead.",
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
self.fget = fget
self.__doc__ = fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
return self
def __get__(self, inst, owner):
if not hasattr(inst, '_cache'):
inst._cache = {}
if self.__name__ not in inst._cache:
inst._cache[self.__name__] = self.fget(inst)
return inst._cache[self.__name__]
|
clld/clldutils | src/clldutils/misc.py | slug | python | def slug(s, remove_whitespace=True, lowercase=True):
res = ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
if lowercase:
res = res.lower()
for c in string.punctuation:
res = res.replace(c, '')
res = re.sub('\s+', '' if remove_whitespace else ' ', res)
res = res.encode('ascii', 'ignore').decode('ascii')
assert re.match('[ A-Za-z0-9]*$', res)
return res | Condensed version of s, containing only lowercase alphanumeric characters. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L139-L150 | null | """Generic utility functions."""
from __future__ import unicode_literals, division, absolute_import
import re
import string
import unicodedata
from mimetypes import guess_type
from base64 import b64encode
import warnings
from clldutils._compat import pathlib
from six import PY3, string_types, text_type, binary_type, iteritems
__all__ = [
'data_url', 'log_or_raise', 'nfilter', 'to_binary', 'dict_merged', 'NoDefault', 'NO_DEFAULT',
'xmlchars', 'format_size', 'UnicodeMixin', 'slug', 'encoded', 'lazyproperty',
# Deprecated:
'cached_property',
]
def data_url(content, mimetype=None):
"""
Returns content encoded as base64 Data URI.
:param content: bytes or str or Path
:param mimetype: mimetype for
:return: str object (consisting only of ASCII, though)
.. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme
"""
if isinstance(content, pathlib.Path):
if not mimetype:
mimetype = guess_type(content.name)[0]
with content.open('rb') as fp:
content = fp.read()
else:
if isinstance(content, text_type):
content = content.encode('utf8')
return "data:{0};base64,{1}".format(
mimetype or 'application/octet-stream', b64encode(content).decode())
def log_or_raise(msg, log=None, level='warn', exception_cls=ValueError):
if log:
getattr(log, level)(msg)
else:
raise exception_cls(msg)
def nfilter(seq):
"""Replacement for python 2's filter(None, seq).
:return: a list filtered from seq containing only truthy items.
"""
return [e for e in seq if e]
def to_binary(s, encoding='utf8'):
"""Portable cast function.
In python 2 the ``str`` function which is used to coerce objects to bytes does not
accept an encoding argument, whereas python 3's ``bytes`` function requires one.
:param s: object to be converted to binary_type
:return: binary_type instance, representing s.
"""
if PY3: # pragma: no cover
return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding)
return binary_type(s) # pragma: no cover
def dict_merged(d, _filter=None, **kw):
"""Update dictionary d with the items passed as kw if the value passes _filter."""
def f(s):
if _filter:
return _filter(s)
return s is not None
d = d or {}
for k, v in iteritems(kw):
if f(v):
d[k] = v
return d
class NoDefault(object):
def __repr__(self):
return '<NoDefault>'
#: A singleton which can be used to distinguish no-argument-passed from None passed as
#: argument in callables with optional arguments.
NO_DEFAULT = NoDefault()
def xmlchars(text):
"""Not all of UTF-8 is considered valid character data in XML ...
Thus, this function can be used to remove illegal characters from ``text``.
"""
invalid = list(range(0x9))
invalid.extend([0xb, 0xc])
invalid.extend(range(0xe, 0x20))
return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text)
def format_size(num):
"""Format byte-sizes.
.. seealso:: http://stackoverflow.com/a/1094933
"""
for x in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
class UnicodeMixin(object):
"""Portable label mixin."""
def __unicode__(self):
"""a human readable label for the object."""
return '%s' % self # pragma: no cover
if PY3:
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__()
else: # pragma: no cover
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__().encode('utf-8')
def encoded(string, encoding='utf-8'):
"""Cast string to binary_type.
:param string: six.binary_type or six.text_type
:param encoding: encoding which the object is forced to
:return: six.binary_type
"""
assert isinstance(string, string_types) or isinstance(string, binary_type)
if isinstance(string, text_type):
return string.encode(encoding)
try:
# make sure the string can be decoded in the specified encoding ...
string.decode(encoding)
return string
except UnicodeDecodeError:
# ... if not use latin1 as best guess to decode the string before encoding as
# specified.
return string.decode('latin1').encode(encoding)
# NOTE: this can probably replace cached_property below in most cases (consider deprecation)
# - no call overhead after caching (cached attribute is just a plain instance attribute)
# - no additional dict for caching (just delete the instance attribute to expire manually)
# - no AttributeError when trying to access the attribute on the class
# - no parenthesis for usage
class lazyproperty(object):
"""Non-data descriptor caching the computed result as instance attribute.
>>> class Spam(object):
... @lazyproperty
... def eggs(self):
... return 'spamspamspam'
>>> spam=Spam(); spam.eggs
'spamspamspam'
>>> spam.eggs='eggseggseggs'; spam.eggs
'eggseggseggs'
>>> Spam().eggs
'spamspamspam'
>>> Spam.eggs # doctest: +ELLIPSIS
<...lazyproperty object at 0x...>
"""
def __init__(self, fget):
self.fget = fget
for attr in ('__module__', '__name__', '__doc__'):
setattr(self, attr, getattr(fget, attr))
def __get__(self, instance, owner):
if instance is None:
return self
result = instance.__dict__[self.__name__] = self.fget(instance)
return result
class cached_property(object):
"""Decorator for read-only properties evaluated only once.
It can be used to create a cached property like this::
import random
# the class containing the property must be a new-style class
class MyClass(object):
# create property whose value is cached
@cached_property()
def randint(self):
# will only be evaluated once.
return random.randint(0, 100)
The value is cached in the '_cache' attribute of the object instance that
has the property getter method wrapped by this decorator. The '_cache'
attribute value is a dictionary which has a key for every property of the
object which is wrapped by this decorator. Each entry in the cache is
created only when the property is accessed for the first time and is the last
computed property value.
To expire a cached property value manually just do::
del instance._cache[<property name>]
inspired by the recipe by Christopher Arndt in the PythonDecoratorLibrary
"""
def __call__(self, fget):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"Use of deprecated decorator cached_property! Use lazyproperty instead.",
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
self.fget = fget
self.__doc__ = fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
return self
def __get__(self, inst, owner):
if not hasattr(inst, '_cache'):
inst._cache = {}
if self.__name__ not in inst._cache:
inst._cache[self.__name__] = self.fget(inst)
return inst._cache[self.__name__]
|
clld/clldutils | src/clldutils/misc.py | encoded | python | def encoded(string, encoding='utf-8'):
assert isinstance(string, string_types) or isinstance(string, binary_type)
if isinstance(string, text_type):
return string.encode(encoding)
try:
# make sure the string can be decoded in the specified encoding ...
string.decode(encoding)
return string
except UnicodeDecodeError:
# ... if not use latin1 as best guess to decode the string before encoding as
# specified.
return string.decode('latin1').encode(encoding) | Cast string to binary_type.
:param string: six.binary_type or six.text_type
:param encoding: encoding which the object is forced to
:return: six.binary_type | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L153-L170 | null | """Generic utility functions."""
from __future__ import unicode_literals, division, absolute_import
import re
import string
import unicodedata
from mimetypes import guess_type
from base64 import b64encode
import warnings
from clldutils._compat import pathlib
from six import PY3, string_types, text_type, binary_type, iteritems
__all__ = [
'data_url', 'log_or_raise', 'nfilter', 'to_binary', 'dict_merged', 'NoDefault', 'NO_DEFAULT',
'xmlchars', 'format_size', 'UnicodeMixin', 'slug', 'encoded', 'lazyproperty',
# Deprecated:
'cached_property',
]
def data_url(content, mimetype=None):
"""
Returns content encoded as base64 Data URI.
:param content: bytes or str or Path
:param mimetype: mimetype for
:return: str object (consisting only of ASCII, though)
.. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme
"""
if isinstance(content, pathlib.Path):
if not mimetype:
mimetype = guess_type(content.name)[0]
with content.open('rb') as fp:
content = fp.read()
else:
if isinstance(content, text_type):
content = content.encode('utf8')
return "data:{0};base64,{1}".format(
mimetype or 'application/octet-stream', b64encode(content).decode())
def log_or_raise(msg, log=None, level='warn', exception_cls=ValueError):
if log:
getattr(log, level)(msg)
else:
raise exception_cls(msg)
def nfilter(seq):
"""Replacement for python 2's filter(None, seq).
:return: a list filtered from seq containing only truthy items.
"""
return [e for e in seq if e]
def to_binary(s, encoding='utf8'):
"""Portable cast function.
In python 2 the ``str`` function which is used to coerce objects to bytes does not
accept an encoding argument, whereas python 3's ``bytes`` function requires one.
:param s: object to be converted to binary_type
:return: binary_type instance, representing s.
"""
if PY3: # pragma: no cover
return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding)
return binary_type(s) # pragma: no cover
def dict_merged(d, _filter=None, **kw):
"""Update dictionary d with the items passed as kw if the value passes _filter."""
def f(s):
if _filter:
return _filter(s)
return s is not None
d = d or {}
for k, v in iteritems(kw):
if f(v):
d[k] = v
return d
class NoDefault(object):
def __repr__(self):
return '<NoDefault>'
#: A singleton which can be used to distinguish no-argument-passed from None passed as
#: argument in callables with optional arguments.
NO_DEFAULT = NoDefault()
def xmlchars(text):
"""Not all of UTF-8 is considered valid character data in XML ...
Thus, this function can be used to remove illegal characters from ``text``.
"""
invalid = list(range(0x9))
invalid.extend([0xb, 0xc])
invalid.extend(range(0xe, 0x20))
return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text)
def format_size(num):
"""Format byte-sizes.
.. seealso:: http://stackoverflow.com/a/1094933
"""
for x in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
class UnicodeMixin(object):
"""Portable label mixin."""
def __unicode__(self):
"""a human readable label for the object."""
return '%s' % self # pragma: no cover
if PY3:
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__()
else: # pragma: no cover
def __str__(self):
"""a human readable label for the object, appropriately encoded (or not)."""
return self.__unicode__().encode('utf-8')
def slug(s, remove_whitespace=True, lowercase=True):
"""Condensed version of s, containing only lowercase alphanumeric characters."""
res = ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
if lowercase:
res = res.lower()
for c in string.punctuation:
res = res.replace(c, '')
res = re.sub('\s+', '' if remove_whitespace else ' ', res)
res = res.encode('ascii', 'ignore').decode('ascii')
assert re.match('[ A-Za-z0-9]*$', res)
return res
# NOTE: this can probably replace cached_property below in most cases (consider deprecation)
# - no call overhead after caching (cached attribute is just a plain instance attribute)
# - no additional dict for caching (just delete the instance attribute to expire manually)
# - no AttributeError when trying to access the attribute on the class
# - no parenthesis for usage
class lazyproperty(object):
"""Non-data descriptor caching the computed result as instance attribute.
>>> class Spam(object):
... @lazyproperty
... def eggs(self):
... return 'spamspamspam'
>>> spam=Spam(); spam.eggs
'spamspamspam'
>>> spam.eggs='eggseggseggs'; spam.eggs
'eggseggseggs'
>>> Spam().eggs
'spamspamspam'
>>> Spam.eggs # doctest: +ELLIPSIS
<...lazyproperty object at 0x...>
"""
def __init__(self, fget):
self.fget = fget
for attr in ('__module__', '__name__', '__doc__'):
setattr(self, attr, getattr(fget, attr))
def __get__(self, instance, owner):
if instance is None:
return self
result = instance.__dict__[self.__name__] = self.fget(instance)
return result
class cached_property(object):
"""Decorator for read-only properties evaluated only once.
It can be used to create a cached property like this::
import random
# the class containing the property must be a new-style class
class MyClass(object):
# create property whose value is cached
@cached_property()
def randint(self):
# will only be evaluated once.
return random.randint(0, 100)
The value is cached in the '_cache' attribute of the object instance that
has the property getter method wrapped by this decorator. The '_cache'
attribute value is a dictionary which has a key for every property of the
object which is wrapped by this decorator. Each entry in the cache is
created only when the property is accessed for the first time and is the last
computed property value.
To expire a cached property value manually just do::
del instance._cache[<property name>]
inspired by the recipe by Christopher Arndt in the PythonDecoratorLibrary
"""
def __call__(self, fget):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"Use of deprecated decorator cached_property! Use lazyproperty instead.",
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
self.fget = fget
self.__doc__ = fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
return self
def __get__(self, inst, owner):
if not hasattr(inst, '_cache'):
inst._cache = {}
if self.__name__ not in inst._cache:
inst._cache[self.__name__] = self.fget(inst)
return inst._cache[self.__name__]
|
clld/clldutils | src/clldutils/path.py | readlines | python | def readlines(p,
encoding=None,
strip=False,
comment=None,
normalize=None,
linenumbers=False):
if comment:
strip = True
if isinstance(p, (list, tuple)):
res = [l.decode(encoding) if encoding else l for l in p]
else:
with Path(p).open(encoding=encoding or 'utf-8') as fp:
res = fp.readlines()
if strip:
res = [l.strip() or None for l in res]
if comment:
res = [None if l and l.startswith(comment) else l for l in res]
if normalize:
res = [unicodedata.normalize(normalize, l) if l else l for l in res]
if linenumbers:
return [(n, l) for n, l in enumerate(res, 1)]
return [l for l in res if l is not None] | Read a `list` of lines from a text file.
:param p: File path (or `list` or `tuple` of text)
:param encoding: Registered codec.
:param strip: If `True`, strip leading and trailing whitespace.
:param comment: String used as syntax to mark comment lines. When not `None`, \
commented lines will be stripped. This implies `strip=True`.
:param normalize: 'NFC', 'NFKC', 'NFD', 'NFKD'
:param linenumbers: return also line numbers.
:return: `list` of text lines or pairs (`int`, text or `None`). | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/path.py#L96-L129 | null | from __future__ import unicode_literals
import io
import os
import sys
import mmap
import shutil
import hashlib
import tempfile
import importlib
import contextlib
import subprocess
import unicodedata
from six import PY3, string_types, binary_type, text_type, iteritems
from clldutils._compat import pathlib
from clldutils.misc import UnicodeMixin
Path = pathlib.Path
@contextlib.contextmanager
def sys_path(p):
p = Path(p).as_posix()
sys.path.insert(0, p)
yield
sys.path.pop(0)
@contextlib.contextmanager
def memorymapped(filename, access=mmap.ACCESS_READ):
f = io.open(as_posix(filename), 'rb')
try:
m = mmap.mmap(f.fileno(), 0, access=access)
try:
yield m
finally:
m.close()
finally:
f.close()
def import_module(p):
with sys_path(p.parent):
m = importlib.import_module(p.stem)
if Path(m.__file__).parent not in [p.parent, p]:
# If we end up importing from the wrong place, raise an error:
raise ImportError(m.__file__) # pragma: no cover
return m
# In python 3, pathlib treats path components and string-like representations or
# attributes of paths (like name and stem) as unicode strings. Unfortunately this is not
# true for pathlib under python 2.7. So as workaround for the case of using non-ASCII
# path names with python 2.7 the following two wrapper functions are provided.
# Note that the issue is even more complex, because pathlib with python 2.7 under windows
# may still pose problems.
def path_component(s, encoding='utf-8'):
if isinstance(s, binary_type) and PY3:
s = s.decode(encoding)
if isinstance(s, text_type) and not PY3: # pragma: no cover
s = s.encode(encoding)
return s
def as_unicode(p, encoding='utf-8'):
if PY3:
return '%s' % p
return (b'%s' % p).decode(encoding) # pragma: no cover
def as_posix(p):
if hasattr(p, 'as_posix'):
return p.as_posix()
elif isinstance(p, string_types):
return Path(p).as_posix()
raise ValueError(p)
def remove(p):
os.remove(as_posix(p))
def read_text(p, encoding='utf8', **kw):
with Path(p).open(encoding=encoding, **kw) as fp:
return fp.read()
def write_text(p, text, encoding='utf8', **kw):
with Path(p).open('w', encoding=encoding, **kw) as fp:
return fp.write(text)
def rmtree(p, **kw):
return shutil.rmtree(as_posix(p), **kw)
def move(src, dst):
return shutil.move(as_posix(src), as_posix(dst))
def copy(src, dst):
return shutil.copy(as_posix(src), as_posix(dst))
def copytree(src, dst, **kw):
return shutil.copytree(as_posix(src), as_posix(dst), **kw)
def walk(p, mode='all', **kw):
"""Wrapper for `os.walk`, yielding `Path` objects.
:param p: root of the directory tree to walk.
:param mode: 'all|dirs|files', defaulting to 'all'.
:param kw: Keyword arguments are passed to `os.walk`.
:return: Generator for the requested Path objects.
"""
for dirpath, dirnames, filenames in os.walk(as_posix(p), **kw):
if mode in ('all', 'dirs'):
for dirname in dirnames:
yield Path(dirpath).joinpath(dirname)
if mode in ('all', 'files'):
for fname in filenames:
yield Path(dirpath).joinpath(fname)
def md5(p, bufsize=32768):
hash_md5 = hashlib.md5()
with io.open(Path(p).as_posix(), 'rb') as fp:
for chunk in iter(lambda: fp.read(bufsize), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
class Manifest(dict, UnicodeMixin):
"""A `dict` mapping relative path names to md5 sums of file contents.
A `Manifest.from_dir(d, relative_to=d.parent).__unicode__()` is equivalent
to the content of the file `manifest-md5.txt` of the BagIt specification.
.. seealso:: https://en.wikipedia.org/wiki/BagIt
"""
@classmethod
def from_dir(cls, d, relative_to=None):
d = Path(d)
assert d.is_dir()
return cls(
(p.relative_to(relative_to or d).as_posix(), md5(p))
for p in walk(d, mode='files'))
def __unicode__(self):
return '\n'.join('{0} {1}'.format(v, k)
for k, v in sorted(iteritems(self)))
def write(self, outdir=None):
path = Path(outdir or '.') / 'manifest-md5.txt'
write_text(path, '{0}'.format(self))
def git_describe(dir_):
dir_ = Path(dir_)
if not dir_.exists():
raise ValueError('cannot describe non-existent directory')
dir_ = dir_.resolve()
cmd = [
'git', '--git-dir=%s' % dir_.joinpath('.git').as_posix(), 'describe',
'--always', '--tags']
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode == 0:
res = stdout.strip() # pragma: no cover
else:
raise ValueError(stderr)
except ValueError:
res = dir_.name
if not isinstance(res, text_type):
res = res.decode('utf8')
return res
class TemporaryDirectory(object):
"""A trimmed down backport of python 3's tempfile.TemporaryDirectory."""
def __init__(self, **kw):
self.name = Path(tempfile.mkdtemp(**kw))
def __enter__(self):
return self.name
def __exit__(self, exc_type, exc_val, exc_tb):
rmtree(self.name)
|
clld/clldutils | src/clldutils/path.py | walk | python | def walk(p, mode='all', **kw):
for dirpath, dirnames, filenames in os.walk(as_posix(p), **kw):
if mode in ('all', 'dirs'):
for dirname in dirnames:
yield Path(dirpath).joinpath(dirname)
if mode in ('all', 'files'):
for fname in filenames:
yield Path(dirpath).joinpath(fname) | Wrapper for `os.walk`, yielding `Path` objects.
:param p: root of the directory tree to walk.
:param mode: 'all|dirs|files', defaulting to 'all'.
:param kw: Keyword arguments are passed to `os.walk`.
:return: Generator for the requested Path objects. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/path.py#L148-L162 | [
"def as_posix(p):\n if hasattr(p, 'as_posix'):\n return p.as_posix()\n elif isinstance(p, string_types):\n return Path(p).as_posix()\n raise ValueError(p)\n"
] | from __future__ import unicode_literals
import io
import os
import sys
import mmap
import shutil
import hashlib
import tempfile
import importlib
import contextlib
import subprocess
import unicodedata
from six import PY3, string_types, binary_type, text_type, iteritems
from clldutils._compat import pathlib
from clldutils.misc import UnicodeMixin
Path = pathlib.Path
@contextlib.contextmanager
def sys_path(p):
p = Path(p).as_posix()
sys.path.insert(0, p)
yield
sys.path.pop(0)
@contextlib.contextmanager
def memorymapped(filename, access=mmap.ACCESS_READ):
f = io.open(as_posix(filename), 'rb')
try:
m = mmap.mmap(f.fileno(), 0, access=access)
try:
yield m
finally:
m.close()
finally:
f.close()
def import_module(p):
with sys_path(p.parent):
m = importlib.import_module(p.stem)
if Path(m.__file__).parent not in [p.parent, p]:
# If we end up importing from the wrong place, raise an error:
raise ImportError(m.__file__) # pragma: no cover
return m
# In python 3, pathlib treats path components and string-like representations or
# attributes of paths (like name and stem) as unicode strings. Unfortunately this is not
# true for pathlib under python 2.7. So as workaround for the case of using non-ASCII
# path names with python 2.7 the following two wrapper functions are provided.
# Note that the issue is even more complex, because pathlib with python 2.7 under windows
# may still pose problems.
def path_component(s, encoding='utf-8'):
if isinstance(s, binary_type) and PY3:
s = s.decode(encoding)
if isinstance(s, text_type) and not PY3: # pragma: no cover
s = s.encode(encoding)
return s
def as_unicode(p, encoding='utf-8'):
if PY3:
return '%s' % p
return (b'%s' % p).decode(encoding) # pragma: no cover
def as_posix(p):
if hasattr(p, 'as_posix'):
return p.as_posix()
elif isinstance(p, string_types):
return Path(p).as_posix()
raise ValueError(p)
def remove(p):
os.remove(as_posix(p))
def read_text(p, encoding='utf8', **kw):
with Path(p).open(encoding=encoding, **kw) as fp:
return fp.read()
def write_text(p, text, encoding='utf8', **kw):
with Path(p).open('w', encoding=encoding, **kw) as fp:
return fp.write(text)
def readlines(p,
encoding=None,
strip=False,
comment=None,
normalize=None,
linenumbers=False):
"""
Read a `list` of lines from a text file.
:param p: File path (or `list` or `tuple` of text)
:param encoding: Registered codec.
:param strip: If `True`, strip leading and trailing whitespace.
:param comment: String used as syntax to mark comment lines. When not `None`, \
commented lines will be stripped. This implies `strip=True`.
:param normalize: 'NFC', 'NFKC', 'NFD', 'NFKD'
:param linenumbers: return also line numbers.
:return: `list` of text lines or pairs (`int`, text or `None`).
"""
if comment:
strip = True
if isinstance(p, (list, tuple)):
res = [l.decode(encoding) if encoding else l for l in p]
else:
with Path(p).open(encoding=encoding or 'utf-8') as fp:
res = fp.readlines()
if strip:
res = [l.strip() or None for l in res]
if comment:
res = [None if l and l.startswith(comment) else l for l in res]
if normalize:
res = [unicodedata.normalize(normalize, l) if l else l for l in res]
if linenumbers:
return [(n, l) for n, l in enumerate(res, 1)]
return [l for l in res if l is not None]
def rmtree(p, **kw):
return shutil.rmtree(as_posix(p), **kw)
def move(src, dst):
return shutil.move(as_posix(src), as_posix(dst))
def copy(src, dst):
return shutil.copy(as_posix(src), as_posix(dst))
def copytree(src, dst, **kw):
return shutil.copytree(as_posix(src), as_posix(dst), **kw)
def md5(p, bufsize=32768):
hash_md5 = hashlib.md5()
with io.open(Path(p).as_posix(), 'rb') as fp:
for chunk in iter(lambda: fp.read(bufsize), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
class Manifest(dict, UnicodeMixin):
"""A `dict` mapping relative path names to md5 sums of file contents.
A `Manifest.from_dir(d, relative_to=d.parent).__unicode__()` is equivalent
to the content of the file `manifest-md5.txt` of the BagIt specification.
.. seealso:: https://en.wikipedia.org/wiki/BagIt
"""
@classmethod
def from_dir(cls, d, relative_to=None):
d = Path(d)
assert d.is_dir()
return cls(
(p.relative_to(relative_to or d).as_posix(), md5(p))
for p in walk(d, mode='files'))
def __unicode__(self):
return '\n'.join('{0} {1}'.format(v, k)
for k, v in sorted(iteritems(self)))
def write(self, outdir=None):
path = Path(outdir or '.') / 'manifest-md5.txt'
write_text(path, '{0}'.format(self))
def git_describe(dir_):
dir_ = Path(dir_)
if not dir_.exists():
raise ValueError('cannot describe non-existent directory')
dir_ = dir_.resolve()
cmd = [
'git', '--git-dir=%s' % dir_.joinpath('.git').as_posix(), 'describe',
'--always', '--tags']
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode == 0:
res = stdout.strip() # pragma: no cover
else:
raise ValueError(stderr)
except ValueError:
res = dir_.name
if not isinstance(res, text_type):
res = res.decode('utf8')
return res
class TemporaryDirectory(object):
"""A trimmed down backport of python 3's tempfile.TemporaryDirectory."""
def __init__(self, **kw):
self.name = Path(tempfile.mkdtemp(**kw))
def __enter__(self):
return self.name
def __exit__(self, exc_type, exc_val, exc_tb):
rmtree(self.name)
|
clld/clldutils | src/clldutils/source.py | Source.bibtex | python | def bibtex(self):
m = max(itertools.chain(map(len, self), [0]))
fields = (" %s = {%s}" % (k.ljust(m), self[k]) for k in self)
return "@%s{%s,\n%s\n}" % (
getattr(self.genre, 'value', self.genre), self.id, ",\n".join(fields)) | Represent the source in BibTeX format.
:return: string encoding the source in BibTeX syntax. | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/source.py#L109-L117 | null | class Source(collections.OrderedDict, UnicodeMixin):
"""Bibliographic metadata about a source used for some analysis in a linguistic database.
Following BibTeX-style, a Source is just an ordered list of key-value pairs, augmented
with an id (a.k.a. BibTeX citekey) and a genre (a.k.a. Entry Types).
.. note::
We do restrict the allowed syntax for the id to make sure it can safely be used
as path component in a URL. To skip this check, pass `_check_id=False` to the
constructor.
"""
def __init__(self, genre, id_, *args, **kw):
if kw.pop('_check_id', True) and not ID_PATTERN.match(id_):
raise ValueError(id_)
self.genre = genre
self.id = id_
super(Source, self).__init__(*args, **kw)
def __bool__(self): # pragma: no cover
return True
__nonzero__ = __bool__
def __unicode__(self):
return self.text()
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.id)
@classmethod
def from_entry(cls, key, entry):
"""
Factory method to initialize a `Source` instance from a `pybtex.database.Entry`.
:param key: Citation key, e.g. a key in `pybtex.database.BibliographyData.entries`.
:param entry: `pybtex.database.Entry`
"""
kw = {k: v for k, v in entry.fields.items()}
for role in (entry.persons or []):
if entry.persons[role]:
kw[role] = ' and '.join('%s' % p for p in entry.persons[role])
return cls(entry.type, key, **kw)
@classmethod
def from_bibtex(cls, bibtexString, lowercase=False, _check_id=True):
source = None
# the following patterns are designed to match preprocessed input lines.
# i.e. the configuration values given in the bibtool resource file used to
# generate the bib-file have to correspond to these patterns.
# in particular, we assume all key-value-pairs to fit on one line,
# because we don't want to deal with nested curly braces!
lines = bibtexString.strip().split('\n')
# genre and key are parsed from the @-line:
atLine = re.compile("^@(?P<genre>[a-zA-Z_]+)\s*{\s*(?P<key>[^,]*)\s*,\s*")
# since all key-value pairs fit on one line, it's easy to determine the
# end of the value: right before the last closing brace!
fieldLine = re.compile('\s*(?P<field>[a-zA-Z_]+)\s*=\s*(\{|")(?P<value>.+)')
endLine = re.compile("}\s*")
while lines:
line = lines.pop(0)
if not source:
m = atLine.match(line)
if m:
source = cls(
m.group('genre').strip().lower(),
m.group('key').strip(),
_check_id=_check_id)
else:
m = fieldLine.match(line)
if m:
value = m.group('value').strip()
if value.endswith(','):
value = value[:-1].strip()
if value.endswith('}') or value.endswith('"'):
field = m.group('field')
if lowercase:
field = field.lower()
source[field] = value[:-1].strip()
else:
m = endLine.match(line)
if m:
break
# Note: fields with names not matching the expected pattern are simply
# ignored.
return source
_genre_note = {
'phdthesis': 'Doctoral dissertation',
'mastersthesis': 'MA thesis',
'unpublished': 'unpublished',
}
def get_with_translation(self, key):
    """Return the value for *key*, with its English translation appended.

    When both ``key`` and ``key + '_english'`` hold truthy values, the
    result is ``"<value> [<translation>]"``; otherwise the plain value
    (possibly None) is returned unchanged.
    """
    value = self.get(key)
    translation = self.get(key + '_english')
    if value and translation:
        return '{0} [{1}]'.format(value, translation)
    return value
def text(self):
"""Linearize the bib source according to the rules of the unified style.
Book:
author. year. booktitle. (series, volume.) address: publisher.
Article:
author. year. title. journal volume(issue). pages.
Incollection:
author. year. title. In editor (ed.), booktitle, pages. address: publisher.
.. seealso::
http://celxj.org/downloads/UnifiedStyleSheet.pdf
https://github.com/citation-style-language/styles/blob/master/\
unified-style-linguistics.csl
"""
# self.genre may be an enum-like object carrying .value, or a plain string.
genre = getattr(self.genre, 'value', self.genre)
# For these genres the page count is rendered at the very end as "<pages>pp".
pages_at_end = genre in (
'book',
'phdthesis',
'mastersthesis',
'misc',
'techreport')
thesis = genre in ('phdthesis', 'mastersthesis')
# Pre-render the editor credit, e.g. " Smith (ed.)" / " Smith & Jones (eds.)".
if self.get('editor'):
editors = self['editor']
affix = 'eds' if ' and ' in editors or '&' in editors else 'ed'
editors = " %s (%s.)" % (editors, affix)
else:
editors = None
# `res` accumulates the linearized chunks; falsy entries are dropped in the
# final join, and a missing year renders as "n.d" (no date).
res = [self.get('author', editors), self.get('year', 'n.d')]
if genre == 'book':
res.append(self.get_with_translation('booktitle') or
self.get_with_translation('title'))
series = ', '.join(filter(None, [self.get('series'), self.get('volume')]))
if series:
res.append('(%s.)' % series)
elif genre == 'misc':
# in case of misc records, we use the note field in case a title is missing.
res.append(self.get_with_translation('title') or self.get('note'))
else:
res.append(self.get_with_translation('title'))
if genre == 'article':
atom = ' '.join(filter(None, [self.get('journal'), self.get('volume')]))
if self.get('issue'):
atom += '(%s)' % self['issue']
res.append(atom)
res.append(self.get('pages'))
elif genre == 'incollection' or genre == 'inproceedings':
prefix = 'In'
atom = ''
if editors:
atom += editors
if self.get('booktitle'):
if atom:
atom += ','
atom += " %s" % self.get_with_translation('booktitle')
if self.get('pages'):
atom += ", %s" % self['pages']
res.append(prefix + atom)
else:
# check for author to make sure we haven't included the editors yet.
if editors and self.get('author'):
res.append("In %s" % editors)
for attr in [
'journal',
'volume' if genre != 'book' else None,
]:
if attr and self.get(attr):
res.append(self.get(attr))
if self.get('issue'):
res.append("(%s)" % self['issue'])
if not pages_at_end and self.get('pages'):
res.append(self['pages'])
if self.get('publisher'):
res.append(": ".join(filter(None, [self.get('address'), self['publisher']])))
else:
if genre == 'misc' and self.get('howpublished'):
res.append(self.get('howpublished'))
if not thesis and pages_at_end and self.get('pages'):
res.append(self['pages'] + 'pp')
# Trailing parenthesized note; for theses the school and page count are
# folded in, e.g. "(Doctoral dissertation, <school>; <pages>pp.)".
note = self.get('note') or self._genre_note.get(genre)
if note and note not in res:
if thesis:
joiner = ','
if self.get('school'):
note += '{0} {1}'.format(joiner, self.get('school'))
joiner = ';'
if self.get('pages'):
note += '{0} {1}pp.'.format(joiner, self.get('pages'))
res.append('(%s)' % note)
# Join with spaces, terminating every chunk with a period unless it
# already ends in '.' or '.)'.
return ' '.join(
x if x.endswith(('.', '.)')) else '%s.' % x for x in res if x)
|
clld/clldutils | src/clldutils/source.py | Source.text | python | def text(self):
genre = getattr(self.genre, 'value', self.genre)
pages_at_end = genre in (
'book',
'phdthesis',
'mastersthesis',
'misc',
'techreport')
thesis = genre in ('phdthesis', 'mastersthesis')
if self.get('editor'):
editors = self['editor']
affix = 'eds' if ' and ' in editors or '&' in editors else 'ed'
editors = " %s (%s.)" % (editors, affix)
else:
editors = None
res = [self.get('author', editors), self.get('year', 'n.d')]
if genre == 'book':
res.append(self.get_with_translation('booktitle') or
self.get_with_translation('title'))
series = ', '.join(filter(None, [self.get('series'), self.get('volume')]))
if series:
res.append('(%s.)' % series)
elif genre == 'misc':
# in case of misc records, we use the note field in case a title is missing.
res.append(self.get_with_translation('title') or self.get('note'))
else:
res.append(self.get_with_translation('title'))
if genre == 'article':
atom = ' '.join(filter(None, [self.get('journal'), self.get('volume')]))
if self.get('issue'):
atom += '(%s)' % self['issue']
res.append(atom)
res.append(self.get('pages'))
elif genre == 'incollection' or genre == 'inproceedings':
prefix = 'In'
atom = ''
if editors:
atom += editors
if self.get('booktitle'):
if atom:
atom += ','
atom += " %s" % self.get_with_translation('booktitle')
if self.get('pages'):
atom += ", %s" % self['pages']
res.append(prefix + atom)
else:
# check for author to make sure we haven't included the editors yet.
if editors and self.get('author'):
res.append("In %s" % editors)
for attr in [
'journal',
'volume' if genre != 'book' else None,
]:
if attr and self.get(attr):
res.append(self.get(attr))
if self.get('issue'):
res.append("(%s)" % self['issue'])
if not pages_at_end and self.get('pages'):
res.append(self['pages'])
if self.get('publisher'):
res.append(": ".join(filter(None, [self.get('address'), self['publisher']])))
else:
if genre == 'misc' and self.get('howpublished'):
res.append(self.get('howpublished'))
if not thesis and pages_at_end and self.get('pages'):
res.append(self['pages'] + 'pp')
note = self.get('note') or self._genre_note.get(genre)
if note and note not in res:
if thesis:
joiner = ','
if self.get('school'):
note += '{0} {1}'.format(joiner, self.get('school'))
joiner = ';'
if self.get('pages'):
note += '{0} {1}pp.'.format(joiner, self.get('pages'))
res.append('(%s)' % note)
return ' '.join(
x if x.endswith(('.', '.)')) else '%s.' % x for x in res if x) | Linearize the bib source according to the rules of the unified style.
Book:
author. year. booktitle. (series, volume.) address: publisher.
Article:
author. year. title. journal volume(issue). pages.
Incollection:
author. year. title. In editor (ed.), booktitle, pages. address: publisher.
.. seealso::
http://celxj.org/downloads/UnifiedStyleSheet.pdf
https://github.com/citation-style-language/styles/blob/master/\
unified-style-linguistics.csl | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/source.py#L131-L235 | null | class Source(collections.OrderedDict, UnicodeMixin):
"""Bibliographic metadata about a source used for some analysis in a linguistic database.
Following BibTeX-style, a Source is just an ordered list of key-value pairs, augmented
with an id (a.k.a. BibTeX citekey) and a genre (a.k.a. Entry Types).
.. note::
We do restrict the allowed syntax for the id to make sure it can safely be used
as path component in a URL. To skip this check, pass `_check_id=False` to the
constructor.
"""
def __init__(self, genre, id_, *args, **kw):
"""Initialize with a BibTeX entry type, a citation key, and field data.

:param genre: BibTeX entry type (e.g. 'book', 'article').
:param id_: Citation key; validated against ID_PATTERN unless the
    keyword-only flag `_check_id=False` is passed.
:raises ValueError: if `_check_id` is truthy and `id_` fails validation.
"""
# `_check_id` is popped so it is not stored as a field of the source.
if kw.pop('_check_id', True) and not ID_PATTERN.match(id_):
raise ValueError(id_)
self.genre = genre
self.id = id_
super(Source, self).__init__(*args, **kw)
# A Source is always truthy, even when it holds no fields (a plain dict
# would be falsy when empty).
def __bool__(self): # pragma: no cover
return True
# Python 2 spelling of __bool__.
__nonzero__ = __bool__
def __unicode__(self):
# The unified-style text rendering doubles as the string form.
return self.text()
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.id)
@classmethod
def from_entry(cls, key, entry):
"""
Factory method to initialize a `Source` instance from a `pybtex.database.Entry`.
:param key: Citation key, e.g. a key in `pybtex.database.BibliographyData.entries`.
:param entry: `pybtex.database.Entry`
"""
# Copy the plain fields; person roles (author/editor/...) are flattened below.
kw = {k: v for k, v in entry.fields.items()}
for role in (entry.persons or []):
if entry.persons[role]:
# BibTeX joins multiple persons of one role with ' and '.
kw[role] = ' and '.join('%s' % p for p in entry.persons[role])
return cls(entry.type, key, **kw)
@classmethod
def from_bibtex(cls, bibtexString, lowercase=False, _check_id=True):
source = None
# the following patterns are designed to match preprocessed input lines.
# i.e. the configuration values given in the bibtool resource file used to
# generate the bib-file have to correspond to these patterns.
# in particular, we assume all key-value-pairs to fit on one line,
# because we don't want to deal with nested curly braces!
lines = bibtexString.strip().split('\n')
# genre and key are parsed from the @-line:
atLine = re.compile("^@(?P<genre>[a-zA-Z_]+)\s*{\s*(?P<key>[^,]*)\s*,\s*")
# since all key-value pairs fit on one line, it's easy to determine the
# end of the value: right before the last closing brace!
fieldLine = re.compile('\s*(?P<field>[a-zA-Z_]+)\s*=\s*(\{|")(?P<value>.+)')
endLine = re.compile("}\s*")
while lines:
line = lines.pop(0)
if not source:
m = atLine.match(line)
if m:
source = cls(
m.group('genre').strip().lower(),
m.group('key').strip(),
_check_id=_check_id)
else:
m = fieldLine.match(line)
if m:
value = m.group('value').strip()
if value.endswith(','):
value = value[:-1].strip()
if value.endswith('}') or value.endswith('"'):
field = m.group('field')
if lowercase:
field = field.lower()
source[field] = value[:-1].strip()
else:
m = endLine.match(line)
if m:
break
# Note: fields with names not matching the expected pattern are simply
# ignored.
return source
def bibtex(self):
"""Represent the source in BibTeX format.
:return: string encoding the source in BibTeX syntax.
"""
# Pad every field name to the longest one so the '=' signs line up;
# the [0] guard keeps max() from raising when the source has no fields.
m = max(itertools.chain(map(len, self), [0]))
fields = ("  %s = {%s}" % (k.ljust(m), self[k]) for k in self)
return "@%s{%s,\n%s\n}" % (
getattr(self.genre, 'value', self.genre), self.id, ",\n".join(fields))
# Fallback note text per genre, used by the text() rendering when the
# record itself carries no explicit 'note' field.
_genre_note = {
'phdthesis': 'Doctoral dissertation',
'mastersthesis': 'MA thesis',
'unpublished': 'unpublished',
}
def get_with_translation(self, key):
"""Return the value of `key`, suffixed with the `<key>_english` translation, if any."""
res = self.get(key)
if res and self.get(key + '_english'):
# e.g. a vernacular title followed by its English rendering in brackets.
res = '{0} [{1}]'.format(res, self.get(key + '_english'))
return res
|
edx/edx-submissions | submissions/serializers.py | SubmissionSerializer.validate_answer | python | def validate_answer(self, value):
# Check that the answer is JSON-serializable
try:
serialized = json.dumps(value)
except (ValueError, TypeError):
raise serializers.ValidationError("Answer value must be JSON-serializable")
# Check the length of the serialized representation
if len(serialized) > Submission.MAXSIZE:
raise serializers.ValidationError("Maximum answer size exceeded.")
return value | Check that the answer is JSON-serializable and not too long. | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/serializers.py#L61-L75 | null | class SubmissionSerializer(serializers.ModelSerializer):
# Django Rest Framework v3 uses the Django setting `DATETIME_FORMAT`
# when serializing datetimes. This differs from v2, which always
# returned a datetime. To preserve the old behavior, we explicitly
# set `format` to None.
# http://www.django-rest-framework.org/api-guide/fields/#datetimefield
submitted_at = DateTimeField(format=None, required=False)
created_at = DateTimeField(format=None, required=False)
# Django Rest Framework v3 apparently no longer validates that
# `PositiveIntegerField`s are positive!
attempt_number = IntegerField(min_value=0)
# Prevent Django Rest Framework from converting the answer (dict or str)
# to a string.
answer = RawField()
# Expose only the canonical submission fields; scores are serialized
# separately by ScoreSerializer.
class Meta:
model = Submission
fields = (
'uuid',
'student_item',
'attempt_number',
'submitted_at',
'created_at',
'answer',
)
|
edx/edx-submissions | submissions/serializers.py | ScoreSerializer.get_annotations | python | def get_annotations(self, obj):
annotations = ScoreAnnotation.objects.filter(score_id=obj.id)
return [
ScoreAnnotationSerializer(instance=annotation).data
for annotation in annotations
] | Inspect ScoreAnnotations to attach all relevant annotations. | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/serializers.py#L125-L133 | null | class ScoreSerializer(serializers.ModelSerializer):
# Ensure that the created_at datetime is not converted to a string.
created_at = DateTimeField(format=None, required=False)
# Populated by get_annotations() with serialized ScoreAnnotation rows.
annotations = serializers.SerializerMethodField()
class Meta:
model = Score
fields = (
'student_item',
'submission',
'points_earned',
'points_possible',
'created_at',
# Computed
'submission_uuid',
'annotations',
)
|
edx/edx-submissions | submissions/models.py | Score.create_reset_score | python | def create_reset_score(cls, student_item):
# By setting the "reset" flag, we ensure that the "highest"
# score in the score summary will point to this score.
# By setting points earned and points possible to 0,
# we ensure that this score will be hidden from the user.
return cls.objects.create(
student_item=student_item,
submission=None,
points_earned=0,
points_possible=0,
reset=True,
) | Create a "reset" score (a score with a null submission).
Only scores created after the most recent "reset" score
should be used to determine a student's effective score.
Args:
student_item (StudentItem): The student item model.
Returns:
Score: The newly created "reset" score.
Raises:
DatabaseError: An error occurred while creating the score | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/models.py#L241-L268 | null | class Score(models.Model):
"""What the user scored for a given StudentItem at a given time.
Note that while a Score *can* be tied to a Submission, it doesn't *have* to.
Specifically, if we want to have scores for things that are not a part of
the courseware (like "class participation"), there would be no corresponding
Submission.
"""
student_item = models.ForeignKey(StudentItem)
# Null for scores without a courseware submission (e.g. "reset" scores or
# non-courseware items like class participation).
submission = models.ForeignKey(Submission, null=True)
points_earned = models.PositiveIntegerField(default=0)
points_possible = models.PositiveIntegerField(default=0)
created_at = models.DateTimeField(editable=False, default=now, db_index=True)
# Flag to indicate that this score should reset the current "highest" score
reset = models.BooleanField(default=False)
class Meta:
app_label = u"submissions"
@property
def submission_uuid(self):
"""
Retrieve the submission UUID associated with this score.
If the score isn't associated with a submission (for example, if this is
a "reset" score or a non-courseware item like "class participation"),
then this will return None.
Returns:
str or None
"""
if self.submission is not None:
# six.text_type yields a unicode string on both Python 2 and 3.
return six.text_type(self.submission.uuid)
else:
return None
def to_float(self):
    """Return the fractional score (points earned over points possible).

    A score whose ``points_possible`` is zero is a "hidden" score and
    yields None instead of a ratio.

    Returns:
        float or None
    """
    possible = self.points_possible
    if possible:
        return float(self.points_earned) / possible
    return None
def __repr__(self):
# Debug-friendly rendering: the score's salient fields as a dict literal.
return repr(dict(
student_item=self.student_item,
submission=self.submission,
created_at=self.created_at,
points_earned=self.points_earned,
points_possible=self.points_possible,
))
def is_hidden(self):
"""
By convention, a score of 0/0 is not displayed to users.
Hidden scores are filtered by the submissions API.
Returns:
bool: Whether the score should be hidden.
"""
# 0/0 is exactly what a "reset" score records, so resets stay invisible.
return self.points_possible == 0
def __unicode__(self):
    """Return the score as ``earned/possible``, e.g. ``3/5``.

    NOTE(review): a stray ``@classmethod`` decorator preceded this method
    (left over from an extraction above); it would have bound the class --
    not the instance -- to ``self`` and broken the attribute lookups in the
    format string, so it has been removed.
    """
    return u"{0.points_earned}/{0.points_possible}".format(self)
|
edx/edx-submissions | submissions/models.py | ScoreSummary.update_score_summary | python | def update_score_summary(sender, **kwargs):
score = kwargs['instance']
try:
score_summary = ScoreSummary.objects.get(
student_item=score.student_item
)
score_summary.latest = score
# A score with the "reset" flag set will always replace the current highest score
if score.reset:
score_summary.highest = score
# The conversion to a float may return None if points possible is zero
# In Python, None is always less than an integer, so any score
# with non-null points possible will take precedence.
elif score.to_float() > score_summary.highest.to_float():
score_summary.highest = score
score_summary.save()
except ScoreSummary.DoesNotExist:
ScoreSummary.objects.create(
student_item=score.student_item,
highest=score,
latest=score,
)
except DatabaseError as err:
logger.exception(
u"Error while updating score summary for student item {}"
.format(score.student_item)
) | Listen for new Scores and update the relevant ScoreSummary.
Args:
sender: not used
Kwargs:
instance (Score): The score model whose save triggered this receiver. | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/models.py#L286-L323 | null | class ScoreSummary(models.Model):
"""Running store of the highest and most recent Scores for a StudentItem."""
student_item = models.OneToOneField(StudentItem)
highest = models.ForeignKey(Score, related_name=u"+")
latest = models.ForeignKey(Score, related_name=u"+")
class Meta:
app_label = u"submissions"
verbose_name_plural = u"Score Summaries"
@receiver(post_save, sender=Score)
|
edx/edx-submissions | submissions/management/commands/update_submissions_uuids.py | Command.add_arguments | python | def add_arguments(self, parser):
parser.add_argument(
'--start', '-s',
default=0,
type=int,
help=u"The Submission.id at which to begin updating rows. 0 by default."
)
parser.add_argument(
'--chunk', '-c',
default=1000,
type=int,
help=u"Batch size, how many rows to update in a given transaction. Default 1000.",
)
parser.add_argument(
'--wait', '-w',
default=2,
type=int,
help=u"Wait time between transactions, in seconds. Default 2.",
) | Add arguments to the command parser.
Uses argparse syntax. See documentation at
https://docs.python.org/3/library/argparse.html. | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/management/commands/update_submissions_uuids.py#L31-L55 | null | class Command(BaseCommand):
"""
Example usage: ./manage.py lms --settings=devstack update_submissions_uuids.py
"""
help = 'Loads and saves all Submissions objects to force new non-hyphenated uuid values on disk.'
def handle(self, *args, **options):
"""
By default, we're going to do this in chunks. This way, if there ends up being an error,
we can check log messages and continue from that point after fixing the issue.
"""
# Note that by taking last_id here, we're going to miss any submissions created *during* the command execution
# But that's okay! All new entries have already been created using the new style, no action needed there
# NOTE(review): `_objects` is presumably the base manager that includes
# soft-deleted rows -- confirm against the Submission model.
last_id = Submission._objects.all().aggregate(Max('id'))['id__max']
log.info("Beginning uuid update")
current = options['start']
while current < last_id:
end_chunk = current + options['chunk'] if last_id - options['chunk'] >= current else last_id
log.info("Updating entries in range [{}, {}]".format(current, end_chunk))
with transaction.atomic():
for submission in Submission._objects.filter(id__gte=current, id__lte=end_chunk).iterator():
# Re-saving only the uuid column rewrites it in the new
# (non-hyphenated) serialization.
submission.save(update_fields=['uuid'])
# Throttle between transactions to limit database load.
time.sleep(options['wait'])
current = end_chunk + 1
|
edx/edx-submissions | submissions/management/commands/update_submissions_uuids.py | Command.handle | python | def handle(self, *args, **options):
# Note that by taking last_id here, we're going to miss any submissions created *during* the command execution
# But that's okay! All new entries have already been created using the new style, no action needed there
last_id = Submission._objects.all().aggregate(Max('id'))['id__max']
log.info("Beginning uuid update")
current = options['start']
while current < last_id:
end_chunk = current + options['chunk'] if last_id - options['chunk'] >= current else last_id
log.info("Updating entries in range [{}, {}]".format(current, end_chunk))
with transaction.atomic():
for submission in Submission._objects.filter(id__gte=current, id__lte=end_chunk).iterator():
submission.save(update_fields=['uuid'])
time.sleep(options['wait'])
current = end_chunk + 1 | By default, we're going to do this in chunks. This way, if there ends up being an error,
we can check log messages and continue from that point after fixing the issue. | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/management/commands/update_submissions_uuids.py#L57-L75 | null | class Command(BaseCommand):
"""
Example usage: ./manage.py lms --settings=devstack update_submissions_uuids.py
"""
help = 'Loads and saves all Submissions objects to force new non-hyphenated uuid values on disk.'
def add_arguments(self, parser):
"""
Add arguments to the command parser.
Uses argparse syntax. See documentation at
https://docs.python.org/3/library/argparse.html.
"""
# Resume point: lets the command be restarted after a failure.
parser.add_argument(
'--start', '-s',
default=0,
type=int,
help=u"The Submission.id at which to begin updating rows. 0 by default."
)
parser.add_argument(
'--chunk', '-c',
default=1000,
type=int,
help=u"Batch size, how many rows to update in a given transaction. Default 1000.",
)
parser.add_argument(
'--wait', '-w',
default=2,
type=int,
help=u"Wait time between transactions, in seconds. Default 2.",
)
|
edx/edx-submissions | submissions/views.py | get_submissions_for_student_item | python | def get_submissions_for_student_item(request, course_id, student_id, item_id):
student_item_dict = dict(
course_id=course_id,
student_id=student_id,
item_id=item_id,
)
context = dict(**student_item_dict)
try:
submissions = get_submissions(student_item_dict)
context["submissions"] = submissions
except SubmissionRequestError:
context["error"] = "The specified student item was not found."
return render_to_response('submissions.html', context) | Retrieve all submissions associated with the given student item.
Developer utility for accessing all the submissions associated with a
student item. The student item is specified by the unique combination of
course, student, and item.
Args:
request (dict): The request.
course_id (str): The course id for this student item.
student_id (str): The student id for this student item.
item_id (str): The item id for this student item.
Returns:
HttpResponse: The response object for this request. Renders a simple
development page with all the submissions related to the specified
student item. | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/views.py#L12-L43 | [
"def get_submissions(student_item_dict, limit=None):\n \"\"\"Retrieves the submissions for the specified student item,\n ordered by most recent submitted date.\n\n Returns the submissions relative to the specified student item. Exception\n thrown if no submission is found relative to this location.\n\n Args:\n student_item_dict (dict): The location of the problem this submission is\n associated with, as defined by a course, student, and item.\n limit (int): Optional parameter for limiting the returned number of\n submissions associated with this student item. If not specified, all\n associated submissions are returned.\n\n Returns:\n List dict: A list of dicts for the associated student item. The submission\n contains five attributes: student_item, attempt_number, submitted_at,\n created_at, and answer. 'student_item' is the ID of the related student\n item for the submission. 'attempt_number' is the attempt this submission\n represents for this question. 'submitted_at' represents the time this\n submission was submitted, which can be configured, versus the\n 'created_at' date, which is when the submission is first created.\n\n Raises:\n SubmissionRequestError: Raised when the associated student item fails\n validation.\n SubmissionNotFoundError: Raised when a submission cannot be found for\n the associated student item.\n\n Examples:\n >>> student_item_dict = dict(\n >>> student_id=\"Tim\",\n >>> item_id=\"item_1\",\n >>> course_id=\"course_1\",\n >>> item_type=\"type_one\"\n >>> )\n >>> get_submissions(student_item_dict, 3)\n [{\n 'student_item': 2,\n 'attempt_number': 1,\n 'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),\n 'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),\n 'answer': u'The answer is 42.'\n }]\n\n \"\"\"\n student_item_model = _get_or_create_student_item(student_item_dict)\n try:\n submission_models = Submission.objects.filter(\n student_item=student_item_model)\n except 
DatabaseError:\n error_message = (\n u\"Error getting submission request for student item {}\"\n .format(student_item_dict)\n )\n logger.exception(error_message)\n raise SubmissionNotFoundError(error_message)\n\n if limit:\n submission_models = submission_models[:limit]\n\n return SubmissionSerializer(submission_models, many=True).data\n"
] | from __future__ import absolute_import
import logging
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from submissions.api import SubmissionRequestError, get_submissions
log = logging.getLogger(__name__)
@login_required()
|
edx/edx-submissions | submissions/api.py | create_submission | python | def create_submission(student_item_dict, answer, submitted_at=None, attempt_number=None):
student_item_model = _get_or_create_student_item(student_item_dict)
if attempt_number is None:
try:
submissions = Submission.objects.filter(
student_item=student_item_model)[:1]
except DatabaseError:
error_message = u"An error occurred while filtering submissions for student item: {}".format(
student_item_dict)
logger.exception(error_message)
raise SubmissionInternalError(error_message)
attempt_number = submissions[0].attempt_number + 1 if submissions else 1
model_kwargs = {
"student_item": student_item_model.pk,
"answer": answer,
"attempt_number": attempt_number,
}
if submitted_at:
model_kwargs["submitted_at"] = submitted_at
try:
submission_serializer = SubmissionSerializer(data=model_kwargs)
if not submission_serializer.is_valid():
raise SubmissionRequestError(field_errors=submission_serializer.errors)
submission_serializer.save()
sub_data = submission_serializer.data
_log_submission(sub_data, student_item_dict)
return sub_data
except DatabaseError:
error_message = u"An error occurred while creating submission {} for student item: {}".format(
model_kwargs,
student_item_dict
)
logger.exception(error_message)
raise SubmissionInternalError(error_message) | Creates a submission for assessment.
Generic means by which to submit an answer for assessment.
Args:
student_item_dict (dict): The student_item this
submission is associated with. This is used to determine which
course, student, and location this submission belongs to.
answer (JSON-serializable): The answer given by the student to be assessed.
submitted_at (datetime): The date in which this submission was submitted.
If not specified, defaults to the current date.
attempt_number (int): A student may be able to submit multiple attempts
per question. This allows the designated attempt to be overridden.
If the attempt is not specified, it will take the most recent
submission, as specified by the submitted_at time, and use its
attempt_number plus one.
Returns:
dict: A representation of the created Submission. The submission
contains five attributes: student_item, attempt_number, submitted_at,
created_at, and answer. 'student_item' is the ID of the related student
item for the submission. 'attempt_number' is the attempt this submission
represents for this question. 'submitted_at' represents the time this
submission was submitted, which can be configured, versus the
'created_at' date, which is when the submission is first created.
Raises:
SubmissionRequestError: Raised when there are validation errors for the
student item or submission. This can be caused by the student item
missing required values, the submission being too long, the
attempt_number is negative, or the given submitted_at time is invalid.
SubmissionInternalError: Raised when submission access causes an
internal error.
Examples:
>>> student_item_dict = dict(
>>> student_id="Tim",
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one"
>>> )
>>> create_submission(student_item_dict, "The answer is 42.", datetime.utcnow, 1)
{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284 tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
} | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/api.py#L105-L197 | [
"def _get_or_create_student_item(student_item_dict):\n \"\"\"Gets or creates a Student Item that matches the values specified.\n\n Attempts to get the specified Student Item. If it does not exist, the\n specified parameters are validated, and a new Student Item is created.\n\n Args:\n student_item_dict (dict): The dict containing the student_id, item_id,\n course_id, and item_type that uniquely defines a student item.\n\n Returns:\n StudentItem: The student item that was retrieved or created.\n\n Raises:\n SubmissionInternalError: Thrown if there was an internal error while\n attempting to create or retrieve the specified student item.\n SubmissionRequestError: Thrown if the given student item parameters fail\n validation.\n\n Examples:\n >>> student_item_dict = dict(\n >>> student_id=\"Tim\",\n >>> item_id=\"item_1\",\n >>> course_id=\"course_1\",\n >>> item_type=\"type_one\"\n >>> )\n >>> _get_or_create_student_item(student_item_dict)\n {'item_id': 'item_1', 'item_type': 'type_one', 'course_id': 'course_1', 'student_id': 'Tim'}\n\n \"\"\"\n try:\n try:\n return StudentItem.objects.get(**student_item_dict)\n except StudentItem.DoesNotExist:\n student_item_serializer = StudentItemSerializer(\n data=student_item_dict\n )\n if not student_item_serializer.is_valid():\n logger.error(\n u\"Invalid StudentItemSerializer: errors:{} data:{}\".format(\n student_item_serializer.errors,\n student_item_dict\n )\n )\n raise SubmissionRequestError(field_errors=student_item_serializer.errors)\n return student_item_serializer.save()\n except DatabaseError:\n error_message = u\"An error occurred creating student item: {}\".format(\n student_item_dict\n )\n logger.exception(error_message)\n raise SubmissionInternalError(error_message)\n"
] | """
Public interface for the submissions app.
"""
from __future__ import absolute_import
import copy
import itertools
import logging
import operator
import json
from uuid import UUID
from django.conf import settings
from django.core.cache import cache
from django.db import IntegrityError, DatabaseError
from submissions.serializers import (
SubmissionSerializer, StudentItemSerializer, ScoreSerializer, UnannotatedScoreSerializer
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary, ScoreAnnotation, score_set, score_reset
import six
logger = logging.getLogger("submissions.api")
# By default, limit the number of top submissions
# Anything above this limit will result in a request error
MAX_TOP_SUBMISSIONS = 100
# Set a relatively low cache timeout for top submissions.
TOP_SUBMISSIONS_CACHE_TIMEOUT = 300
class SubmissionError(Exception):
    """Base class for all errors raised by the submissions API.

    This error is raised when the submission API cannot perform a requested
    action.
    """
    pass


class SubmissionInternalError(SubmissionError):
    """An error internal to the Submission API has occurred.

    This error is raised when an error occurs that is not caused by incorrect
    use of the API, but rather internal implementation of the underlying
    services.
    """
    pass


class SubmissionNotFoundError(SubmissionError):
    """This error is raised when no submission is found for the request.

    If a state is specified in a call to the API that results in no matching
    Submissions, this error may be raised.
    """
    pass


class SubmissionRequestError(SubmissionError):
    """This error is raised when there was a request-specific error.

    This error is reserved for problems specific to the use of the API.
    """

    def __init__(self, msg="", field_errors=None):
        """
        Configure the submission request error.

        Keyword Args:
            msg (unicode): The error message.
            field_errors (dict): A dictionary of errors (list of unicode)
                specific to the fields provided in the request.

        Example usage:

            >>> raise SubmissionRequestError(
            >>>     "An unexpected error occurred",
            >>>     {"answer": ["Maximum answer length exceeded."]}
            >>> )
        """
        super(SubmissionRequestError, self).__init__(msg)
        # BUGFIX: Python 3 removed `BaseException.message` (PEP 352), which
        # __repr__ relied on; store the message explicitly so repr() cannot
        # raise AttributeError.
        self.message = msg
        # Deep-copy so later mutation of the caller's dict cannot change the
        # recorded errors.
        self.field_errors = (
            copy.deepcopy(field_errors)
            if field_errors is not None
            else {}
        )
        self.args += (self.field_errors,)

    def __repr__(self):
        """
        Show the message and the field errors upon output.
        """
        return '{}(msg="{}", field_errors={})'.format(
            self.__class__.__name__, self.message, self.field_errors
        )
def _get_submission_model(uuid, read_replica=False):
    """
    Helper to retrieve a given Submission object from the database. Helper is needed to centralize logic that fixes
    EDUCATOR-1090, because uuids are stored both with and without hyphens.

    Args:
        uuid (str): Submission UUID, in either hyphenated or unhyphenated form.
        read_replica (bool): If True, route the initial lookup through the
            read replica when one is configured.

    Returns:
        Submission: the matching submission model.

    Raises:
        Submission.DoesNotExist: if no submission matches either UUID form.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    try:
        # Fast path: the UUID is stored in exactly the form it was passed in.
        submission = submission_qs.get(uuid=uuid)
    except Submission.DoesNotExist:
        try:
            # Retry using the canonical hyphenated form. UUID() also
            # validates the input string before any SQL is built.
            hyphenated_value = six.text_type(UUID(uuid))
            query = """
                SELECT
                    `submissions_submission`.`id`,
                    `submissions_submission`.`uuid`,
                    `submissions_submission`.`student_item_id`,
                    `submissions_submission`.`attempt_number`,
                    `submissions_submission`.`submitted_at`,
                    `submissions_submission`.`created_at`,
                    `submissions_submission`.`raw_answer`,
                    `submissions_submission`.`status`
                FROM
                    `submissions_submission`
                WHERE (
                    NOT (`submissions_submission`.`status` = 'D')
                    AND `submissions_submission`.`uuid` = '{}'
                )
            """
            # NOTE(review): the value is interpolated into raw SQL rather
            # than bound as a query parameter. This is only safe because
            # `hyphenated_value` was produced by UUID() above — do not
            # reuse this pattern with unvalidated input.
            query = query.replace("{}", hyphenated_value)
            # We can use Submission.objects instead of the SoftDeletedManager, we'll include that logic manually
            submission = Submission.objects.raw(query)[0]
        except IndexError:
            raise Submission.DoesNotExist()

        # Avoid the extra hit next time: persist the normalized uuid so
        # future lookups succeed on the fast path.
        submission.save(update_fields=['uuid'])
    return submission
def get_submission(submission_uuid, read_replica=False):
    """Retrieves a single submission by uuid.

    Args:
        submission_uuid (str): Identifier for the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Raises:
        SubmissionNotFoundError: Raised if the submission does not exist.
        SubmissionRequestError: Raised if the search parameter is not a string.
        SubmissionInternalError: Raised for unknown errors.

    Examples:
        >>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }

    """
    if not isinstance(submission_uuid, six.string_types):
        # Accept raw UUID objects for caller convenience; anything else is
        # a request error.
        if isinstance(submission_uuid, UUID):
            submission_uuid = six.text_type(submission_uuid)
        else:
            raise SubmissionRequestError(
                msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
            )

    cache_key = Submission.get_cache_key(submission_uuid)
    try:
        cached_submission_data = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception
        # (for example, memcache keys that contain spaces)
        logger.exception("Error occurred while retrieving submission from the cache")
        cached_submission_data = None

    if cached_submission_data:
        logger.info("Get submission {} (cached)".format(submission_uuid))
        return cached_submission_data

    try:
        submission = _get_submission_model(submission_uuid, read_replica)
        submission_data = SubmissionSerializer(submission).data
        # Populate the cache so subsequent lookups skip the database.
        cache.set(cache_key, submission_data)
    except Submission.DoesNotExist:
        logger.error("Submission {} not found.".format(submission_uuid))
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except Exception as exc:
        # Something very unexpected has just happened (like DB misconfig)
        err_msg = "Could not get submission due to error: {}".format(exc)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)

    logger.info("Get submission {}".format(submission_uuid))
    return submission_data
def get_submission_and_student(uuid, read_replica=False):
    """
    Retrieve a submission by its unique identifier, including the associated student item.

    Args:
        uuid (str): the unique identifier of the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        Serialized Submission model (dict) containing a serialized StudentItem model

    Raises:
        SubmissionNotFoundError: Raised if the submission does not exist.
        SubmissionRequestError: Raised if the search parameter is not a string.
        SubmissionInternalError: Raised for unknown errors.

    """
    # This may raise API exceptions
    submission = get_submission(uuid, read_replica=read_replica)

    # Retrieve the student item from the cache
    cache_key = "submissions.student_item.{}".format(submission['student_item'])
    try:
        cached_student_item = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception
        # (for example, memcache keys that contain spaces)
        logger.exception("Error occurred while retrieving student item from the cache")
        cached_student_item = None

    if cached_student_item is not None:
        # Replace the student item ID with its serialized representation.
        submission['student_item'] = cached_student_item
    else:
        # There is probably a more idiomatic way to do this using the Django REST framework
        try:
            student_item_qs = StudentItem.objects
            if read_replica:
                student_item_qs = _use_read_replica(student_item_qs)

            student_item = student_item_qs.get(id=submission['student_item'])
            submission['student_item'] = StudentItemSerializer(student_item).data
            # Cache the serialized student item for next time.
            cache.set(cache_key, submission['student_item'])
        except Exception as ex:
            err_msg = "Could not get submission due to error: {}".format(ex)
            logger.exception(err_msg)
            raise SubmissionInternalError(err_msg)

    return submission
def get_submissions(student_item_dict, limit=None):
    """Retrieves the submissions for the specified student item,
    ordered by most recent submitted date.

    Returns the submissions relative to the specified student item. Exception
    thrown if no submission is found relative to this location.

    Args:
        student_item_dict (dict): The location of the problem this submission is
            associated with, as defined by a course, student, and item.
        limit (int): Optional parameter for limiting the returned number of
            submissions associated with this student item. If not specified, all
            associated submissions are returned.

    Returns:
        List dict: A list of dicts for the associated student item. The submission
        contains five attributes: student_item, attempt_number, submitted_at,
        created_at, and answer. 'student_item' is the ID of the related student
        item for the submission. 'attempt_number' is the attempt this submission
        represents for this question. 'submitted_at' represents the time this
        submission was submitted, which can be configured, versus the
        'created_at' date, which is when the submission is first created.

    Raises:
        SubmissionRequestError: Raised when the associated student item fails
            validation.
        SubmissionNotFoundError: Raised when a submission cannot be found for
            the associated student item.

    Examples:
        >>> student_item_dict = dict(
        >>>    student_id="Tim",
        >>>    item_id="item_1",
        >>>    course_id="course_1",
        >>>    item_type="type_one"
        >>> )
        >>> get_submissions(student_item_dict, 3)
        [{
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }]

    """
    # May raise SubmissionRequestError / SubmissionInternalError.
    student_item_model = _get_or_create_student_item(student_item_dict)
    try:
        # NOTE(review): no explicit order_by here — the "most recent first"
        # ordering promised in the docstring presumably comes from the
        # Submission model's default Meta ordering; verify against the model.
        submission_models = Submission.objects.filter(
            student_item=student_item_model)
    except DatabaseError:
        error_message = (
            u"Error getting submission request for student item {}"
            .format(student_item_dict)
        )
        logger.exception(error_message)
        raise SubmissionNotFoundError(error_message)

    if limit:
        # Slice lazily; the LIMIT is applied in SQL when serialized below.
        submission_models = submission_models[:limit]

    return SubmissionSerializer(submission_models, many=True).data
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
    """For the given item, get the most recent submission for every student who has submitted.

    This may return a very large result set! It is implemented as a generator for efficiency.

    Args:
        course_id, item_id, item_type (string): The values of the respective student_item fields
            to filter the submissions by.
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Yields:
        Dicts representing the submissions with the following fields:
            student_item
            student_id
            attempt_number
            submitted_at
            created_at
            answer

    Raises:
        Cannot fail unless there's a database error, but may return an empty iterable.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    # We cannot use SELECT DISTINCT ON because it's PostgreSQL only, so unfortunately
    # our results will contain every entry of each student, not just the most recent.
    # We sort by student_id and then recency, so the results will be grouped by
    # student, with the most recent submission being the first one in each group.
    query = submission_qs.select_related('student_item').filter(
        student_item__course_id=course_id,
        student_item__item_id=item_id,
        student_item__item_type=item_type,
    ).order_by('student_item__student_id', '-submitted_at', '-id').iterator()

    for unused_student_id, row_iter in itertools.groupby(query, operator.attrgetter('student_item.student_id')):
        # groupby relies on the ordering above; the first row of each group
        # is that student's most recent submission.
        submission = next(row_iter)
        data = SubmissionSerializer(submission).data
        data['student_id'] = submission.student_item.student_id
        yield data
def get_all_course_submission_information(course_id, item_type, read_replica=True):
    """ For the given course, get all student items of the given item type, all the submissions for those items,
    and the latest scores for each item. If a submission was given a score that is not the latest score for the
    relevant student item, it will still be included but without score.

    Args:
        course_id (str): The course that we are getting submissions from.
        item_type (str): The type of items that we are getting submissions for.
        read_replica (bool): Try to use the database's read replica if it's available.

    Yields:
        A tuple of three dictionaries representing:
        (1) a student item with the following fields:
            student_id
            course_id
            student_item
            item_type
        (2) a submission with the following fields:
            student_item
            attempt_number
            submitted_at
            created_at
            answer
        (3) a score with the following fields, if one exists and it is the latest score:
            (if both conditions are not met, an empty dict is returned here)
            student_item
            submission
            points_earned
            points_possible
            created_at
            submission_uuid
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    # Pull the whole chain submission -> student item -> score summary ->
    # latest score -> scored submission in one query to avoid N+1 lookups.
    query = submission_qs.select_related('student_item__scoresummary__latest__submission').filter(
        student_item__course_id=course_id,
        student_item__item_type=item_type,
    ).iterator()

    for submission in query:
        student_item = submission.student_item
        serialized_score = {}
        if hasattr(student_item, 'scoresummary'):
            latest_score = student_item.scoresummary.latest
            # Only include the score if it is not a reset score (is_hidden), and if the current submission is the same
            # as the student_item's latest score's submission. This matches the behavior of the API's get_score method.
            if (not latest_score.is_hidden()) and latest_score.submission.uuid == submission.uuid:
                serialized_score = ScoreSerializer(latest_score).data
        yield (
            StudentItemSerializer(student_item).data,
            SubmissionSerializer(submission).data,
            serialized_score
        )
def get_top_submissions(course_id, item_id, item_type, number_of_top_scores, use_cache=True, read_replica=True):
    """Get a number of top scores for an assessment based on a particular student item

    This function will return top scores for the piece of assessment.
    It will consider only the latest and greater than 0 score for a piece of assessment.
    A score is only calculated for a student item if it has completed the workflow for
    a particular assessment module.

    In general, users of top submissions can tolerate some latency
    in the search results, so by default this call uses
    a cache and the read replica (if available).

    Args:
        course_id (str): The course to retrieve for the top scores
        item_id (str): The item within the course to retrieve for the top scores
        item_type (str): The type of item to retrieve
        number_of_top_scores (int): The number of scores to return, greater than 0 and no
        more than 100.

    Kwargs:
        use_cache (bool): If true, check the cache before retrieving querying the database.
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        topscores (dict): The top scores for the assessment for the student item.
            An empty array if there are no scores or all scores are 0.

    Raises:
        SubmissionNotFoundError: Raised when a submission cannot be found for
            the associated student item.
        SubmissionRequestError: Raised when the number of top scores is higher than the
            MAX_TOP_SUBMISSIONS constant.

    Examples:
        >>> course_id = "TestCourse"
        >>> item_id = "u_67"
        >>> item_type = "openassessment"
        >>> number_of_top_scores = 10
        >>>
        >>> get_top_submissions(course_id, item_id, item_type, number_of_top_scores)
        [{
            'score': 20,
            'content': "Platypus"
        },{
            'score': 16,
            'content': "Frog"
        }]

    """
    # Validate the requested count up front so we never build an unbounded query.
    if number_of_top_scores < 1 or number_of_top_scores > MAX_TOP_SUBMISSIONS:
        error_msg = (
            u"Number of top scores must be a number between 1 and {}.".format(MAX_TOP_SUBMISSIONS)
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    # First check the cache (unless caching is disabled)
    cache_key = "submissions.top_submissions.{course}.{item}.{type}.{number}".format(
        course=course_id,
        item=item_id,
        type=item_type,
        number=number_of_top_scores
    )
    top_submissions = cache.get(cache_key) if use_cache else None

    # If we can't find it in the cache (or caching is disabled), check the database
    # By default, prefer the read-replica.
    if top_submissions is None:
        try:
            # Highest earned points first; hidden/reset scores (0 points) excluded.
            query = ScoreSummary.objects.filter(
                student_item__course_id=course_id,
                student_item__item_id=item_id,
                student_item__item_type=item_type,
                latest__points_earned__gt=0
            ).select_related('latest', 'latest__submission').order_by("-latest__points_earned")

            if read_replica:
                query = _use_read_replica(query)

            score_summaries = query[:number_of_top_scores]
        except DatabaseError:
            msg = u"Could not fetch top score summaries for course {}, item {} of type {}".format(
                course_id, item_id, item_type
            )
            logger.exception(msg)
            raise SubmissionInternalError(msg)

        # Retrieve the submission content for each top score
        top_submissions = [
            {
                "score": score_summary.latest.points_earned,
                "content": SubmissionSerializer(score_summary.latest.submission).data['answer']
            }
            for score_summary in score_summaries
        ]

        # Always store the retrieved list in the cache
        cache.set(cache_key, top_submissions, TOP_SUBMISSIONS_CACHE_TIMEOUT)

    return top_submissions
def get_score(student_item):
    """Get the score for a particular student item

    Each student item should have a unique score. This function will return the
    score if it is available. A score is only calculated for a student item if
    it has completed the workflow for a particular assessment module.

    Args:
        student_item (dict): The dictionary representation of a student item.
            Function returns the score related to this student item.

    Returns:
        score (dict): The score associated with this student item. None if there
            is no score found.

    Raises:
        SubmissionInternalError: Raised if a score cannot be retrieved because
            of an internal server error.

    Examples:
        >>> student_item = {
        >>>     "student_id":"Tim",
        >>>     "course_id":"TestCourse",
        >>>     "item_id":"u_67",
        >>>     "item_type":"openassessment"
        >>> }
        >>>
        >>> get_score(student_item)
        [{
            'student_item': 2,
            'submission': 2,
            'points_earned': 8,
            'points_possible': 20,
            'created_at': datetime.datetime(2014, 2, 7, 18, 30, 1, 807911, tzinfo=<UTC>)
        }]

    """
    try:
        item_model = StudentItem.objects.get(**student_item)
        latest_score = ScoreSummary.objects.get(student_item=item_model).latest
    except (ScoreSummary.DoesNotExist, StudentItem.DoesNotExist):
        return None

    # By convention, scores are hidden if "points possible" is set to 0.
    # This can occur when an instructor has reset scores for a student.
    if latest_score.is_hidden():
        return None
    return ScoreSerializer(latest_score).data
def get_scores(course_id, student_id):
    """Return a dict mapping item_ids to scores.

    Scores are represented by serialized Score objects in JSON-like dict
    format.

    This method would be used by an LMS to find all the scores for a given
    student in a given course.

    Scores that are "hidden" (because they have points earned set to zero)
    are excluded from the results.

    Args:
        course_id (str): Course ID, used to do a lookup on the `StudentItem`.
        student_id (str): Student ID, used to do a lookup on the `StudentItem`.

    Returns:
        dict: The keys are `item_id`s (`str`) and the values are tuples of
        `(points_earned, points_possible)`. All points are integer values and
        represent the raw, unweighted scores. Submissions does not have any
        concept of weights. If there are no entries matching the `course_id` or
        `student_id`, we simply return an empty dictionary. This is not
        considered an error because there might be many queries for the progress
        page of a person who has never submitted anything.

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.
    """
    try:
        # select_related avoids one query per summary when building the dict below.
        score_summaries = ScoreSummary.objects.filter(
            student_item__course_id=course_id,
            student_item__student_id=student_id,
        ).select_related('latest', 'latest__submission', 'student_item')
    except DatabaseError:
        msg = u"Could not fetch scores for course {}, student {}".format(
            course_id, student_id
        )
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    # Hidden (reset) scores are filtered out to match get_score's behavior.
    scores = {
        summary.student_item.item_id: UnannotatedScoreSerializer(summary.latest).data
        for summary in score_summaries if not summary.latest.is_hidden()
    }
    return scores
def get_latest_score_for_submission(submission_uuid, read_replica=False):
    """
    Retrieve the latest score for a particular submission.

    Args:
        submission_uuid (str): The UUID of the submission to retrieve.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        dict: The serialized score model, or None if no score is available
        or the latest score is hidden (reset).
    """
    try:
        # Ensure that submission_uuid is valid before fetching score
        submission_model = _get_submission_model(submission_uuid, read_replica)
        # Newest score first; scores are immutable, so the highest id is the latest.
        score_qs = Score.objects.filter(
            submission__uuid=submission_model.uuid
        ).order_by("-id").select_related("submission")

        if read_replica:
            score_qs = _use_read_replica(score_qs)

        score = score_qs[0]
        if score.is_hidden():
            return None
    except (IndexError, Submission.DoesNotExist):
        # No score rows, or no such submission — both mean "no score".
        return None

    return ScoreSerializer(score).data
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
    """
    Reset scores for a specific student on a specific problem.

    Note: this does *not* delete `Score` models from the database,
    since these are immutable. It simply creates a new score with
    the "reset" flag set to True.

    Args:
        student_id (unicode): The ID of the student for whom to reset scores.
        course_id (unicode): The ID of the course containing the item to reset.
        item_id (unicode): The ID of the item for which to reset scores.
        clear_state (bool): If True, will appear to delete any submissions associated
            with the specified StudentItem (soft delete; cache entries are purged).
        emit_signal (bool): If True, send the `score_reset` signal to listeners.

    Returns:
        None

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.

    """
    # Retrieve the student item
    try:
        student_item = StudentItem.objects.get(
            student_id=student_id, course_id=course_id, item_id=item_id
        )
    except StudentItem.DoesNotExist:
        # If there is no student item, then there is no score to reset,
        # so we can return immediately.
        return

    # Create a "reset" score
    try:
        score = Score.create_reset_score(student_item)
        if emit_signal:
            # Send a signal out to any listeners who are waiting for scoring events.
            score_reset.send(
                sender=None,
                anonymous_user_id=student_id,
                course_id=course_id,
                item_id=item_id,
                created_at=score.created_at,
            )

        if clear_state:
            for sub in student_item.submission_set.all():
                # soft-delete the Submission
                sub.status = Submission.DELETED
                sub.save(update_fields=["status"])

                # Also clear out cached values
                cache_key = Submission.get_cache_key(sub.uuid)
                cache.delete(cache_key)

    except DatabaseError:
        # Typo fix: "reseting" -> "resetting" in the logged error message.
        msg = (
            u"Error occurred while resetting scores for"
            u" item {item_id} in course {course_id} for student {student_id}"
        ).format(item_id=item_id, course_id=course_id, student_id=student_id)
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    else:
        msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
            item_id=item_id, course_id=course_id, student_id=student_id
        )
        logger.info(msg)
def set_score(submission_uuid, points_earned, points_possible,
              annotation_creator=None, annotation_type=None, annotation_reason=None):
    """Set a score for a particular submission.

    Sets the score for a particular submission. This score is calculated
    externally to the API.

    Args:
        submission_uuid (str): UUID for the submission (must exist).
        points_earned (int): The earned points for this submission.
        points_possible (int): The total points possible for this particular student item.
        annotation_creator (str): An optional field for recording who gave this particular score
        annotation_type (str): An optional field for recording what type of annotation should be created,
            e.g. "staff_override".
        annotation_reason (str): An optional field for recording why this score was set to its value.

    Returns:
        None

    Raises:
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to save the score.
        SubmissionRequestError: Thrown if the given student item or submission
            are not found.

    Examples:
        >>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
        {
            'student_item': 2,
            'submission': 1,
            'points_earned': 11,
            'points_possible': 12,
            'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
        }

    """
    try:
        submission_model = _get_submission_model(submission_uuid)
    except Submission.DoesNotExist:
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except DatabaseError:
        error_msg = u"Could not retrieve submission {}.".format(
            submission_uuid
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    # Validate the score payload through the serializer before saving.
    score = ScoreSerializer(
        data={
            "student_item": submission_model.student_item.pk,
            "submission": submission_model.pk,
            "points_earned": points_earned,
            "points_possible": points_possible,
        }
    )
    if not score.is_valid():
        logger.exception(score.errors)
        raise SubmissionInternalError(score.errors)

    # When we save the score, a score summary will be created if
    # it does not already exist.
    # When the database's isolation level is set to repeatable-read,
    # it's possible for a score summary to exist for this student item,
    # even though we cannot retrieve it.
    # In this case, we assume that someone else has already created
    # a score summary and ignore the error.
    # TODO: once we're using Django 1.8, use transactions to ensure that these
    # two models are saved at the same time.
    try:
        score_model = score.save()
        _log_score(score_model)
        if annotation_creator is not None:
            # Record provenance (e.g. staff override) alongside the score.
            score_annotation = ScoreAnnotation(
                score=score_model,
                creator=annotation_creator,
                annotation_type=annotation_type,
                reason=annotation_reason
            )
            score_annotation.save()
        # Send a signal out to any listeners who are waiting for scoring events.
        score_set.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            anonymous_user_id=submission_model.student_item.student_id,
            course_id=submission_model.student_item.course_id,
            item_id=submission_model.student_item.item_id,
            created_at=score_model.created_at,
        )
    except IntegrityError:
        # Deliberate best-effort: a concurrent writer already created the
        # score summary (see the comment block above), so swallow the error.
        pass
def _log_submission(submission, student_item):
    """
    Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    message = (
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})"
    ).format(
        submission_uuid=submission["uuid"],
        course_id=student_item["course_id"],
        item_id=student_item["item_id"],
        anonymous_student_id=student_item["student_id"],
    )
    logger.info(message)
def _log_score(score):
    """
    Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    message = "Score of ({}/{}) set for submission {}".format(
        score.points_earned, score.points_possible, score.submission.uuid
    )
    logger.info(message)
def _get_or_create_student_item(student_item_dict):
    """Gets or creates a Student Item that matches the values specified.

    Attempts to get the specified Student Item. If it does not exist, the
    specified parameters are validated, and a new Student Item is created.

    Args:
        student_item_dict (dict): The dict containing the student_id, item_id,
            course_id, and item_type that uniquely defines a student item.

    Returns:
        StudentItem: The student item that was retrieved or created.

    Raises:
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to create or retrieve the specified student item.
        SubmissionRequestError: Thrown if the given student item parameters fail
            validation.

    Examples:
        >>> student_item_dict = dict(
        >>>    student_id="Tim",
        >>>    item_id="item_1",
        >>>    course_id="course_1",
        >>>    item_type="type_one"
        >>> )
        >>> _get_or_create_student_item(student_item_dict)
        {'item_id': 'item_1', 'item_type': 'type_one', 'course_id': 'course_1', 'student_id': 'Tim'}

    """
    try:
        try:
            # Fast path: the student item already exists.
            return StudentItem.objects.get(**student_item_dict)
        except StudentItem.DoesNotExist:
            # Validate before creating so bad input surfaces as a request
            # error rather than a database error.
            student_item_serializer = StudentItemSerializer(
                data=student_item_dict
            )
            if not student_item_serializer.is_valid():
                logger.error(
                    u"Invalid StudentItemSerializer: errors:{} data:{}".format(
                        student_item_serializer.errors,
                        student_item_dict
                    )
                )
                raise SubmissionRequestError(field_errors=student_item_serializer.errors)
            return student_item_serializer.save()
    except DatabaseError:
        error_message = u"An error occurred creating student item: {}".format(
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _use_read_replica(queryset):
    """
    Route the queryset through the read replica when one is configured.

    Args:
        queryset (QuerySet)

    Returns:
        QuerySet: bound to the "read_replica" database alias if that alias
        exists in ``settings.DATABASES``; otherwise the queryset unchanged.
    """
    if "read_replica" in settings.DATABASES:
        return queryset.using("read_replica")
    return queryset
|
edx/edx-submissions | submissions/api.py | _get_submission_model | python | def _get_submission_model(uuid, read_replica=False):
submission_qs = Submission.objects
if read_replica:
submission_qs = _use_read_replica(submission_qs)
try:
submission = submission_qs.get(uuid=uuid)
except Submission.DoesNotExist:
try:
hyphenated_value = six.text_type(UUID(uuid))
query = """
SELECT
`submissions_submission`.`id`,
`submissions_submission`.`uuid`,
`submissions_submission`.`student_item_id`,
`submissions_submission`.`attempt_number`,
`submissions_submission`.`submitted_at`,
`submissions_submission`.`created_at`,
`submissions_submission`.`raw_answer`,
`submissions_submission`.`status`
FROM
`submissions_submission`
WHERE (
NOT (`submissions_submission`.`status` = 'D')
AND `submissions_submission`.`uuid` = '{}'
)
"""
query = query.replace("{}", hyphenated_value)
# We can use Submission.objects instead of the SoftDeletedManager, we'll include that logic manually
submission = Submission.objects.raw(query)[0]
except IndexError:
raise Submission.DoesNotExist()
# Avoid the extra hit next time
submission.save(update_fields=['uuid'])
return submission | Helper to retrieve a given Submission object from the database. Helper is needed to centralize logic that fixes
EDUCATOR-1090, because uuids are stored both with and without hyphens. | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/api.py#L200-L238 | [
"def _use_read_replica(queryset):\n \"\"\"\n Use the read replica if it's available.\n\n Args:\n queryset (QuerySet)\n\n Returns:\n QuerySet\n\n \"\"\"\n return (\n queryset.using(\"read_replica\")\n if \"read_replica\" in settings.DATABASES\n else queryset\n )\n"
] | """
Public interface for the submissions app.
"""
from __future__ import absolute_import
import copy
import itertools
import logging
import operator
import json
from uuid import UUID
from django.conf import settings
from django.core.cache import cache
from django.db import IntegrityError, DatabaseError
from submissions.serializers import (
SubmissionSerializer, StudentItemSerializer, ScoreSerializer, UnannotatedScoreSerializer
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary, ScoreAnnotation, score_set, score_reset
import six
logger = logging.getLogger("submissions.api")
# By default, limit the number of top submissions
# Anything above this limit will result in a request error
MAX_TOP_SUBMISSIONS = 100
# Set a relatively low cache timeout for top submissions.
TOP_SUBMISSIONS_CACHE_TIMEOUT = 300
class SubmissionError(Exception):
    """Base error for submission actions.

    Raised when the submission API cannot perform a requested action;
    subclasses describe the specific failure.
    """
class SubmissionInternalError(SubmissionError):
    """An internal Submission API failure.

    Raised when the underlying services (database, serializers) fail,
    rather than when the API is used incorrectly.
    """
class SubmissionNotFoundError(SubmissionError):
    """No submission matched the request.

    Raised when the requested state matches no Submission records.
    """
class SubmissionRequestError(SubmissionError):
    """This error is raised when there was a request-specific error

    This error is reserved for problems specific to the use of the API.
    """

    def __init__(self, msg="", field_errors=None):
        """
        Configure the submission request error.

        Keyword Args:
            msg (unicode): The error message.
            field_errors (dict): A dictionary of errors (list of unicode)
                specific to a fields provided in the request.

        Example usage:

        >>> raise SubmissionRequestError(
        >>>     "An unexpected error occurred"
        >>>     {"answer": ["Maximum answer length exceeded."]}
        >>> )
        """
        super(SubmissionRequestError, self).__init__(msg)
        # BUG FIX: Python 3 has no implicit `Exception.message`; assign it
        # explicitly so __repr__ below does not raise AttributeError.
        self.message = msg
        # Deep-copy to isolate the recorded errors from caller mutation.
        self.field_errors = (
            copy.deepcopy(field_errors)
            if field_errors is not None
            else {}
        )
        self.args += (self.field_errors,)

    def __repr__(self):
        """
        Show the message and field errors upon output.
        """
        return '{}(msg="{}", field_errors={})'.format(
            self.__class__.__name__, self.message, self.field_errors
        )
def create_submission(student_item_dict, answer, submitted_at=None, attempt_number=None):
    """Create a new submission for assessment.

    Generic means by which to submit an answer for assessment.

    Args:
        student_item_dict (dict): Identifies the course, student, and item
            this submission belongs to.
        answer (JSON-serializable): The answer given by the student.
        submitted_at (datetime): When this submission was submitted.
            Defaults to the current time if omitted.
        attempt_number (int): Explicit attempt number for this question.
            If omitted, it is derived from the student's most recent
            submission (its attempt_number plus one), or 1 if none exist.

    Returns:
        dict: The serialized submission, with the related student item id,
        attempt_number, submitted_at, created_at, and answer fields.

    Raises:
        SubmissionRequestError: The student item or submission failed
            validation (missing values, answer too long, negative attempt
            number, or invalid submitted_at).
        SubmissionInternalError: Submission access caused an internal error.
    """
    item_model = _get_or_create_student_item(student_item_dict)

    if attempt_number is None:
        # Derive the attempt number from the most recent submission, if any.
        try:
            latest = Submission.objects.filter(
                student_item=item_model)[:1]
        except DatabaseError:
            message = u"An error occurred while filtering submissions for student item: {}".format(
                student_item_dict)
            logger.exception(message)
            raise SubmissionInternalError(message)
        attempt_number = latest[0].attempt_number + 1 if latest else 1

    serializer_kwargs = {
        "student_item": item_model.pk,
        "answer": answer,
        "attempt_number": attempt_number,
    }
    if submitted_at:
        serializer_kwargs["submitted_at"] = submitted_at

    try:
        serializer = SubmissionSerializer(data=serializer_kwargs)
        if not serializer.is_valid():
            raise SubmissionRequestError(field_errors=serializer.errors)
        serializer.save()

        serialized = serializer.data
        _log_submission(serialized, student_item_dict)

        return serialized
    except DatabaseError:
        message = u"An error occurred while creating submission {} for student item: {}".format(
            serializer_kwargs,
            student_item_dict
        )
        logger.exception(message)
        raise SubmissionInternalError(message)
def get_submission(submission_uuid, read_replica=False):
    """Retrieve a single submission by its uuid.

    Args:
        submission_uuid (str): Identifier for the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database, falling back to the default database if none exists.

    Returns:
        dict: The serialized submission.

    Raises:
        SubmissionNotFoundError: The submission does not exist.
        SubmissionRequestError: The search parameter is not a string.
        SubmissionInternalError: Any other unexpected error.
    """
    if not isinstance(submission_uuid, six.string_types):
        # Coerce UUID instances to text; any other non-string is a caller error.
        if isinstance(submission_uuid, UUID):
            submission_uuid = six.text_type(submission_uuid)
        else:
            raise SubmissionRequestError(
                msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
            )

    cache_key = Submission.get_cache_key(submission_uuid)
    try:
        cached_data = cache.get(cache_key)
    except Exception:
        # The cache backend may itself raise (for example, memcache keys
        # that contain spaces); treat that the same as a cache miss.
        logger.exception("Error occurred while retrieving submission from the cache")
        cached_data = None

    if cached_data:
        logger.info("Get submission {} (cached)".format(submission_uuid))
        return cached_data

    try:
        model = _get_submission_model(submission_uuid, read_replica)
        serialized = SubmissionSerializer(model).data
        cache.set(cache_key, serialized)
    except Submission.DoesNotExist:
        logger.error("Submission {} not found.".format(submission_uuid))
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except Exception as exc:
        # Something very unexpected has just happened (like DB misconfig)
        err_msg = "Could not get submission due to error: {}".format(exc)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)

    logger.info("Get submission {}".format(submission_uuid))
    return serialized
def get_submission_and_student(uuid, read_replica=False):
    """
    Retrieve a submission by uuid, with its student item expanded inline.

    Args:
        uuid (str): the unique identifier of the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database, falling back to the default database if none exists.

    Returns:
        Serialized Submission model (dict) whose 'student_item' key holds
        the serialized StudentItem model.

    Raises:
        SubmissionNotFoundError: The submission does not exist.
        SubmissionRequestError: The search parameter is not a string.
        SubmissionInternalError: Any other unexpected error.
    """
    # This may raise API exceptions
    submission = get_submission(uuid, read_replica=read_replica)

    # Try the cache first for the expanded student item.
    cache_key = "submissions.student_item.{}".format(submission['student_item'])
    try:
        cached_item = cache.get(cache_key)
    except Exception:
        # The cache backend may itself raise (for example, memcache keys
        # that contain spaces); treat that the same as a cache miss.
        logger.exception("Error occurred while retrieving student item from the cache")
        cached_item = None

    if cached_item is not None:
        submission['student_item'] = cached_item
        return submission

    # Cache miss: load the student item from the database and cache it.
    try:
        queryset = StudentItem.objects
        if read_replica:
            queryset = _use_read_replica(queryset)
        item_model = queryset.get(id=submission['student_item'])
        submission['student_item'] = StudentItemSerializer(item_model).data
        cache.set(cache_key, submission['student_item'])
    except Exception as ex:
        err_msg = "Could not get submission due to error: {}".format(ex)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)

    return submission
def get_submissions(student_item_dict, limit=None):
    """Return the submissions for a student item, most recent first.

    Args:
        student_item_dict (dict): The course / student / item triple whose
            submissions should be returned.
        limit (int): Optional cap on the number of submissions returned.
            If omitted, all associated submissions are returned.

    Returns:
        list of dict: Serialized submissions ordered by most recent
        submitted date. Each contains the related student item id,
        attempt_number, submitted_at, created_at, and answer fields.

    Raises:
        SubmissionRequestError: The associated student item failed
            validation.
        SubmissionNotFoundError: Submissions could not be fetched for the
            associated student item.
    """
    item_model = _get_or_create_student_item(student_item_dict)
    try:
        queryset = Submission.objects.filter(
            student_item=item_model)
    except DatabaseError:
        error_message = (
            u"Error getting submission request for student item {}"
            .format(student_item_dict)
        )
        logger.exception(error_message)
        raise SubmissionNotFoundError(error_message)

    if limit:
        queryset = queryset[:limit]

    return SubmissionSerializer(queryset, many=True).data
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
    """Yield the most recent submission of every student for one item.

    This may return a very large result set, so it is implemented as a
    generator for efficiency.

    Args:
        course_id, item_id, item_type (string): The values of the
            respective student_item fields to filter the submissions by.
        read_replica (bool): If true, attempt to use the read replica
            database, falling back to the default database if none exists.

    Yields:
        dict: The submission fields (student_item, attempt_number,
        submitted_at, created_at, answer) plus the submitting student's
        student_id.

    Raises:
        Cannot fail unless there's a database error, but may yield nothing.
    """
    queryset = Submission.objects
    if read_replica:
        queryset = _use_read_replica(queryset)

    # SELECT DISTINCT ON is PostgreSQL-only, so the query returns every
    # submission for each student. Sorting by student_id and then most
    # recent first makes each student's rows contiguous with the newest
    # submission at the front of its group.
    rows = queryset.select_related('student_item').filter(
        student_item__course_id=course_id,
        student_item__item_id=item_id,
        student_item__item_type=item_type,
    ).order_by('student_item__student_id', '-submitted_at', '-id').iterator()

    grouped = itertools.groupby(rows, operator.attrgetter('student_item.student_id'))
    for _student_id, group in grouped:
        newest = next(group)
        data = SubmissionSerializer(newest).data
        data['student_id'] = newest.student_item.student_id
        yield data
def get_all_course_submission_information(course_id, item_type, read_replica=True):
    """Yield (student item, submission, latest score) for a whole course.

    For the given course, yield every submission of the given item type
    together with its student item. When the submission is also the
    student item's latest scored submission (and that score is not a
    reset), the score is included; otherwise an empty dict takes its
    place.

    Args:
        course_id (str): The course that we are getting submissions from.
        item_type (str): The type of items that we are getting submissions for.
        read_replica (bool): Try to use the database's read replica if it's
            available.

    Yields:
        tuple of three dicts: the serialized student item (student_id,
        course_id, item_id, item_type), the serialized submission
        (student_item, attempt_number, submitted_at, created_at, answer),
        and the serialized latest score (student_item, submission,
        points_earned, points_possible, created_at, submission_uuid) or {}.
    """
    queryset = Submission.objects
    if read_replica:
        queryset = _use_read_replica(queryset)

    rows = queryset.select_related('student_item__scoresummary__latest__submission').filter(
        student_item__course_id=course_id,
        student_item__item_type=item_type,
    ).iterator()

    for submission in rows:
        student_item = submission.student_item
        serialized_score = {}
        if hasattr(student_item, 'scoresummary'):
            latest_score = student_item.scoresummary.latest
            # Mirror get_score's behavior: report a score only when it is
            # not a reset (hidden) score and it belongs to this very
            # submission rather than a different attempt.
            is_current = latest_score.submission.uuid == submission.uuid
            if (not latest_score.is_hidden()) and is_current:
                serialized_score = ScoreSerializer(latest_score).data
        yield (
            StudentItemSerializer(student_item).data,
            SubmissionSerializer(submission).data,
            serialized_score
        )
def get_top_submissions(course_id, item_id, item_type, number_of_top_scores, use_cache=True, read_replica=True):
    """Get a number of top scores for an assessment based on a particular student item

    This function will return top scores for the piece of assessment.
    It will consider only the latest and greater than 0 score for a piece of assessment.
    A score is only calculated for a student item if it has completed the workflow for
    a particular assessment module.

    In general, users of top submissions can tolerate some latency
    in the search results, so by default this call uses
    a cache and the read replica (if available).

    Args:
        course_id (str): The course to retrieve for the top scores
        item_id (str): The item within the course to retrieve for the top scores
        item_type (str): The type of item to retrieve
        number_of_top_scores (int): The number of scores to return, greater
            than 0 and no more than MAX_TOP_SUBMISSIONS.

    Kwargs:
        use_cache (bool): If true, check the cache before querying the database.
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        list of dict: The top scores for the assessment for the student
        item, each with 'score' (int) and 'content' (the submitted answer).
        An empty list if there are no scores or all scores are 0.

    Raises:
        SubmissionRequestError: Raised when the number of top scores is
            outside the range 1..MAX_TOP_SUBMISSIONS.
        SubmissionInternalError: Raised when a database error occurs while
            fetching the score summaries.

    Examples:
        >>> get_top_submissions("TestCourse", "u_67", "openassessment", 10)
        [{
            'score': 20,
            'content': "Platypus"
        },{
            'score': 16,
            'content': "Frog"
        }]
    """
    if number_of_top_scores < 1 or number_of_top_scores > MAX_TOP_SUBMISSIONS:
        error_msg = (
            u"Number of top scores must be a number between 1 and {}.".format(MAX_TOP_SUBMISSIONS)
        )
        # No exception is active here, so logger.exception would attach a
        # misleading "NoneType: None" traceback; a plain error is correct.
        logger.error(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    # First check the cache (unless caching is disabled)
    cache_key = "submissions.top_submissions.{course}.{item}.{type}.{number}".format(
        course=course_id,
        item=item_id,
        type=item_type,
        number=number_of_top_scores
    )
    top_submissions = cache.get(cache_key) if use_cache else None

    # If we can't find it in the cache (or caching is disabled), check the database
    # By default, prefer the read-replica.
    if top_submissions is None:
        try:
            query = ScoreSummary.objects.filter(
                student_item__course_id=course_id,
                student_item__item_id=item_id,
                student_item__item_type=item_type,
                latest__points_earned__gt=0
            ).select_related('latest', 'latest__submission').order_by("-latest__points_earned")

            if read_replica:
                query = _use_read_replica(query)

            score_summaries = query[:number_of_top_scores]
        except DatabaseError:
            msg = u"Could not fetch top score summaries for course {}, item {} of type {}".format(
                course_id, item_id, item_type
            )
            logger.exception(msg)
            raise SubmissionInternalError(msg)

        # Retrieve the submission content for each top score
        top_submissions = [
            {
                "score": score_summary.latest.points_earned,
                "content": SubmissionSerializer(score_summary.latest.submission).data['answer']
            }
            for score_summary in score_summaries
        ]

        # Always store the retrieved list in the cache
        cache.set(cache_key, top_submissions, TOP_SUBMISSIONS_CACHE_TIMEOUT)

    return top_submissions
def get_score(student_item):
    """Return the latest score for a particular student item, if any.

    Each student item should have a unique score. A score is only
    calculated for a student item once it has completed the workflow for
    a particular assessment module.

    Args:
        student_item (dict): Lookup fields for the StudentItem whose score
            should be returned.

    Returns:
        dict or None: The serialized score associated with this student
        item, or None when no (visible) score exists.

    Raises:
        SubmissionInternalError: A score could not be retrieved because of
            an internal server error.
    """
    try:
        item_model = StudentItem.objects.get(**student_item)
        latest = ScoreSummary.objects.get(student_item=item_model).latest
    except (ScoreSummary.DoesNotExist, StudentItem.DoesNotExist):
        return None

    # By convention, scores are hidden if "points possible" is set to 0.
    # This can occur when an instructor has reset scores for a student.
    if latest.is_hidden():
        return None
    return ScoreSerializer(latest).data
def get_scores(course_id, student_id):
    """Return a dict mapping item_ids to serialized scores.

    Used by an LMS to find all the scores for a given student in a given
    course. Scores that are "hidden" (points possible set to zero, for
    example after an instructor reset) are excluded from the results.

    Args:
        course_id (str): Course ID, used to do a lookup on the `StudentItem`.
        student_id (str): Student ID, used to do a lookup on the `StudentItem`.

    Returns:
        dict: The keys are `item_id`s (`str`) and the values are the
        latest serialized scores (without annotations), carrying the raw,
        unweighted points. If nothing matches the `course_id` or
        `student_id`, an empty dictionary is returned; this is not an
        error, because the progress page of someone who has never
        submitted anything may trigger such a query.

    Raises:
        SubmissionInternalError: An unexpected error occurred while
            fetching scores.
    """
    try:
        summaries = ScoreSummary.objects.filter(
            student_item__course_id=course_id,
            student_item__student_id=student_id,
        ).select_related('latest', 'latest__submission', 'student_item')
    except DatabaseError:
        msg = u"Could not fetch scores for course {}, student {}".format(
            course_id, student_id
        )
        logger.exception(msg)
        raise SubmissionInternalError(msg)

    return {
        summary.student_item.item_id: UnannotatedScoreSerializer(summary.latest).data
        for summary in summaries if not summary.latest.is_hidden()
    }
def get_latest_score_for_submission(submission_uuid, read_replica=False):
    """
    Retrieve the latest score for a particular submission.

    Args:
        submission_uuid (str): The UUID of the submission to retrieve.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database, falling back to the default database if none exists.

    Returns:
        dict: The serialized score model, or None if no (visible) score
        is available.
    """
    try:
        # Validate (and normalize) the submission uuid before querying for
        # its scores.
        submission_model = _get_submission_model(submission_uuid, read_replica)
        queryset = Score.objects.filter(
            submission__uuid=submission_model.uuid
        ).order_by("-id").select_related("submission")
        if read_replica:
            queryset = _use_read_replica(queryset)
        latest = queryset[0]
        if latest.is_hidden():
            return None
    except (IndexError, Submission.DoesNotExist):
        return None

    return ScoreSerializer(latest).data
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
    """
    Reset scores for a specific student on a specific problem.

    Note: this does *not* delete `Score` models from the database,
    since these are immutable. It simply creates a new score with
    the "reset" flag set to True.

    Args:
        student_id (unicode): The ID of the student for whom to reset scores.
        course_id (unicode): The ID of the course containing the item to reset.
        item_id (unicode): The ID of the item for which to reset scores.
        clear_state (bool): If True, soft-delete any submissions associated
            with the specified StudentItem (and clear their cache entries)
            so they appear removed.
        emit_signal (bool): If True, send the score_reset signal to any
            listeners waiting for scoring events.

    Returns:
        None

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.
    """
    # Retrieve the student item
    try:
        student_item = StudentItem.objects.get(
            student_id=student_id, course_id=course_id, item_id=item_id
        )
    except StudentItem.DoesNotExist:
        # If there is no student item, then there is no score to reset,
        # so we can return immediately.
        return

    # Create a "reset" score
    try:
        score = Score.create_reset_score(student_item)
        if emit_signal:
            # Send a signal out to any listeners who are waiting for scoring events.
            score_reset.send(
                sender=None,
                anonymous_user_id=student_id,
                course_id=course_id,
                item_id=item_id,
                created_at=score.created_at,
            )

        if clear_state:
            for sub in student_item.submission_set.all():
                # soft-delete the Submission
                sub.status = Submission.DELETED
                sub.save(update_fields=["status"])

                # Also clear out cached values
                cache_key = Submission.get_cache_key(sub.uuid)
                cache.delete(cache_key)

    except DatabaseError:
        msg = (
            u"Error occurred while resetting scores for"
            u" item {item_id} in course {course_id} for student {student_id}"
        ).format(item_id=item_id, course_id=course_id, student_id=student_id)
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    else:
        msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
            item_id=item_id, course_id=course_id, student_id=student_id
        )
        logger.info(msg)
def set_score(submission_uuid, points_earned, points_possible,
              annotation_creator=None, annotation_type=None, annotation_reason=None):
    """Set a score for a particular submission.

    Sets the score for a particular submission. This score is calculated
    externally to the API.

    Args:
        submission_uuid (str): UUID for the submission (must exist).
        points_earned (int): The earned points for this submission.
        points_possible (int): The total points possible for this particular student item.
        annotation_creator (str): An optional field for recording who gave this particular score
        annotation_type (str): An optional field for recording what type of annotation should be created,
            e.g. "staff_override".
        annotation_reason (str): An optional field for recording why this score was set to its value.

    Returns:
        None

    Raises:
        SubmissionNotFoundError: Thrown if the given submission does not exist.
        SubmissionRequestError: Thrown if the submission could not be retrieved.
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to save the score.

    Examples:
        >>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
        {
            'student_item': 2,
            'submission': 1,
            'points_earned': 11,
            'points_possible': 12,
            'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
        }
    """
    try:
        submission_model = _get_submission_model(submission_uuid)
    except Submission.DoesNotExist:
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except DatabaseError:
        error_msg = u"Could not retrieve submission {}.".format(
            submission_uuid
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    score = ScoreSerializer(
        data={
            "student_item": submission_model.student_item.pk,
            "submission": submission_model.pk,
            "points_earned": points_earned,
            "points_possible": points_possible,
        }
    )
    if not score.is_valid():
        # No exception is active here, so logger.exception would attach a
        # misleading "NoneType: None" traceback; a plain error is correct.
        logger.error(score.errors)
        raise SubmissionInternalError(score.errors)

    # When we save the score, a score summary will be created if
    # it does not already exist.
    # When the database's isolation level is set to repeatable-read,
    # it's possible for a score summary to exist for this student item,
    # even though we cannot retrieve it.
    # In this case, we assume that someone else has already created
    # a score summary and ignore the error.
    # TODO: once we're using Django 1.8, use transactions to ensure that these
    # two models are saved at the same time.
    try:
        score_model = score.save()
        _log_score(score_model)
        if annotation_creator is not None:
            score_annotation = ScoreAnnotation(
                score=score_model,
                creator=annotation_creator,
                annotation_type=annotation_type,
                reason=annotation_reason
            )
            score_annotation.save()
        # Send a signal out to any listeners who are waiting for scoring events.
        score_set.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            anonymous_user_id=submission_model.student_item.student_id,
            course_id=submission_model.student_item.course_id,
            item_id=submission_model.student_item.item_id,
            created_at=score_model.created_at,
        )
    except IntegrityError:
        # Assume a concurrent writer created the score summary (see the
        # repeatable-read note above) and ignore the conflict.
        pass
def _log_submission(submission, student_item):
    """
    Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    message = (
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})"
    ).format(
        submission_uuid=submission["uuid"],
        course_id=student_item["course_id"],
        item_id=student_item["item_id"],
        anonymous_student_id=student_item["student_id"]
    )
    logger.info(message)
def _log_score(score):
    """
    Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    message = "Score of ({}/{}) set for submission {}".format(
        score.points_earned, score.points_possible, score.submission.uuid
    )
    logger.info(message)
def _get_or_create_student_item(student_item_dict):
    """Fetch the StudentItem matching the given values, creating it if needed.

    Attempts to get the specified Student Item. If it does not exist, the
    specified parameters are validated, and a new Student Item is created.

    Args:
        student_item_dict (dict): Contains the student_id, item_id,
            course_id, and item_type that uniquely define a student item.

    Returns:
        StudentItem: The student item that was retrieved or created.

    Raises:
        SubmissionInternalError: An internal error occurred while creating
            or retrieving the student item.
        SubmissionRequestError: The given student item parameters failed
            validation.
    """
    try:
        try:
            return StudentItem.objects.get(**student_item_dict)
        except StudentItem.DoesNotExist:
            serializer = StudentItemSerializer(
                data=student_item_dict
            )
            if not serializer.is_valid():
                logger.error(
                    u"Invalid StudentItemSerializer: errors:{} data:{}".format(
                        serializer.errors,
                        student_item_dict
                    )
                )
                raise SubmissionRequestError(field_errors=serializer.errors)
            return serializer.save()
    except DatabaseError:
        error_message = u"An error occurred creating student item: {}".format(
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _use_read_replica(queryset):
    """
    Point the queryset at the read replica when one is configured.

    Args:
        queryset (QuerySet)

    Returns:
        QuerySet: The queryset bound to the "read_replica" database if the
        settings define one, otherwise the queryset unchanged.
    """
    if "read_replica" in settings.DATABASES:
        return queryset.using("read_replica")
    return queryset
|
edx/edx-submissions | submissions/api.py | get_submission | python | def get_submission(submission_uuid, read_replica=False):
if not isinstance(submission_uuid, six.string_types):
if isinstance(submission_uuid, UUID):
submission_uuid = six.text_type(submission_uuid)
else:
raise SubmissionRequestError(
msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
)
cache_key = Submission.get_cache_key(submission_uuid)
try:
cached_submission_data = cache.get(cache_key)
except Exception:
# The cache backend could raise an exception
# (for example, memcache keys that contain spaces)
logger.exception("Error occurred while retrieving submission from the cache")
cached_submission_data = None
if cached_submission_data:
logger.info("Get submission {} (cached)".format(submission_uuid))
return cached_submission_data
try:
submission = _get_submission_model(submission_uuid, read_replica)
submission_data = SubmissionSerializer(submission).data
cache.set(cache_key, submission_data)
except Submission.DoesNotExist:
logger.error("Submission {} not found.".format(submission_uuid))
raise SubmissionNotFoundError(
u"No submission matching uuid {}".format(submission_uuid)
)
except Exception as exc:
# Something very unexpected has just happened (like DB misconfig)
err_msg = "Could not get submission due to error: {}".format(exc)
logger.exception(err_msg)
raise SubmissionInternalError(err_msg)
logger.info("Get submission {}".format(submission_uuid))
return submission_data | Retrieves a single submission by uuid.
Args:
submission_uuid (str): Identifier for the submission.
Kwargs:
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Raises:
SubmissionNotFoundError: Raised if the submission does not exist.
SubmissionRequestError: Raised if the search parameter is not a string.
SubmissionInternalError: Raised for unknown errors.
Examples:
>>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
} | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/api.py#L241-L304 | [
"def _get_submission_model(uuid, read_replica=False):\n \"\"\"\n Helper to retrieve a given Submission object from the database. Helper is needed to centralize logic that fixes\n EDUCATOR-1090, because uuids are stored both with and without hyphens.\n \"\"\"\n submission_qs = Submission.objects\n if read_replica:\n submission_qs = _use_read_replica(submission_qs)\n try:\n submission = submission_qs.get(uuid=uuid)\n except Submission.DoesNotExist:\n try:\n hyphenated_value = six.text_type(UUID(uuid))\n query = \"\"\"\n SELECT\n `submissions_submission`.`id`,\n `submissions_submission`.`uuid`,\n `submissions_submission`.`student_item_id`,\n `submissions_submission`.`attempt_number`,\n `submissions_submission`.`submitted_at`,\n `submissions_submission`.`created_at`,\n `submissions_submission`.`raw_answer`,\n `submissions_submission`.`status`\n FROM\n `submissions_submission`\n WHERE (\n NOT (`submissions_submission`.`status` = 'D')\n AND `submissions_submission`.`uuid` = '{}'\n )\n \"\"\"\n query = query.replace(\"{}\", hyphenated_value)\n\n # We can use Submission.objects instead of the SoftDeletedManager, we'll include that logic manually\n submission = Submission.objects.raw(query)[0]\n except IndexError:\n raise Submission.DoesNotExist()\n # Avoid the extra hit next time\n submission.save(update_fields=['uuid'])\n return submission\n",
"def get_cache_key(sub_uuid):\n return u\"submissions.submission.{}\".format(sub_uuid)\n"
] | """
Public interface for the submissions app.
"""
from __future__ import absolute_import
import copy
import itertools
import logging
import operator
import json
from uuid import UUID
from django.conf import settings
from django.core.cache import cache
from django.db import IntegrityError, DatabaseError
from submissions.serializers import (
SubmissionSerializer, StudentItemSerializer, ScoreSerializer, UnannotatedScoreSerializer
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary, ScoreAnnotation, score_set, score_reset
import six
logger = logging.getLogger("submissions.api")
# By default, limit the number of top submissions
# Anything above this limit will result in a request error
MAX_TOP_SUBMISSIONS = 100
# Set a relatively low cache timeout for top submissions.
TOP_SUBMISSIONS_CACHE_TIMEOUT = 300
class SubmissionError(Exception):
    """Base class for errors raised while performing submission actions.

    Raised whenever the submission API cannot carry out a requested action.
    """
    pass
class SubmissionInternalError(SubmissionError):
    """An error internal to the Submission API has occurred.

    Raised for failures caused by the underlying services (e.g. the
    database), rather than by incorrect use of the API.
    """
    pass
class SubmissionNotFoundError(SubmissionError):
    """Raised when no submission matches the request.

    A request whose parameters match no existing Submission may raise this.
    """
    pass
class SubmissionRequestError(SubmissionError):
    """Raised for request-specific errors.

    Reserved for problems specific to the use of the API (validation
    failures, bad parameters), as opposed to internal failures.
    """

    def __init__(self, msg="", field_errors=None):
        """Configure the submission request error.

        Keyword Args:
            msg (unicode): The error message.
            field_errors (dict): A dictionary of errors (list of unicode)
                specific to fields provided in the request.

        Example usage:

        >>> raise SubmissionRequestError(
        >>>     "An unexpected error occurred",
        >>>     {"answer": ["Maximum answer length exceeded."]}
        >>> )
        """
        super(SubmissionRequestError, self).__init__(msg)
        # Deep-copy so later mutation of the caller's dict cannot change
        # the recorded errors.
        self.field_errors = (
            copy.deepcopy(field_errors)
            if field_errors is not None
            else {}
        )
        self.args += (self.field_errors,)

    def __repr__(self):
        """Show the message and field errors upon output."""
        # BUG FIX: Exception instances have no `.message` attribute on
        # Python 3 (it was removed after Python 2.5); reading it raised
        # AttributeError whenever repr() was taken. The message is the
        # first element of `args`, set by __init__ above.
        return '{}(msg="{}", field_errors={})'.format(
            self.__class__.__name__,
            self.args[0] if self.args else "",
            self.field_errors,
        )
def create_submission(student_item_dict, answer, submitted_at=None, attempt_number=None):
    """Create a submission for assessment.

    Generic means by which to submit an answer for assessment.

    Args:
        student_item_dict (dict): Identifies the course, student, and item
            this submission belongs to.
        answer (JSON-serializable): The answer given by the student.
        submitted_at (datetime): When the submission was submitted; defaults
            to the current date if not given.
        attempt_number (int): Explicit attempt number; if omitted, one more
            than the most recent submission's attempt number is used.

    Returns:
        dict: Serialized representation of the created Submission, with
        student_item, attempt_number, submitted_at, created_at, and answer.

    Raises:
        SubmissionRequestError: Validation failed for the student item or
            submission (missing values, overlong answer, negative attempt
            number, invalid submitted_at).
        SubmissionInternalError: Submission access caused an internal error.
    """
    student_item_model = _get_or_create_student_item(student_item_dict)

    if attempt_number is None:
        # Derive the attempt number from the student's latest submission.
        try:
            previous = Submission.objects.filter(
                student_item=student_item_model)[:1]
        except DatabaseError:
            error_message = u"An error occurred while filtering submissions for student item: {}".format(
                student_item_dict)
            logger.exception(error_message)
            raise SubmissionInternalError(error_message)
        attempt_number = previous[0].attempt_number + 1 if previous else 1

    model_kwargs = {
        "student_item": student_item_model.pk,
        "answer": answer,
        "attempt_number": attempt_number,
    }
    if submitted_at:
        model_kwargs["submitted_at"] = submitted_at

    try:
        serializer = SubmissionSerializer(data=model_kwargs)
        if not serializer.is_valid():
            raise SubmissionRequestError(field_errors=serializer.errors)
        serializer.save()
        sub_data = serializer.data
        _log_submission(sub_data, student_item_dict)
        return sub_data
    except DatabaseError:
        error_message = u"An error occurred while creating submission {} for student item: {}".format(
            model_kwargs,
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _get_submission_model(uuid, read_replica=False):
    """Retrieve a Submission, tolerating hyphenated and unhyphenated uuids.

    Centralizes the logic that fixes EDUCATOR-1090: submission uuids are
    stored both with and without hyphens, so a miss on the raw value falls
    back to the canonical hyphenated form via raw SQL.

    Args:
        uuid (str): Submission uuid, with or without hyphens.
        read_replica (bool): Use the read replica for the first lookup.

    Returns:
        Submission: The matching, non-deleted submission.

    Raises:
        Submission.DoesNotExist: No matching submission exists, or *uuid*
            is not a valid UUID at all.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)
    try:
        submission = submission_qs.get(uuid=uuid)
    except Submission.DoesNotExist:
        try:
            hyphenated_value = six.text_type(UUID(uuid))
            query = """
                SELECT
                    `submissions_submission`.`id`,
                    `submissions_submission`.`uuid`,
                    `submissions_submission`.`student_item_id`,
                    `submissions_submission`.`attempt_number`,
                    `submissions_submission`.`submitted_at`,
                    `submissions_submission`.`created_at`,
                    `submissions_submission`.`raw_answer`,
                    `submissions_submission`.`status`
                FROM
                    `submissions_submission`
                WHERE (
                    NOT (`submissions_submission`.`status` = 'D')
                    AND `submissions_submission`.`uuid` = %s
                )
            """
            # SECURITY/ROBUSTNESS FIX: pass the value as a query parameter
            # instead of substituting it into the SQL string. UUID() already
            # constrains the value, but parameterization removes the
            # injection pattern entirely.
            # Submission.objects (not the SoftDeletedManager) is used on
            # purpose; the NOT status = 'D' clause reproduces that logic.
            submission = Submission.objects.raw(query, [hyphenated_value])[0]
        except (IndexError, ValueError):
            # IndexError: no row matched the hyphenated uuid.
            # ValueError: *uuid* could not be parsed as a UUID (fix: the
            # original let this propagate, bypassing callers' DoesNotExist
            # handlers).
            raise Submission.DoesNotExist()
        # Re-save the uuid so the fast path succeeds next time.
        submission.save(update_fields=['uuid'])
    return submission
def get_submission_and_student(uuid, read_replica=False):
    """Retrieve a submission by UUID, with its student item inlined.

    Args:
        uuid (str): The unique identifier of the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database; otherwise use the default database.

    Returns:
        dict: Serialized Submission containing a serialized StudentItem.

    Raises:
        SubmissionNotFoundError: The submission does not exist.
        SubmissionRequestError: The search parameter is not a string.
        SubmissionInternalError: Any unexpected failure.
    """
    # May raise the API exceptions documented above.
    submission = get_submission(uuid, read_replica=read_replica)

    # Try the cache for the student item first.
    cache_key = "submissions.student_item.{}".format(submission['student_item'])
    try:
        cached_student_item = cache.get(cache_key)
    except Exception:
        # Some cache backends raise (for example, memcache keys that
        # contain spaces); treat that the same as a cache miss.
        logger.exception("Error occurred while retrieving student item from the cache")
        cached_student_item = None

    if cached_student_item is None:
        # Cache miss: load and serialize the student item, then cache it.
        try:
            item_qs = StudentItem.objects
            if read_replica:
                item_qs = _use_read_replica(item_qs)
            item = item_qs.get(id=submission['student_item'])
            submission['student_item'] = StudentItemSerializer(item).data
            cache.set(cache_key, submission['student_item'])
        except Exception as ex:
            err_msg = "Could not get submission due to error: {}".format(ex)
            logger.exception(err_msg)
            raise SubmissionInternalError(err_msg)
    else:
        submission['student_item'] = cached_student_item

    return submission
def get_submissions(student_item_dict, limit=None):
    """Return the submissions for a student item, most recent first.

    Args:
        student_item_dict (dict): Identifies the course, student, and item
            the submissions are associated with.
        limit (int): Optional cap on the number of submissions returned;
            all associated submissions are returned when omitted.

    Returns:
        list of dict: Serialized submissions for the student item.

    Raises:
        SubmissionRequestError: The associated student item fails validation.
        SubmissionNotFoundError: Submissions could not be fetched.
    """
    student_item_model = _get_or_create_student_item(student_item_dict)
    try:
        queryset = Submission.objects.filter(
            student_item=student_item_model)
    except DatabaseError:
        error_message = (
            u"Error getting submission request for student item {}"
            .format(student_item_dict)
        )
        logger.exception(error_message)
        raise SubmissionNotFoundError(error_message)

    if limit:
        queryset = queryset[:limit]

    return SubmissionSerializer(queryset, many=True).data
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
    """Yield the most recent submission of every student for one item.

    This may return a very large result set, so it is a generator.

    Args:
        course_id, item_id, item_type (str): Values of the respective
            student_item fields to filter the submissions by.
        read_replica (bool): If true, attempt to use the read replica
            database; otherwise use the default database.

    Yields:
        dict: Submission fields (student_item, attempt_number, submitted_at,
        created_at, answer) plus the submitting student_id.

    Raises:
        Cannot fail unless there's a database error, but may yield nothing.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    # SELECT DISTINCT ON is PostgreSQL-only, so the query returns every
    # submission of each student, not just the most recent. Sorting by
    # student and recency groups each student's rows together with the
    # newest submission first.
    query = submission_qs.select_related('student_item').filter(
        student_item__course_id=course_id,
        student_item__item_id=item_id,
        student_item__item_type=item_type,
    ).order_by('student_item__student_id', '-submitted_at', '-id').iterator()

    for _, per_student in itertools.groupby(query, operator.attrgetter('student_item.student_id')):
        newest = next(per_student)
        data = SubmissionSerializer(newest).data
        data['student_id'] = newest.student_item.student_id
        yield data
def get_all_course_submission_information(course_id, item_type, read_replica=True):
    """Yield (student_item, submission, score) triples for a whole course.

    For every submission of the given item type in the course, yields the
    serialized student item, the serialized submission, and the latest score
    for that student item. The score dict is empty when no latest score
    exists, when the score has been reset (hidden), or when it belongs to a
    submission other than the one being yielded.

    Args:
        course_id (str): The course we are getting submissions from.
        item_type (str): The type of items we are getting submissions for.
        read_replica (bool): Try the read replica database if it's available.

    Yields:
        tuple(dict, dict, dict): (student_item, submission, score) where
        score may be an empty dict.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    query = submission_qs.select_related('student_item__scoresummary__latest__submission').filter(
        student_item__course_id=course_id,
        student_item__item_type=item_type,
    ).iterator()

    for submission in query:
        student_item = submission.student_item
        serialized_score = {}
        if hasattr(student_item, 'scoresummary'):
            latest_score = student_item.scoresummary.latest
            # Match the behavior of get_score(): skip hidden (reset) scores
            # and scores whose submission is not the one being yielded.
            if not latest_score.is_hidden() and latest_score.submission.uuid == submission.uuid:
                serialized_score = ScoreSerializer(latest_score).data
        yield (
            StudentItemSerializer(student_item).data,
            SubmissionSerializer(submission).data,
            serialized_score,
        )
def get_top_submissions(course_id, item_id, item_type, number_of_top_scores, use_cache=True, read_replica=True):
    """Return the highest-scoring submissions for one assessment item.

    Only the latest, greater-than-zero score per student item is considered,
    and a score only exists once the assessment workflow has completed.
    Users of top submissions can tolerate some latency, so by default both
    the cache and the read replica (if available) are used.

    Args:
        course_id (str): The course to retrieve top scores for.
        item_id (str): The item within the course.
        item_type (str): The type of item.
        number_of_top_scores (int): How many scores to return; must be
            between 1 and MAX_TOP_SUBMISSIONS inclusive.

    Kwargs:
        use_cache (bool): Check the cache before querying the database.
        read_replica (bool): Use the read replica database if available.

    Returns:
        list of dict: Each with "score" and "content" keys; empty when
        there are no scores or all scores are 0.

    Raises:
        SubmissionRequestError: number_of_top_scores is out of range.
        SubmissionInternalError: The database query failed.
    """
    if not 1 <= number_of_top_scores <= MAX_TOP_SUBMISSIONS:
        error_msg = (
            u"Number of top scores must be a number between 1 and {}.".format(MAX_TOP_SUBMISSIONS)
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    # Check the cache first (unless caching is disabled).
    cache_key = "submissions.top_submissions.{course}.{item}.{type}.{number}".format(
        course=course_id,
        item=item_id,
        type=item_type,
        number=number_of_top_scores
    )
    top_submissions = cache.get(cache_key) if use_cache else None

    if top_submissions is None:
        # Cache miss (or caching disabled): query the database, preferring
        # the read replica by default.
        try:
            query = ScoreSummary.objects.filter(
                student_item__course_id=course_id,
                student_item__item_id=item_id,
                student_item__item_type=item_type,
                latest__points_earned__gt=0
            ).select_related('latest', 'latest__submission').order_by("-latest__points_earned")
            if read_replica:
                query = _use_read_replica(query)
            score_summaries = query[:number_of_top_scores]
        except DatabaseError:
            msg = u"Could not fetch top score summaries for course {}, item {} of type {}".format(
                course_id, item_id, item_type
            )
            logger.exception(msg)
            raise SubmissionInternalError(msg)

        # Pair each top score with its submission content.
        top_submissions = [
            {
                "score": summary.latest.points_earned,
                "content": SubmissionSerializer(summary.latest.submission).data['answer'],
            }
            for summary in score_summaries
        ]

        # Always store the freshly retrieved list in the cache.
        cache.set(cache_key, top_submissions, TOP_SUBMISSIONS_CACHE_TIMEOUT)

    return top_submissions
def get_score(student_item):
    """Return the latest score for a particular student item, or None.

    Each student item has at most one current score, and a score is only
    calculated once the item has completed the assessment workflow.

    Args:
        student_item (dict): Lookup fields for the StudentItem whose score
            should be returned.

    Returns:
        dict or None: The serialized score, or None when no score exists
        or the score is hidden.

    Raises:
        SubmissionInternalError: A score could not be retrieved because of
            an internal server error.
    """
    try:
        student_item_model = StudentItem.objects.get(**student_item)
        score = ScoreSummary.objects.get(student_item=student_item_model).latest
    except (ScoreSummary.DoesNotExist, StudentItem.DoesNotExist):
        return None

    # By convention a score is hidden when "points possible" is 0, which
    # happens when an instructor has reset scores for the student.
    if score.is_hidden():
        return None
    return ScoreSerializer(score).data
def get_scores(course_id, student_id):
    """Map item_ids to serialized scores for one student in one course.

    Used by an LMS to find all scores for a given student in a given
    course. Scores hidden by a reset are excluded. An empty dict (not an
    error) is returned when nothing matches — e.g. the progress page of a
    student who has never submitted anything.

    Args:
        course_id (str): Used to look up the StudentItems.
        student_id (str): Used to look up the StudentItems.

    Returns:
        dict: item_id (str) -> serialized score with raw, unweighted
        points_earned / points_possible (submissions has no concept of
        weights).

    Raises:
        SubmissionInternalError: An unexpected database error occurred.
    """
    try:
        score_summaries = ScoreSummary.objects.filter(
            student_item__course_id=course_id,
            student_item__student_id=student_id,
        ).select_related('latest', 'latest__submission', 'student_item')
    except DatabaseError:
        msg = u"Could not fetch scores for course {}, student {}".format(
            course_id, student_id
        )
        logger.exception(msg)
        raise SubmissionInternalError(msg)

    return {
        summary.student_item.item_id: UnannotatedScoreSerializer(summary.latest).data
        for summary in score_summaries
        if not summary.latest.is_hidden()
    }
def get_latest_score_for_submission(submission_uuid, read_replica=False):
    """Return the newest score for a particular submission, or None.

    Args:
        submission_uuid (str): The UUID of the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database; otherwise use the default database.

    Returns:
        dict or None: The serialized score model, or None when no score is
        available or the score is hidden.
    """
    try:
        # Resolve the submission first so an invalid uuid short-circuits.
        submission_model = _get_submission_model(submission_uuid, read_replica)
        score_qs = Score.objects.filter(
            submission__uuid=submission_model.uuid
        ).order_by("-id").select_related("submission")
        if read_replica:
            score_qs = _use_read_replica(score_qs)
        score = score_qs[0]
        if score.is_hidden():
            return None
    except (IndexError, Submission.DoesNotExist):
        return None
    return ScoreSerializer(score).data
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
    """Reset scores for a specific student on a specific problem.

    Note: this does *not* delete `Score` models from the database, since
    those are immutable. It simply creates a new score with the "reset"
    flag set to True.

    Args:
        student_id (unicode): The ID of the student for whom to reset scores.
        course_id (unicode): The ID of the course containing the item to reset.
        item_id (unicode): The ID of the item for which to reset scores.
        clear_state (bool): If True, soft-delete any submissions associated
            with the student item and evict them from the cache.
        emit_signal (bool): If True, send the `score_reset` signal to
            listeners waiting on scoring events.

    Returns:
        None

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.
    """
    # Retrieve the student item
    try:
        student_item = StudentItem.objects.get(
            student_id=student_id, course_id=course_id, item_id=item_id
        )
    except StudentItem.DoesNotExist:
        # If there is no student item, then there is no score to reset,
        # so we can return immediately.
        return

    # Create a "reset" score
    try:
        score = Score.create_reset_score(student_item)
        if emit_signal:
            # Send a signal out to any listeners who are waiting for scoring events.
            score_reset.send(
                sender=None,
                anonymous_user_id=student_id,
                course_id=course_id,
                item_id=item_id,
                created_at=score.created_at,
            )

        if clear_state:
            for sub in student_item.submission_set.all():
                # soft-delete the Submission
                sub.status = Submission.DELETED
                sub.save(update_fields=["status"])

                # Also clear out cached values
                cache_key = Submission.get_cache_key(sub.uuid)
                cache.delete(cache_key)
    except DatabaseError:
        # BUG FIX: corrected the typo "reseting" -> "resetting" in the
        # logged/raised error message.
        msg = (
            u"Error occurred while resetting scores for"
            u" item {item_id} in course {course_id} for student {student_id}"
        ).format(item_id=item_id, course_id=course_id, student_id=student_id)
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    else:
        msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
            item_id=item_id, course_id=course_id, student_id=student_id
        )
        logger.info(msg)
def set_score(submission_uuid, points_earned, points_possible,
              annotation_creator=None, annotation_type=None, annotation_reason=None):
    """Record an externally calculated score for a particular submission.

    Args:
        submission_uuid (str): UUID for the submission (must exist).
        points_earned (int): The earned points for this submission.
        points_possible (int): Total points possible for the student item.
        annotation_creator (str): Optional author of the score annotation.
        annotation_type (str): Optional annotation type to create,
            e.g. "staff_override".
        annotation_reason (str): Optional reason the score was set.

    Returns:
        None

    Raises:
        SubmissionNotFoundError: No submission matches the uuid.
        SubmissionRequestError: The submission could not be retrieved.
        SubmissionInternalError: The score failed validation or could not
            be saved.
    """
    try:
        submission_model = _get_submission_model(submission_uuid)
    except Submission.DoesNotExist:
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except DatabaseError:
        error_msg = u"Could not retrieve submission {}.".format(
            submission_uuid
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    score = ScoreSerializer(
        data={
            "student_item": submission_model.student_item.pk,
            "submission": submission_model.pk,
            "points_earned": points_earned,
            "points_possible": points_possible,
        }
    )
    if not score.is_valid():
        logger.exception(score.errors)
        raise SubmissionInternalError(score.errors)

    # Saving the score creates a score summary if one does not exist yet.
    # Under repeatable-read isolation a summary may already exist for this
    # student item even though we cannot see it, so a concurrent creator's
    # IntegrityError is deliberately swallowed below.
    # TODO: once we're using Django 1.8, use transactions to ensure that
    # these two models are saved at the same time.
    try:
        score_model = score.save()
        _log_score(score_model)
        if annotation_creator is not None:
            ScoreAnnotation(
                score=score_model,
                creator=annotation_creator,
                annotation_type=annotation_type,
                reason=annotation_reason
            ).save()
        # Notify any listeners waiting for scoring events.
        score_set.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            anonymous_user_id=submission_model.student_item.student_id,
            course_id=submission_model.student_item.course_id,
            item_id=submission_model.student_item.item_id,
            created_at=score_model.created_at,
        )
    except IntegrityError:
        pass
def _log_submission(submission, student_item):
    """Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    message = (
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})"
    ).format(
        submission_uuid=submission["uuid"],
        course_id=student_item["course_id"],
        item_id=student_item["item_id"],
        anonymous_student_id=student_item["student_id"],
    )
    logger.info(message)
def _log_score(score):
    """Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    message = "Score of ({}/{}) set for submission {}".format(
        score.points_earned, score.points_possible, score.submission.uuid
    )
    logger.info(message)
def _get_or_create_student_item(student_item_dict):
    """Return the StudentItem matching the given values, creating it if needed.

    Attempts to fetch the specified StudentItem; on a miss, the parameters
    are validated and a new StudentItem is created.

    Args:
        student_item_dict (dict): The student_id, item_id, course_id, and
            item_type that uniquely define a student item.

    Returns:
        StudentItem: The existing or newly created model.

    Raises:
        SubmissionRequestError: The student item parameters fail validation.
        SubmissionInternalError: An internal error occurred while creating
            or retrieving the student item.
    """
    try:
        try:
            return StudentItem.objects.get(**student_item_dict)
        except StudentItem.DoesNotExist:
            serializer = StudentItemSerializer(
                data=student_item_dict
            )
            if not serializer.is_valid():
                logger.error(
                    u"Invalid StudentItemSerializer: errors:{} data:{}".format(
                        serializer.errors,
                        student_item_dict
                    )
                )
                raise SubmissionRequestError(field_errors=serializer.errors)
            return serializer.save()
    except DatabaseError:
        error_message = u"An error occurred creating student item: {}".format(
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _use_read_replica(queryset):
    """Route *queryset* to the read replica when one is configured.

    Args:
        queryset (QuerySet)

    Returns:
        QuerySet: Bound to the "read_replica" database if it is present in
        settings.DATABASES, otherwise the original queryset unchanged.
    """
    if "read_replica" in settings.DATABASES:
        return queryset.using("read_replica")
    return queryset
|
edx/edx-submissions | submissions/api.py | get_submission_and_student | python | def get_submission_and_student(uuid, read_replica=False):
# This may raise API exceptions
submission = get_submission(uuid, read_replica=read_replica)
# Retrieve the student item from the cache
cache_key = "submissions.student_item.{}".format(submission['student_item'])
try:
cached_student_item = cache.get(cache_key)
except Exception:
# The cache backend could raise an exception
# (for example, memcache keys that contain spaces)
logger.exception("Error occurred while retrieving student item from the cache")
cached_student_item = None
if cached_student_item is not None:
submission['student_item'] = cached_student_item
else:
# There is probably a more idiomatic way to do this using the Django REST framework
try:
student_item_qs = StudentItem.objects
if read_replica:
student_item_qs = _use_read_replica(student_item_qs)
student_item = student_item_qs.get(id=submission['student_item'])
submission['student_item'] = StudentItemSerializer(student_item).data
cache.set(cache_key, submission['student_item'])
except Exception as ex:
err_msg = "Could not get submission due to error: {}".format(ex)
logger.exception(err_msg)
raise SubmissionInternalError(err_msg)
return submission | Retrieve a submission by its unique identifier, including the associated student item.
Args:
uuid (str): the unique identifier of the submission.
Kwargs:
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Returns:
Serialized Submission model (dict) containing a serialized StudentItem model
Raises:
SubmissionNotFoundError: Raised if the submission does not exist.
SubmissionRequestError: Raised if the search parameter is not a string.
SubmissionInternalError: Raised for unknown errors. | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/api.py#L307-L357 | [
"def _use_read_replica(queryset):\n \"\"\"\n Use the read replica if it's available.\n\n Args:\n queryset (QuerySet)\n\n Returns:\n QuerySet\n\n \"\"\"\n return (\n queryset.using(\"read_replica\")\n if \"read_replica\" in settings.DATABASES\n else queryset\n )\n",
"def get_submission(submission_uuid, read_replica=False):\n \"\"\"Retrieves a single submission by uuid.\n\n Args:\n submission_uuid (str): Identifier for the submission.\n\n Kwargs:\n read_replica (bool): If true, attempt to use the read replica database.\n If no read replica is available, use the default database.\n\n Raises:\n SubmissionNotFoundError: Raised if the submission does not exist.\n SubmissionRequestError: Raised if the search parameter is not a string.\n SubmissionInternalError: Raised for unknown errors.\n\n Examples:\n >>> get_submission(\"20b78e0f32df805d21064fc912f40e9ae5ab260d\")\n {\n 'student_item': 2,\n 'attempt_number': 1,\n 'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),\n 'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),\n 'answer': u'The answer is 42.'\n }\n\n \"\"\"\n if not isinstance(submission_uuid, six.string_types):\n if isinstance(submission_uuid, UUID):\n submission_uuid = six.text_type(submission_uuid)\n else:\n raise SubmissionRequestError(\n msg=\"submission_uuid ({!r}) must be serializable\".format(submission_uuid)\n )\n\n cache_key = Submission.get_cache_key(submission_uuid)\n try:\n cached_submission_data = cache.get(cache_key)\n except Exception:\n # The cache backend could raise an exception\n # (for example, memcache keys that contain spaces)\n logger.exception(\"Error occurred while retrieving submission from the cache\")\n cached_submission_data = None\n\n if cached_submission_data:\n logger.info(\"Get submission {} (cached)\".format(submission_uuid))\n return cached_submission_data\n\n try:\n submission = _get_submission_model(submission_uuid, read_replica)\n submission_data = SubmissionSerializer(submission).data\n cache.set(cache_key, submission_data)\n except Submission.DoesNotExist:\n logger.error(\"Submission {} not found.\".format(submission_uuid))\n raise SubmissionNotFoundError(\n u\"No submission matching uuid {}\".format(submission_uuid)\n )\n 
except Exception as exc:\n # Something very unexpected has just happened (like DB misconfig)\n err_msg = \"Could not get submission due to error: {}\".format(exc)\n logger.exception(err_msg)\n raise SubmissionInternalError(err_msg)\n\n logger.info(\"Get submission {}\".format(submission_uuid))\n return submission_data\n"
] | """
Public interface for the submissions app.
"""
from __future__ import absolute_import
import copy
import itertools
import logging
import operator
import json
from uuid import UUID
from django.conf import settings
from django.core.cache import cache
from django.db import IntegrityError, DatabaseError
from submissions.serializers import (
SubmissionSerializer, StudentItemSerializer, ScoreSerializer, UnannotatedScoreSerializer
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary, ScoreAnnotation, score_set, score_reset
import six
logger = logging.getLogger("submissions.api")
# By default, limit the number of top submissions
# Anything above this limit will result in a request error
MAX_TOP_SUBMISSIONS = 100
# Set a relatively low cache timeout for top submissions.
TOP_SUBMISSIONS_CACHE_TIMEOUT = 300
class SubmissionError(Exception):
    """Base class for every error raised by the submission API.

    Raised whenever the submission API cannot carry out a requested
    action; more specific subclasses narrow down the cause.
    """
class SubmissionInternalError(SubmissionError):
    """An unexpected failure inside the Submission API itself.

    Signals a problem in the underlying implementation or services,
    rather than incorrect use of the public API.
    """
class SubmissionNotFoundError(SubmissionError):
    """No submission matched the request.

    Raised when the state specified in an API call yields no matching
    Submission records.
    """
class SubmissionRequestError(SubmissionError):
    """Raised when there was a request-specific error.

    Reserved for problems specific to the use of the API, e.g. invalid
    parameters or field validation failures.
    """

    def __init__(self, msg="", field_errors=None):
        """
        Configure the submission request error.

        Keyword Args:
            msg (unicode): The error message.
            field_errors (dict): A dictionary of errors (list of unicode)
                specific to the fields provided in the request.

        Example usage:

        >>> raise SubmissionRequestError(
        >>>     "An unexpected error occurred"
        >>>     {"answer": ["Maximum answer length exceeded."]}
        >>> )

        """
        super(SubmissionRequestError, self).__init__(msg)
        # Deep-copy so later mutation by the caller cannot alter the
        # errors recorded on this exception.
        self.field_errors = (
            copy.deepcopy(field_errors)
            if field_errors is not None
            else {}
        )
        self.args += (self.field_errors,)

    def __repr__(self):
        """
        Show the message and field errors upon output.
        """
        # BUG FIX: the original read ``self.message``, an attribute that was
        # removed from exceptions in Python 3, so repr() raised
        # AttributeError there. The message is always the first positional
        # argument passed to the base class, so read it from ``self.args``.
        msg = self.args[0] if self.args else ""
        return '{}(msg="{}", field_errors={})'.format(
            self.__class__.__name__, msg, self.field_errors
        )
def create_submission(student_item_dict, answer, submitted_at=None, attempt_number=None):
    """Create a submission for assessment.

    Generic entry point through which a student's answer is submitted
    for assessment.

    Args:
        student_item_dict (dict): Identifies the course, student, and item
            this submission is associated with.
        answer (JSON-serializable): The answer given by the student.
        submitted_at (datetime): When this submission was made. Defaults to
            the current date if not specified.
        attempt_number (int): Explicit attempt number to record. When not
            given, one greater than the most recent submission's attempt is
            used (or 1 for a first submission).

    Returns:
        dict: Serialized representation of the created Submission, with the
        fields student_item, attempt_number, submitted_at, created_at, and
        answer. 'submitted_at' is the (configurable) submission time, while
        'created_at' is when the record was first created.

    Raises:
        SubmissionRequestError: The student item or submission failed
            validation (missing values, answer too long, negative attempt
            number, or invalid submitted_at).
        SubmissionInternalError: Submission access caused an internal error.
    """
    student_item_model = _get_or_create_student_item(student_item_dict)

    # Derive the attempt number from the latest existing submission when the
    # caller did not supply one explicitly.
    if attempt_number is None:
        try:
            latest = Submission.objects.filter(
                student_item=student_item_model)[:1]
        except DatabaseError:
            error_message = u"An error occurred while filtering submissions for student item: {}".format(
                student_item_dict)
            logger.exception(error_message)
            raise SubmissionInternalError(error_message)
        attempt_number = latest[0].attempt_number + 1 if latest else 1

    model_kwargs = {
        "student_item": student_item_model.pk,
        "answer": answer,
        "attempt_number": attempt_number,
    }
    if submitted_at:
        model_kwargs["submitted_at"] = submitted_at

    try:
        serializer = SubmissionSerializer(data=model_kwargs)
        if not serializer.is_valid():
            raise SubmissionRequestError(field_errors=serializer.errors)
        serializer.save()

        sub_data = serializer.data
        _log_submission(sub_data, student_item_dict)
        return sub_data
    except DatabaseError:
        error_message = u"An error occurred while creating submission {} for student item: {}".format(
            model_kwargs,
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _get_submission_model(uuid, read_replica=False):
    """
    Helper to retrieve a given Submission object from the database. Helper is needed to centralize logic that fixes
    EDUCATOR-1090, because uuids are stored both with and without hyphens.

    Args:
        uuid (str): The submission identifier, hyphenated or not.
        read_replica (bool): If true, attempt to use the read replica database.

    Raises:
        Submission.DoesNotExist: No submission matches either uuid form.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    try:
        submission = submission_qs.get(uuid=uuid)
    except Submission.DoesNotExist:
        try:
            hyphenated_value = six.text_type(UUID(uuid))
            # We can use Submission.objects instead of the SoftDeletedManager,
            # because we include the soft-delete filtering manually below.
            # BUG FIX: the uuid is now bound as a query parameter (%s)
            # instead of being string-substituted into the SQL, so the
            # database driver handles quoting/escaping.
            query = """
                SELECT
                    `submissions_submission`.`id`,
                    `submissions_submission`.`uuid`,
                    `submissions_submission`.`student_item_id`,
                    `submissions_submission`.`attempt_number`,
                    `submissions_submission`.`submitted_at`,
                    `submissions_submission`.`created_at`,
                    `submissions_submission`.`raw_answer`,
                    `submissions_submission`.`status`
                FROM
                    `submissions_submission`
                WHERE (
                    NOT (`submissions_submission`.`status` = 'D')
                    AND `submissions_submission`.`uuid` = %s
                )
            """
            submission = Submission.objects.raw(query, [hyphenated_value])[0]
        except (IndexError, ValueError):
            # IndexError: no row matched the hyphenated form either.
            # BUG FIX: ValueError (raised by UUID() for a malformed uuid
            # string) previously escaped as-is; report it as a missing
            # submission so callers get their documented exception type.
            raise Submission.DoesNotExist()

        # Avoid the extra raw-query hit next time by rewriting the stored
        # uuid in the form we look up by first.
        submission.save(update_fields=['uuid'])

    return submission
def get_submission(submission_uuid, read_replica=False):
    """Retrieve a single submission by uuid.

    Args:
        submission_uuid (str): Identifier for the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database. If no read replica is available, use the default
            database.

    Returns:
        dict: The serialized submission (student_item, attempt_number,
        submitted_at, created_at, answer).

    Raises:
        SubmissionNotFoundError: The submission does not exist.
        SubmissionRequestError: The search parameter is not a string.
        SubmissionInternalError: Any other unexpected failure.
    """
    # Normalize the identifier: accept UUID instances, reject anything else
    # that is not a string.
    if not isinstance(submission_uuid, six.string_types):
        if not isinstance(submission_uuid, UUID):
            raise SubmissionRequestError(
                msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
            )
        submission_uuid = six.text_type(submission_uuid)

    cache_key = Submission.get_cache_key(submission_uuid)
    try:
        cached = cache.get(cache_key)
    except Exception:
        # Some cache backends raise here (for example, memcache keys that
        # contain spaces); fall through to the database in that case.
        logger.exception("Error occurred while retrieving submission from the cache")
        cached = None

    if cached:
        logger.info("Get submission {} (cached)".format(submission_uuid))
        return cached

    try:
        submission = _get_submission_model(submission_uuid, read_replica)
        serialized = SubmissionSerializer(submission).data
        cache.set(cache_key, serialized)
    except Submission.DoesNotExist:
        logger.error("Submission {} not found.".format(submission_uuid))
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except Exception as exc:
        # Something very unexpected has just happened (like DB misconfig)
        err_msg = "Could not get submission due to error: {}".format(exc)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)

    logger.info("Get submission {}".format(submission_uuid))
    return serialized
def get_submissions(student_item_dict, limit=None):
    """Return the submissions for a student item, most recent first.

    Args:
        student_item_dict (dict): The location of the problem the
            submissions are associated with, defined by a course, student,
            and item.
        limit (int): Optional cap on the number of returned submissions.
            When omitted, all associated submissions are returned.

    Returns:
        list of dict: Serialized submissions (student_item, attempt_number,
        submitted_at, created_at, answer).

    Raises:
        SubmissionRequestError: The associated student item failed
            validation.
        SubmissionNotFoundError: The submissions could not be fetched for
            the associated student item.
    """
    student_item_model = _get_or_create_student_item(student_item_dict)
    try:
        queryset = Submission.objects.filter(
            student_item=student_item_model)
    except DatabaseError:
        error_message = (
            u"Error getting submission request for student item {}"
            .format(student_item_dict)
        )
        logger.exception(error_message)
        raise SubmissionNotFoundError(error_message)

    if limit:
        queryset = queryset[:limit]

    return SubmissionSerializer(queryset, many=True).data
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
    """For the given item, get the most recent submission for every student who has submitted.

    This may return a very large result set! It is implemented as a
    generator for efficiency.

    Args:
        course_id, item_id, item_type (string): The values of the respective
            student_item fields to filter the submissions by.
        read_replica (bool): If true, attempt to use the read replica
            database. If no read replica is available, use the default
            database.

    Yields:
        dict: Serialized submissions with the fields student_item,
        student_id, attempt_number, submitted_at, created_at, and answer.

    Raises:
        Cannot fail unless there's a database error, but may return an
        empty iterable.
    """
    qs = Submission.objects
    if read_replica:
        qs = _use_read_replica(qs)

    # SELECT DISTINCT ON is PostgreSQL-only, so the query necessarily
    # returns every submission of each student, not just the latest. We
    # sort by student_id then recency, so the rows arrive grouped per
    # student with the most recent submission first in each group.
    rows = qs.select_related('student_item').filter(
        student_item__course_id=course_id,
        student_item__item_id=item_id,
        student_item__item_type=item_type,
    ).order_by('student_item__student_id', '-submitted_at', '-id').iterator()

    for _unused_student_id, group in itertools.groupby(rows, operator.attrgetter('student_item.student_id')):
        # The first row of each group is that student's latest submission.
        submission = next(group)
        data = SubmissionSerializer(submission).data
        data['student_id'] = submission.student_item.student_id
        yield data
def get_all_course_submission_information(course_id, item_type, read_replica=True):
    """For the given course, yield every submission of the given item type with its student item and latest score.

    If a submission was given a score that is not the latest score for the
    relevant student item, it is still yielded but without a score.

    Args:
        course_id (str): The course that we are getting submissions from.
        item_type (str): The type of items that we are getting submissions for.
        read_replica (bool): Try to use the database's read replica if it's available.

    Yields:
        tuple of three dicts:
            (1) the student item (student_id, course_id, student_item, item_type);
            (2) the submission (student_item, attempt_number, submitted_at,
                created_at, answer);
            (3) the score (student_item, submission, points_earned,
                points_possible, created_at, submission_uuid) if one exists
                and it is the latest, otherwise an empty dict.
    """
    qs = Submission.objects
    if read_replica:
        qs = _use_read_replica(qs)

    rows = qs.select_related('student_item__scoresummary__latest__submission').filter(
        student_item__course_id=course_id,
        student_item__item_type=item_type,
    ).iterator()

    for submission in rows:
        student_item = submission.student_item
        serialized_score = {}
        if hasattr(student_item, 'scoresummary'):
            latest_score = student_item.scoresummary.latest
            # Mirror the behavior of the API's get_score method: skip reset
            # (hidden) scores, and only attach the score when this
            # submission is the one the latest score belongs to.
            if (not latest_score.is_hidden()) and latest_score.submission.uuid == submission.uuid:
                serialized_score = ScoreSerializer(latest_score).data
        yield (
            StudentItemSerializer(student_item).data,
            SubmissionSerializer(submission).data,
            serialized_score,
        )
def get_top_submissions(course_id, item_id, item_type, number_of_top_scores, use_cache=True, read_replica=True):
    """Get a number of top scores for an assessment based on a particular student item

    This function will return top scores for the piece of assessment.
    It will consider only the latest and greater than 0 score for a piece of
    assessment. A score is only calculated for a student item if it has
    completed the workflow for a particular assessment module.

    In general, users of top submissions can tolerate some latency in the
    search results, so by default this call uses a cache and the read
    replica (if available).

    Args:
        course_id (str): The course to retrieve for the top scores
        item_id (str): The item within the course to retrieve for the top scores
        item_type (str): The type of item to retrieve
        number_of_top_scores (int): The number of scores to return, greater
            than 0 and no more than MAX_TOP_SUBMISSIONS.

    Kwargs:
        use_cache (bool): If true, check the cache before querying the database.
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        topscores (dict): The top scores for the assessment for the student item.
            An empty array if there are no scores or all scores are 0.

    Raises:
        SubmissionNotFoundError: Raised when a submission cannot be found for
            the associated student item.
        SubmissionRequestError: Raised when the number of top scores is higher
            than the MAX_TOP_SUBMISSIONS constant.
    """
    if number_of_top_scores < 1 or number_of_top_scores > MAX_TOP_SUBMISSIONS:
        error_msg = (
            u"Number of top scores must be a number between 1 and {}.".format(MAX_TOP_SUBMISSIONS)
        )
        # BUG FIX: this is plain input validation with no active exception,
        # so use logger.error; logger.exception here logged a bogus
        # "NoneType: None" traceback since it was called outside any
        # ``except`` block.
        logger.error(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    # First check the cache (unless caching is disabled)
    cache_key = "submissions.top_submissions.{course}.{item}.{type}.{number}".format(
        course=course_id,
        item=item_id,
        type=item_type,
        number=number_of_top_scores
    )
    top_submissions = cache.get(cache_key) if use_cache else None

    # If we can't find it in the cache (or caching is disabled), check the
    # database. By default, prefer the read-replica.
    if top_submissions is None:
        try:
            query = ScoreSummary.objects.filter(
                student_item__course_id=course_id,
                student_item__item_id=item_id,
                student_item__item_type=item_type,
                latest__points_earned__gt=0
            ).select_related('latest', 'latest__submission').order_by("-latest__points_earned")
            if read_replica:
                query = _use_read_replica(query)
            score_summaries = query[:number_of_top_scores]
        except DatabaseError:
            msg = u"Could not fetch top score summaries for course {}, item {} of type {}".format(
                course_id, item_id, item_type
            )
            logger.exception(msg)
            raise SubmissionInternalError(msg)

        # Retrieve the submission content for each top score
        top_submissions = [
            {
                "score": score_summary.latest.points_earned,
                "content": SubmissionSerializer(score_summary.latest.submission).data['answer']
            }
            for score_summary in score_summaries
        ]

        # Always store the retrieved list in the cache
        cache.set(cache_key, top_submissions, TOP_SUBMISSIONS_CACHE_TIMEOUT)

    return top_submissions
def get_score(student_item):
    """Get the score for a particular student item.

    Each student item should have a unique score. A score is only calculated
    for a student item once it has completed the workflow for a particular
    assessment module.

    Args:
        student_item (dict): The dictionary representation of a student
            item. The score related to this student item is returned.

    Returns:
        dict or None: The score associated with this student item, or None
        when no (visible) score is found.

    Raises:
        SubmissionInternalError: A score could not be retrieved because of
            an internal server error.
    """
    try:
        item_model = StudentItem.objects.get(**student_item)
        score = ScoreSummary.objects.get(student_item=item_model).latest
    except (ScoreSummary.DoesNotExist, StudentItem.DoesNotExist):
        return None

    # By convention, scores are hidden if "points possible" is set to 0.
    # This can occur when an instructor has reset scores for a student.
    if score.is_hidden():
        return None
    return ScoreSerializer(score).data
def get_scores(course_id, student_id):
    """Return a dict mapping item_ids to serialized scores.

    Used by an LMS to find all the scores for a given student in a given
    course. Scores that are "hidden" (instructor resets) are excluded.

    Args:
        course_id (str): Course ID, used to look up the `StudentItem`s.
        student_id (str): Student ID, used to look up the `StudentItem`s.

    Returns:
        dict: Keys are `item_id`s (str); values are serialized scores with
        raw, unweighted point values. An empty dict when nothing matches —
        this is not an error, since progress pages are queried for students
        who may never have submitted anything.

    Raises:
        SubmissionInternalError: An unexpected database failure occurred.
    """
    try:
        summaries = ScoreSummary.objects.filter(
            student_item__course_id=course_id,
            student_item__student_id=student_id,
        ).select_related('latest', 'latest__submission', 'student_item')
    except DatabaseError:
        msg = u"Could not fetch scores for course {}, student {}".format(
            course_id, student_id
        )
        logger.exception(msg)
        raise SubmissionInternalError(msg)

    return {
        summary.student_item.item_id: UnannotatedScoreSerializer(summary.latest).data
        for summary in summaries
        if not summary.latest.is_hidden()
    }
def get_latest_score_for_submission(submission_uuid, read_replica=False):
    """
    Retrieve the latest score for a particular submission.

    Args:
        submission_uuid (str): The UUID of the submission to retrieve.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database. If no read replica is available, use the default
            database.

    Returns:
        dict: The serialized score model, or None if no (visible) score is
        available.
    """
    try:
        # Validate the submission_uuid (resolving hyphenation differences)
        # before looking up scores against it.
        submission_model = _get_submission_model(submission_uuid, read_replica)
        scores = Score.objects.filter(
            submission__uuid=submission_model.uuid
        ).order_by("-id").select_related("submission")
        if read_replica:
            scores = _use_read_replica(scores)

        score = scores[0]
        if score.is_hidden():
            return None
    except (IndexError, Submission.DoesNotExist):
        # IndexError: no score rows exist for this submission.
        return None

    return ScoreSerializer(score).data
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
    """
    Reset scores for a specific student on a specific problem.

    Note: this does *not* delete `Score` models from the database,
    since these are immutable. It simply creates a new score with
    the "reset" flag set to True.

    Args:
        student_id (unicode): The ID of the student for whom to reset scores.
        course_id (unicode): The ID of the course containing the item to reset.
        item_id (unicode): The ID of the item for which to reset scores.
        clear_state (bool): If True, will appear to delete any submissions
            associated with the specified StudentItem (soft delete).
        emit_signal (bool): If True, send a ``score_reset`` signal to any
            listeners waiting for scoring events.

    Returns:
        None

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.
    """
    # Retrieve the student item
    try:
        student_item = StudentItem.objects.get(
            student_id=student_id, course_id=course_id, item_id=item_id
        )
    except StudentItem.DoesNotExist:
        # If there is no student item, then there is no score to reset,
        # so we can return immediately.
        return

    # Create a "reset" score
    try:
        score = Score.create_reset_score(student_item)
        if emit_signal:
            # Send a signal out to any listeners who are waiting for scoring events.
            score_reset.send(
                sender=None,
                anonymous_user_id=student_id,
                course_id=course_id,
                item_id=item_id,
                created_at=score.created_at,
            )

        if clear_state:
            for sub in student_item.submission_set.all():
                # soft-delete the Submission
                sub.status = Submission.DELETED
                sub.save(update_fields=["status"])

                # Also clear out cached values
                cache_key = Submission.get_cache_key(sub.uuid)
                cache.delete(cache_key)

    except DatabaseError:
        msg = (
            u"Error occurred while reseting scores for"
            u" item {item_id} in course {course_id} for student {student_id}"
        ).format(item_id=item_id, course_id=course_id, student_id=student_id)
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    else:
        # Only log success when no database error interrupted the reset.
        msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
            item_id=item_id, course_id=course_id, student_id=student_id
        )
        logger.info(msg)
def set_score(submission_uuid, points_earned, points_possible,
              annotation_creator=None, annotation_type=None, annotation_reason=None):
    """Set a score for a particular submission.

    Sets the score for a particular submission. This score is calculated
    externally to the API.

    Args:
        submission_uuid (str): UUID for the submission (must exist).
        points_earned (int): The earned points for this submission.
        points_possible (int): The total points possible for this particular student item.
        annotation_creator (str): An optional field for recording who gave this particular score
        annotation_type (str): An optional field for recording what type of annotation should be created,
            e.g. "staff_override".
        annotation_reason (str): An optional field for recording why this score was set to its value.

    Returns:
        None

    Raises:
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to save the score.
        SubmissionRequestError: Thrown if the given student item or submission
            are not found.

    Examples:
        >>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
        {
            'student_item': 2,
            'submission': 1,
            'points_earned': 11,
            'points_possible': 12,
            'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
        }

    """
    # Look up the submission; translate lookup failures into API-level errors.
    try:
        submission_model = _get_submission_model(submission_uuid)
    except Submission.DoesNotExist:
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except DatabaseError:
        error_msg = u"Could not retrieve submission {}.".format(
            submission_uuid
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    score = ScoreSerializer(
        data={
            "student_item": submission_model.student_item.pk,
            "submission": submission_model.pk,
            "points_earned": points_earned,
            "points_possible": points_possible,
        }
    )
    if not score.is_valid():
        logger.exception(score.errors)
        raise SubmissionInternalError(score.errors)

    # When we save the score, a score summary will be created if
    # it does not already exist.
    # When the database's isolation level is set to repeatable-read,
    # it's possible for a score summary to exist for this student item,
    # even though we cannot retrieve it.
    # In this case, we assume that someone else has already created
    # a score summary and ignore the error.
    # TODO: once we're using Django 1.8, use transactions to ensure that these
    # two models are saved at the same time.
    try:
        score_model = score.save()
        _log_score(score_model)
        if annotation_creator is not None:
            score_annotation = ScoreAnnotation(
                score=score_model,
                creator=annotation_creator,
                annotation_type=annotation_type,
                reason=annotation_reason
            )
            score_annotation.save()
        # Send a signal out to any listeners who are waiting for scoring events.
        score_set.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            anonymous_user_id=submission_model.student_item.student_id,
            course_id=submission_model.student_item.course_id,
            item_id=submission_model.student_item.item_id,
            created_at=score_model.created_at,
        )
    except IntegrityError:
        # A concurrent writer already created the ScoreSummary row; treat
        # the score as saved and move on (see the repeatable-read note above).
        pass
def _log_submission(submission, student_item):
    """
    Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    # IDIOM FIX: pass lazy %-style arguments so the message is only
    # formatted when the INFO level is actually enabled; the rendered
    # text is identical to the previous .format() version.
    logger.info(
        u"Created submission uuid=%s for "
        u"(course_id=%s, item_id=%s, "
        u"anonymous_student_id=%s)",
        submission["uuid"],
        student_item["course_id"],
        student_item["item_id"],
        student_item["student_id"],
    )
def _log_score(score):
    """
    Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    # IDIOM FIX: lazy %-style logging arguments instead of eager .format();
    # the rendered message is unchanged.
    logger.info(
        "Score of (%s/%s) set for submission %s",
        score.points_earned, score.points_possible, score.submission.uuid
    )
def _get_or_create_student_item(student_item_dict):
    """Get or create a StudentItem matching the given values.

    Attempts to fetch the specified StudentItem; if it does not exist, the
    parameters are validated and a new StudentItem is created.

    Args:
        student_item_dict (dict): Contains the student_id, item_id,
            course_id, and item_type that uniquely define a student item.

    Returns:
        StudentItem: The student item that was retrieved or created.

    Raises:
        SubmissionInternalError: An internal error occurred while creating
            or retrieving the student item.
        SubmissionRequestError: The given student item parameters failed
            validation.
    """
    try:
        try:
            return StudentItem.objects.get(**student_item_dict)
        except StudentItem.DoesNotExist:
            serializer = StudentItemSerializer(
                data=student_item_dict
            )
            if not serializer.is_valid():
                logger.error(
                    u"Invalid StudentItemSerializer: errors:{} data:{}".format(
                        serializer.errors,
                        student_item_dict
                    )
                )
                raise SubmissionRequestError(field_errors=serializer.errors)
            return serializer.save()
    except DatabaseError:
        error_message = u"An error occurred creating student item: {}".format(
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _use_read_replica(queryset):
    """
    Route the queryset through the read replica when one is configured.

    Args:
        queryset (QuerySet)

    Returns:
        QuerySet: The same queryset, bound to the "read_replica" database
        when that alias exists in settings; otherwise unchanged.
    """
    if "read_replica" in settings.DATABASES:
        return queryset.using("read_replica")
    return queryset
|
edx/edx-submissions | submissions/api.py | get_submissions | python | def get_submissions(student_item_dict, limit=None):
student_item_model = _get_or_create_student_item(student_item_dict)
try:
submission_models = Submission.objects.filter(
student_item=student_item_model)
except DatabaseError:
error_message = (
u"Error getting submission request for student item {}"
.format(student_item_dict)
)
logger.exception(error_message)
raise SubmissionNotFoundError(error_message)
if limit:
submission_models = submission_models[:limit]
return SubmissionSerializer(submission_models, many=True).data | Retrieves the submissions for the specified student item,
ordered by most recent submitted date.
Returns the submissions relative to the specified student item. Exception
thrown if no submission is found relative to this location.
Args:
student_item_dict (dict): The location of the problem this submission is
associated with, as defined by a course, student, and item.
limit (int): Optional parameter for limiting the returned number of
submissions associated with this student item. If not specified, all
associated submissions are returned.
Returns:
List dict: A list of dicts for the associated student item. The submission
contains five attributes: student_item, attempt_number, submitted_at,
created_at, and answer. 'student_item' is the ID of the related student
item for the submission. 'attempt_number' is the attempt this submission
represents for this question. 'submitted_at' represents the time this
submission was submitted, which can be configured, versus the
'created_at' date, which is when the submission is first created.
Raises:
SubmissionRequestError: Raised when the associated student item fails
validation.
SubmissionNotFoundError: Raised when a submission cannot be found for
the associated student item.
Examples:
>>> student_item_dict = dict(
>>> student_id="Tim",
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one"
>>> )
>>> get_submissions(student_item_dict, 3)
[{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
}] | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/api.py#L360-L421 | [
"def _get_or_create_student_item(student_item_dict):\n \"\"\"Gets or creates a Student Item that matches the values specified.\n\n Attempts to get the specified Student Item. If it does not exist, the\n specified parameters are validated, and a new Student Item is created.\n\n Args:\n student_item_dict (dict): The dict containing the student_id, item_id,\n course_id, and item_type that uniquely defines a student item.\n\n Returns:\n StudentItem: The student item that was retrieved or created.\n\n Raises:\n SubmissionInternalError: Thrown if there was an internal error while\n attempting to create or retrieve the specified student item.\n SubmissionRequestError: Thrown if the given student item parameters fail\n validation.\n\n Examples:\n >>> student_item_dict = dict(\n >>> student_id=\"Tim\",\n >>> item_id=\"item_1\",\n >>> course_id=\"course_1\",\n >>> item_type=\"type_one\"\n >>> )\n >>> _get_or_create_student_item(student_item_dict)\n {'item_id': 'item_1', 'item_type': 'type_one', 'course_id': 'course_1', 'student_id': 'Tim'}\n\n \"\"\"\n try:\n try:\n return StudentItem.objects.get(**student_item_dict)\n except StudentItem.DoesNotExist:\n student_item_serializer = StudentItemSerializer(\n data=student_item_dict\n )\n if not student_item_serializer.is_valid():\n logger.error(\n u\"Invalid StudentItemSerializer: errors:{} data:{}\".format(\n student_item_serializer.errors,\n student_item_dict\n )\n )\n raise SubmissionRequestError(field_errors=student_item_serializer.errors)\n return student_item_serializer.save()\n except DatabaseError:\n error_message = u\"An error occurred creating student item: {}\".format(\n student_item_dict\n )\n logger.exception(error_message)\n raise SubmissionInternalError(error_message)\n"
] | """
Public interface for the submissions app.
"""
from __future__ import absolute_import
import copy
import itertools
import logging
import operator
import json
from uuid import UUID
from django.conf import settings
from django.core.cache import cache
from django.db import IntegrityError, DatabaseError
from submissions.serializers import (
SubmissionSerializer, StudentItemSerializer, ScoreSerializer, UnannotatedScoreSerializer
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary, ScoreAnnotation, score_set, score_reset
import six
logger = logging.getLogger("submissions.api")
# By default, limit the number of top submissions
# Anything above this limit will result in a request error
MAX_TOP_SUBMISSIONS = 100
# Set a relatively low cache timeout for top submissions.
TOP_SUBMISSIONS_CACHE_TIMEOUT = 300
class SubmissionError(Exception):
    """Base class for all errors raised by the submission API.

    Raised when the submission API cannot carry out a requested action.
    """
class SubmissionInternalError(SubmissionError):
    """Signals an internal failure inside the Submission API.

    Raised for problems caused by the underlying services or implementation,
    not by incorrect use of the API.
    """
class SubmissionNotFoundError(SubmissionError):
    """Raised when no submission matches the request.

    If the state specified in an API call yields no matching Submissions,
    this error may be raised.
    """
class SubmissionRequestError(SubmissionError):
    """Raised when a request made to the submission API is invalid.

    Reserved for problems specific to the caller's use of the API, such as
    failed field validation.
    """

    def __init__(self, msg="", field_errors=None):
        """
        Configure the submission request error.

        Keyword Args:
            msg (unicode): The error message.
            field_errors (dict): A dictionary of errors (list of unicode)
                specific to a fields provided in the request.

        Example usage:

        >>> raise SubmissionRequestError(
        >>>     "An unexpected error occurred",
        >>>     {"answer": ["Maximum answer length exceeded."]}
        >>> )
        """
        super(SubmissionRequestError, self).__init__(msg)
        # Keep the message on the instance explicitly: `Exception.message`
        # was removed in Python 3, so __repr__ cannot rely on it.
        self.msg = msg
        # Deep-copy so later mutation by the caller cannot alter the error.
        self.field_errors = (
            copy.deepcopy(field_errors)
            if field_errors is not None
            else {}
        )
        self.args += (self.field_errors,)

    def __repr__(self):
        """
        Show the message and field errors upon output.
        """
        # BUG FIX: previously referenced `self.message`, which does not exist
        # under Python 3 and made repr() raise AttributeError.
        return '{}(msg="{}", field_errors={})'.format(
            self.__class__.__name__, self.msg, self.field_errors
        )
def create_submission(student_item_dict, answer, submitted_at=None, attempt_number=None):
    """Creates a submission for assessment.

    Generic means by which to submit an answer for assessment.

    Args:
        student_item_dict (dict): The student_item this
            submission is associated with. This is used to determine which
            course, student, and location this submission belongs to.
        answer (JSON-serializable): The answer given by the student to be assessed.
        submitted_at (datetime): The date in which this submission was submitted.
            If not specified, defaults to the current date.
        attempt_number (int): A student may be able to submit multiple attempts
            per question. This allows the designated attempt to be overridden.
            If the attempt is not specified, it will take the most recent
            submission, as specified by the submitted_at time, and use its
            attempt_number plus one.

    Returns:
        dict: A representation of the created Submission. The submission
        contains five attributes: student_item, attempt_number, submitted_at,
        created_at, and answer. 'student_item' is the ID of the related student
        item for the submission. 'attempt_number' is the attempt this submission
        represents for this question. 'submitted_at' represents the time this
        submission was submitted, which can be configured, versus the
        'created_at' date, which is when the submission is first created.

    Raises:
        SubmissionRequestError: Raised when there are validation errors for the
            student item or submission. This can be caused by the student item
            missing required values, the submission being too long, the
            attempt_number is negative, or the given submitted_at time is invalid.
        SubmissionInternalError: Raised when submission access causes an
            internal error.

    Examples:
        >>> student_item_dict = dict(
        >>>     student_id="Tim",
        >>>     item_id="item_1",
        >>>     course_id="course_1",
        >>>     item_type="type_one"
        >>> )
        >>> create_submission(student_item_dict, "The answer is 42.", datetime.utcnow, 1)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284 tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }

    """
    # Look up (or lazily create) the StudentItem row this submission hangs off.
    student_item_model = _get_or_create_student_item(student_item_dict)
    if attempt_number is None:
        try:
            # Only the first row is needed to derive the next attempt number.
            # NOTE(review): this relies on the Submission model's default
            # ordering putting the most recent submission first — confirm in
            # the model's Meta before changing this query.
            submissions = Submission.objects.filter(
                student_item=student_item_model)[:1]
        except DatabaseError:
            error_message = u"An error occurred while filtering submissions for student item: {}".format(
                student_item_dict)
            logger.exception(error_message)
            raise SubmissionInternalError(error_message)
        # First-ever submission for this student item starts at attempt 1.
        attempt_number = submissions[0].attempt_number + 1 if submissions else 1
    model_kwargs = {
        "student_item": student_item_model.pk,
        "answer": answer,
        "attempt_number": attempt_number,
    }
    # Only pass submitted_at through when supplied; otherwise the serializer
    # applies its own default (the current time).
    if submitted_at:
        model_kwargs["submitted_at"] = submitted_at
    try:
        submission_serializer = SubmissionSerializer(data=model_kwargs)
        if not submission_serializer.is_valid():
            raise SubmissionRequestError(field_errors=submission_serializer.errors)
        submission_serializer.save()
        sub_data = submission_serializer.data
        _log_submission(sub_data, student_item_dict)
        return sub_data
    except DatabaseError:
        error_message = u"An error occurred while creating submission {} for student item: {}".format(
            model_kwargs,
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _get_submission_model(uuid, read_replica=False):
    """
    Helper to retrieve a given Submission object from the database. Helper is needed to centralize logic that fixes
    EDUCATOR-1090, because uuids are stored both with and without hyphens.

    Args:
        uuid (str): the submission uuid, with or without hyphens.
        read_replica (bool): if True, query the read replica when available.

    Returns:
        Submission: the matching (non-deleted) submission model.

    Raises:
        Submission.DoesNotExist: if neither the plain nor the hyphenated
            form of the uuid matches a row.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)
    try:
        # Fast path: the uuid is stored in exactly the form we were given.
        submission = submission_qs.get(uuid=uuid)
    except Submission.DoesNotExist:
        try:
            # Fall back to the canonical hyphenated form. UUID() also
            # validates the input, so interpolating it into raw SQL below is
            # not an injection risk; a malformed uuid raises ValueError here
            # and propagates to the caller.
            hyphenated_value = six.text_type(UUID(uuid))
            query = """
                SELECT
                    `submissions_submission`.`id`,
                    `submissions_submission`.`uuid`,
                    `submissions_submission`.`student_item_id`,
                    `submissions_submission`.`attempt_number`,
                    `submissions_submission`.`submitted_at`,
                    `submissions_submission`.`created_at`,
                    `submissions_submission`.`raw_answer`,
                    `submissions_submission`.`status`
                FROM
                    `submissions_submission`
                WHERE (
                    NOT (`submissions_submission`.`status` = 'D')
                    AND `submissions_submission`.`uuid` = '{}'
                )
                """
            query = query.replace("{}", hyphenated_value)
            # We can use Submission.objects instead of the SoftDeletedManager, we'll include that logic manually
            # (the raw WHERE clause above excludes soft-deleted rows itself).
            submission = Submission.objects.raw(query)[0]
        except IndexError:
            # Raw query returned no rows — translate back into the ORM's
            # standard "not found" exception for callers.
            raise Submission.DoesNotExist()
        # Avoid the extra hit next time: re-save so the stored uuid matches
        # the form used by the fast path above.
        submission.save(update_fields=['uuid'])
    return submission
def get_submission(submission_uuid, read_replica=False):
    """Retrieves a single submission by uuid.

    Args:
        submission_uuid (str): Identifier for the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Raises:
        SubmissionNotFoundError: Raised if the submission does not exist.
        SubmissionRequestError: Raised if the search parameter is not a string.
        SubmissionInternalError: Raised for unknown errors.

    Examples:
        >>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }

    """
    # Accept UUID instances by coercing them to text; reject every other
    # non-string type up front.
    if not isinstance(submission_uuid, six.string_types):
        if isinstance(submission_uuid, UUID):
            submission_uuid = six.text_type(submission_uuid)
        else:
            raise SubmissionRequestError(
                msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
            )
    cache_key = Submission.get_cache_key(submission_uuid)
    try:
        cached_submission_data = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception
        # (for example, memcache keys that contain spaces)
        logger.exception("Error occurred while retrieving submission from the cache")
        cached_submission_data = None
    if cached_submission_data:
        logger.info("Get submission {} (cached)".format(submission_uuid))
        return cached_submission_data
    try:
        submission = _get_submission_model(submission_uuid, read_replica)
        submission_data = SubmissionSerializer(submission).data
        # Populate the cache so subsequent lookups skip the database.
        cache.set(cache_key, submission_data)
    except Submission.DoesNotExist:
        logger.error("Submission {} not found.".format(submission_uuid))
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except Exception as exc:
        # Something very unexpected has just happened (like DB misconfig)
        err_msg = "Could not get submission due to error: {}".format(exc)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)
    logger.info("Get submission {}".format(submission_uuid))
    return submission_data
def get_submission_and_student(uuid, read_replica=False):
    """
    Retrieve a submission by its unique identifier, including the associated student item.

    Args:
        uuid (str): the unique identifier of the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        Serialized Submission model (dict) containing a serialized StudentItem model

    Raises:
        SubmissionNotFoundError: Raised if the submission does not exist.
        SubmissionRequestError: Raised if the search parameter is not a string.
        SubmissionInternalError: Raised for unknown errors.

    """
    # This may raise API exceptions
    submission = get_submission(uuid, read_replica=read_replica)
    # Retrieve the student item from the cache. The cached submission stores
    # only the student item's id; we replace it with the serialized model.
    cache_key = "submissions.student_item.{}".format(submission['student_item'])
    try:
        cached_student_item = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception
        # (for example, memcache keys that contain spaces)
        logger.exception("Error occurred while retrieving student item from the cache")
        cached_student_item = None
    if cached_student_item is not None:
        submission['student_item'] = cached_student_item
    else:
        # There is probably a more idiomatic way to do this using the Django REST framework
        try:
            student_item_qs = StudentItem.objects
            if read_replica:
                student_item_qs = _use_read_replica(student_item_qs)
            student_item = student_item_qs.get(id=submission['student_item'])
            submission['student_item'] = StudentItemSerializer(student_item).data
            cache.set(cache_key, submission['student_item'])
        except Exception as ex:
            err_msg = "Could not get submission due to error: {}".format(ex)
            logger.exception(err_msg)
            raise SubmissionInternalError(err_msg)
    return submission
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
    """For the given item, get the most recent submission for every student who has submitted.

    This may return a very large result set! It is implemented as a generator for efficiency.

    Args:
        course_id, item_id, item_type (string): The values of the respective
            student_item fields to filter the submissions by.
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Yields:
        Dicts representing the submissions with the following fields:
            student_item, student_id, attempt_number, submitted_at,
            created_at, answer

    Raises:
        Cannot fail unless there's a database error, but may return an empty iterable.
    """
    manager = Submission.objects
    if read_replica:
        manager = _use_read_replica(manager)
    # SELECT DISTINCT ON is PostgreSQL-only, so we cannot ask the database for
    # just one row per student. Instead we fetch every matching submission,
    # ordered by student id and then newest-first, so that each student's
    # rows form a contiguous run whose first entry is their latest submission.
    rows = (
        manager.select_related('student_item')
        .filter(
            student_item__course_id=course_id,
            student_item__item_id=item_id,
            student_item__item_type=item_type,
        )
        .order_by('student_item__student_id', '-submitted_at', '-id')
        .iterator()
    )
    grouped = itertools.groupby(rows, operator.attrgetter('student_item.student_id'))
    for _student_id, student_rows in grouped:
        newest = next(student_rows)
        serialized = SubmissionSerializer(newest).data
        serialized['student_id'] = newest.student_item.student_id
        yield serialized
def get_all_course_submission_information(course_id, item_type, read_replica=True):
    """ For the given course, get all student items of the given item type, all the submissions for those itemes,
    and the latest scores for each item. If a submission was given a score that is not the latest score for the
    relevant student item, it will still be included but without score.

    Args:
        course_id (str): The course that we are getting submissions from.
        item_type (str): The type of items that we are getting submissions for.
        read_replica (bool): Try to use the database's read replica if it's available.

    Yields:
        A tuple of three dictionaries representing:
        (1) a student item with the following fields:
            student_id, course_id, student_item, item_type
        (2) a submission with the following fields:
            student_item, attempt_number, submitted_at, created_at, answer
        (3) a score with the following fields, if one exists and it is the latest score:
            (if both conditions are not met, an empty dict is returned here)
            student_item, submission, points_earned, points_possible,
            created_at, submission_uuid
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)
    # Follow the chain submission -> student_item -> score summary -> latest
    # score -> its submission in a single query to avoid per-row lookups.
    query = submission_qs.select_related('student_item__scoresummary__latest__submission').filter(
        student_item__course_id=course_id,
        student_item__item_type=item_type,
    ).iterator()
    for submission in query:
        student_item = submission.student_item
        serialized_score = {}
        # A student item without a score summary has never been scored.
        if hasattr(student_item, 'scoresummary'):
            latest_score = student_item.scoresummary.latest
            # Only include the score if it is not a reset score (is_hidden), and if the current submission is the same
            # as the student_item's latest score's submission. This matches the behavior of the API's get_score method.
            if (not latest_score.is_hidden()) and latest_score.submission.uuid == submission.uuid:
                serialized_score = ScoreSerializer(latest_score).data
        yield (
            StudentItemSerializer(student_item).data,
            SubmissionSerializer(submission).data,
            serialized_score
        )
def get_top_submissions(course_id, item_id, item_type, number_of_top_scores, use_cache=True, read_replica=True):
    """Get a number of top scores for an assessment based on a particular student item

    This function will return top scores for the piece of assessment.
    It will consider only the latest and greater than 0 score for a piece of assessment.
    A score is only calculated for a student item if it has completed the workflow for
    a particular assessment module.

    In general, users of top submissions can tolerate some latency
    in the search results, so by default this call uses
    a cache and the read replica (if available).

    Args:
        course_id (str): The course to retrieve for the top scores
        item_id (str): The item within the course to retrieve for the top scores
        item_type (str): The type of item to retrieve
        number_of_top_scores (int): The number of scores to return, greater than 0 and no
            more than 100.

    Kwargs:
        use_cache (bool): If true, check the cache before retrieving querying the database.
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        topscores (dict): The top scores for the assessment for the student item.
            An empty array if there are no scores or all scores are 0.

    Raises:
        SubmissionNotFoundError: Raised when a submission cannot be found for
            the associated student item.
        SubmissionRequestError: Raised when the number of top scores is higher than the
            MAX_TOP_SUBMISSIONS constant.

    Examples:
        >>> course_id = "TestCourse"
        >>> item_id = "u_67"
        >>> item_type = "openassessment"
        >>> number_of_top_scores = 10
        >>>
        >>> get_top_submissions(course_id, item_id, item_type, number_of_top_scores)
        [{
            'score': 20,
            'content': "Platypus"
        },{
            'score': 16,
            'content': "Frog"
        }]

    """
    if number_of_top_scores < 1 or number_of_top_scores > MAX_TOP_SUBMISSIONS:
        error_msg = (
            u"Number of top scores must be a number between 1 and {}.".format(MAX_TOP_SUBMISSIONS)
        )
        # NOTE(review): logger.exception outside an except block logs no
        # traceback; logger.error would be the conventional call here.
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)
    # First check the cache (unless caching is disabled)
    cache_key = "submissions.top_submissions.{course}.{item}.{type}.{number}".format(
        course=course_id,
        item=item_id,
        type=item_type,
        number=number_of_top_scores
    )
    top_submissions = cache.get(cache_key) if use_cache else None
    # If we can't find it in the cache (or caching is disabled), check the database
    # By default, prefer the read-replica.
    if top_submissions is None:
        try:
            # Hidden/reset scores have 0 points earned, so the gt=0 filter
            # also excludes them.
            query = ScoreSummary.objects.filter(
                student_item__course_id=course_id,
                student_item__item_id=item_id,
                student_item__item_type=item_type,
                latest__points_earned__gt=0
            ).select_related('latest', 'latest__submission').order_by("-latest__points_earned")
            if read_replica:
                query = _use_read_replica(query)
            score_summaries = query[:number_of_top_scores]
        except DatabaseError:
            msg = u"Could not fetch top score summaries for course {}, item {} of type {}".format(
                course_id, item_id, item_type
            )
            logger.exception(msg)
            raise SubmissionInternalError(msg)
        # Retrieve the submission content for each top score
        top_submissions = [
            {
                "score": score_summary.latest.points_earned,
                "content": SubmissionSerializer(score_summary.latest.submission).data['answer']
            }
            for score_summary in score_summaries
        ]
        # Always store the retrieved list in the cache
        cache.set(cache_key, top_submissions, TOP_SUBMISSIONS_CACHE_TIMEOUT)
    return top_submissions
def get_score(student_item):
    """Get the score for a particular student item

    Each student item should have a unique score. This function will return
    the score if it is available. A score is only calculated for a student
    item if it has completed the workflow for a particular assessment module.

    Args:
        student_item (dict): The dictionary representation of a student item.
            Function returns the score related to this student item.

    Returns:
        score (dict): The score associated with this student item. None if
            there is no score found.

    Raises:
        SubmissionInternalError: Raised if a score cannot be retrieved because
            of an internal server error.

    Examples:
        >>> student_item = {
        >>>     "student_id": "Tim",
        >>>     "course_id": "TestCourse",
        >>>     "item_id": "u_67",
        >>>     "item_type": "openassessment"
        >>> }
        >>>
        >>> get_score(student_item)
        [{
            'student_item': 2,
            'submission': 2,
            'points_earned': 8,
            'points_possible': 20,
            'created_at': datetime.datetime(2014, 2, 7, 18, 30, 1, 807911, tzinfo=<UTC>)
        }]

    """
    try:
        item_model = StudentItem.objects.get(**student_item)
        latest = ScoreSummary.objects.get(student_item=item_model).latest
    except (ScoreSummary.DoesNotExist, StudentItem.DoesNotExist):
        # No matching student item, or nothing has been scored yet.
        return None
    # By convention, scores are hidden if "points possible" is set to 0,
    # which happens when an instructor has reset scores for a student.
    return None if latest.is_hidden() else ScoreSerializer(latest).data
def get_scores(course_id, student_id):
    """Return a dict mapping item_ids to scores.

    Scores are represented by serialized Score objects in JSON-like dict
    format. This method would be used by an LMS to find all the scores for a
    given student in a given course. Scores that are "hidden" (because they
    have points earned set to zero) are excluded from the results.

    Args:
        course_id (str): Course ID, used to do a lookup on the `StudentItem`.
        student_id (str): Student ID, used to do a lookup on the `StudentItem`.

    Returns:
        dict: The keys are `item_id`s (`str`) and the values are serialized
        score dicts with raw, unweighted `points_earned`/`points_possible`
        values. An empty dict is returned when nothing matches — that is not
        an error (e.g. a progress page for a student who never submitted).

    Raises:
        SubmissionInternalError: An unexpected error occurred while fetching scores.
    """
    try:
        summaries = ScoreSummary.objects.filter(
            student_item__course_id=course_id,
            student_item__student_id=student_id,
        ).select_related('latest', 'latest__submission', 'student_item')
    except DatabaseError:
        err = u"Could not fetch scores for course {}, student {}".format(
            course_id, student_id
        )
        logger.exception(err)
        raise SubmissionInternalError(err)
    result = {}
    for summary in summaries:
        # Hidden scores represent instructor resets; leave them out.
        if summary.latest.is_hidden():
            continue
        result[summary.student_item.item_id] = UnannotatedScoreSerializer(summary.latest).data
    return result
def get_latest_score_for_submission(submission_uuid, read_replica=False):
    """
    Retrieve the latest score for a particular submission.

    Args:
        submission_uuid (str): The UUID of the submission to retrieve.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        dict: The serialized score model, or None if no score is available.
    """
    try:
        # Validate the uuid by resolving the submission first.
        submission = _get_submission_model(submission_uuid, read_replica)
        candidates = Score.objects.filter(
            submission__uuid=submission.uuid
        ).order_by("-id").select_related("submission")
        if read_replica:
            candidates = _use_read_replica(candidates)
        latest = candidates[0]
    except (IndexError, Submission.DoesNotExist):
        # Unknown submission, or no score recorded for it yet.
        return None
    if latest.is_hidden():
        # A hidden score means the score was reset; report "no score".
        return None
    return ScoreSerializer(latest).data
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
    """
    Reset scores for a specific student on a specific problem.

    Note: this does *not* delete `Score` models from the database,
    since these are immutable. It simply creates a new score with
    the "reset" flag set to True.

    Args:
        student_id (unicode): The ID of the student for whom to reset scores.
        course_id (unicode): The ID of the course containing the item to reset.
        item_id (unicode): The ID of the item for which to reset scores.
        clear_state (bool): If True, will appear to delete any submissions
            associated with the specified StudentItem (soft delete).
        emit_signal (bool): If True, send the `score_reset` signal to any
            listeners after the reset score is created.

    Returns:
        None

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.
    """
    # Retrieve the student item; if there is none, there is no score to
    # reset, so we can return immediately.
    try:
        student_item = StudentItem.objects.get(
            student_id=student_id, course_id=course_id, item_id=item_id
        )
    except StudentItem.DoesNotExist:
        return
    # Create a "reset" score
    try:
        score = Score.create_reset_score(student_item)
        if emit_signal:
            # Send a signal out to any listeners who are waiting for scoring events.
            score_reset.send(
                sender=None,
                anonymous_user_id=student_id,
                course_id=course_id,
                item_id=item_id,
                created_at=score.created_at,
            )
        if clear_state:
            for sub in student_item.submission_set.all():
                # soft-delete the Submission
                sub.status = Submission.DELETED
                sub.save(update_fields=["status"])
                # Also clear out cached values
                cache_key = Submission.get_cache_key(sub.uuid)
                cache.delete(cache_key)
    except DatabaseError:
        # Fixed typo in the log message: "reseting" -> "resetting".
        msg = (
            u"Error occurred while resetting scores for"
            u" item {item_id} in course {course_id} for student {student_id}"
        ).format(item_id=item_id, course_id=course_id, student_id=student_id)
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    else:
        msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
            item_id=item_id, course_id=course_id, student_id=student_id
        )
        logger.info(msg)
def set_score(submission_uuid, points_earned, points_possible,
              annotation_creator=None, annotation_type=None, annotation_reason=None):
    """Set a score for a particular submission.

    Sets the score for a particular submission. This score is calculated
    externally to the API.

    Args:
        submission_uuid (str): UUID for the submission (must exist).
        points_earned (int): The earned points for this submission.
        points_possible (int): The total points possible for this particular student item.
        annotation_creator (str): An optional field for recording who gave this particular score
        annotation_type (str): An optional field for recording what type of annotation should be created,
            e.g. "staff_override".
        annotation_reason (str): An optional field for recording why this score was set to its value.

    Returns:
        None

    Raises:
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to save the score.
        SubmissionRequestError: Thrown if the given student item or submission
            are not found.

    Examples:
        >>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
        {
            'student_item': 2,
            'submission': 1,
            'points_earned': 11,
            'points_possible': 12,
            'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
        }

    """
    try:
        submission_model = _get_submission_model(submission_uuid)
    except Submission.DoesNotExist:
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except DatabaseError:
        error_msg = u"Could not retrieve submission {}.".format(
            submission_uuid
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)
    score = ScoreSerializer(
        data={
            "student_item": submission_model.student_item.pk,
            "submission": submission_model.pk,
            "points_earned": points_earned,
            "points_possible": points_possible,
        }
    )
    if not score.is_valid():
        # NOTE(review): logger.exception outside an except block logs no
        # traceback; logger.error would be the conventional call here.
        logger.exception(score.errors)
        raise SubmissionInternalError(score.errors)
    # When we save the score, a score summary will be created if
    # it does not already exist.
    # When the database's isolation level is set to repeatable-read,
    # it's possible for a score summary to exist for this student item,
    # even though we cannot retrieve it.
    # In this case, we assume that someone else has already created
    # a score summary and ignore the error.
    # TODO: once we're using Django 1.8, use transactions to ensure that these
    # two models are saved at the same time.
    try:
        score_model = score.save()
        _log_score(score_model)
        # Only record an annotation when a creator is supplied.
        if annotation_creator is not None:
            score_annotation = ScoreAnnotation(
                score=score_model,
                creator=annotation_creator,
                annotation_type=annotation_type,
                reason=annotation_reason
            )
            score_annotation.save()
        # Send a signal out to any listeners who are waiting for scoring events.
        score_set.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            anonymous_user_id=submission_model.student_item.student_id,
            course_id=submission_model.student_item.course_id,
            item_id=submission_model.student_item.item_id,
            created_at=score_model.created_at,
        )
    except IntegrityError:
        # Deliberately swallowed: see the repeatable-read note above.
        pass
def _log_submission(submission, student_item):
    """
    Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    message = (
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})"
    ).format(
        submission_uuid=submission["uuid"],
        course_id=student_item["course_id"],
        item_id=student_item["item_id"],
        anonymous_student_id=student_item["student_id"],
    )
    logger.info(message)
def _log_score(score):
    """
    Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    message = "Score of ({}/{}) set for submission {}".format(
        score.points_earned, score.points_possible, score.submission.uuid
    )
    logger.info(message)
def _get_or_create_student_item(student_item_dict):
    """Gets or creates a Student Item that matches the values specified.

    Attempts to get the specified Student Item. If it does not exist, the
    specified parameters are validated, and a new Student Item is created.

    Args:
        student_item_dict (dict): The dict containing the student_id, item_id,
            course_id, and item_type that uniquely defines a student item.

    Returns:
        StudentItem: The student item that was retrieved or created.

    Raises:
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to create or retrieve the specified student item.
        SubmissionRequestError: Thrown if the given student item parameters
            fail validation.
    """
    # The outer try turns any DatabaseError — from the lookup or the save —
    # into a SubmissionInternalError; SubmissionRequestError passes through.
    try:
        try:
            return StudentItem.objects.get(**student_item_dict)
        except StudentItem.DoesNotExist:
            serializer = StudentItemSerializer(data=student_item_dict)
            if serializer.is_valid():
                return serializer.save()
            logger.error(
                u"Invalid StudentItemSerializer: errors:{} data:{}".format(
                    serializer.errors,
                    student_item_dict
                )
            )
            raise SubmissionRequestError(field_errors=serializer.errors)
    except DatabaseError:
        message = u"An error occurred creating student item: {}".format(
            student_item_dict
        )
        logger.exception(message)
        raise SubmissionInternalError(message)
def _use_read_replica(queryset):
    """
    Route the queryset through the read replica when one is configured.

    Args:
        queryset (QuerySet)

    Returns:
        QuerySet: bound to the "read_replica" database alias if it exists in
        settings, otherwise the queryset unchanged.
    """
    if "read_replica" in settings.DATABASES:
        return queryset.using("read_replica")
    return queryset
|
edx/edx-submissions | submissions/api.py | get_all_submissions | python | def get_all_submissions(course_id, item_id, item_type, read_replica=True):
submission_qs = Submission.objects
if read_replica:
submission_qs = _use_read_replica(submission_qs)
# We cannot use SELECT DISTINCT ON because it's PostgreSQL only, so unfortunately
# our results will contain every entry of each student, not just the most recent.
# We sort by student_id and primary key, so the reults will be grouped be grouped by
# student, with the most recent submission being the first one in each group.
query = submission_qs.select_related('student_item').filter(
student_item__course_id=course_id,
student_item__item_id=item_id,
student_item__item_type=item_type,
).order_by('student_item__student_id', '-submitted_at', '-id').iterator()
for unused_student_id, row_iter in itertools.groupby(query, operator.attrgetter('student_item.student_id')):
submission = next(row_iter)
data = SubmissionSerializer(submission).data
data['student_id'] = submission.student_item.student_id
yield data | For the given item, get the most recent submission for every student who has submitted.
This may return a very large result set! It is implemented as a generator for efficiency.
Args:
course_id, item_id, item_type (string): The values of the respective student_item fields
to filter the submissions by.
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Yields:
Dicts representing the submissions with the following fields:
student_item
student_id
attempt_number
submitted_at
created_at
answer
Raises:
Cannot fail unless there's a database error, but may return an empty iterable. | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/api.py#L424-L464 | [
"def _use_read_replica(queryset):\n \"\"\"\n Use the read replica if it's available.\n\n Args:\n queryset (QuerySet)\n\n Returns:\n QuerySet\n\n \"\"\"\n return (\n queryset.using(\"read_replica\")\n if \"read_replica\" in settings.DATABASES\n else queryset\n )\n"
] | """
Public interface for the submissions app.
"""
from __future__ import absolute_import
import copy
import itertools
import logging
import operator
import json
from uuid import UUID
from django.conf import settings
from django.core.cache import cache
from django.db import IntegrityError, DatabaseError
from submissions.serializers import (
SubmissionSerializer, StudentItemSerializer, ScoreSerializer, UnannotatedScoreSerializer
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary, ScoreAnnotation, score_set, score_reset
import six
logger = logging.getLogger("submissions.api")
# By default, limit the number of top submissions
# Anything above this limit will result in a request error
MAX_TOP_SUBMISSIONS = 100
# Set a relatively low cache timeout for top submissions.
TOP_SUBMISSIONS_CACHE_TIMEOUT = 300
class SubmissionError(Exception):
    """Base class for errors raised by submission actions.

    Raised whenever the submission API cannot carry out a requested
    operation.
    """
class SubmissionInternalError(SubmissionError):
    """Signals an internal failure within the Submission API.

    Raised for problems caused by the underlying implementation (for
    example, database errors) rather than by incorrect use of the API.
    """
class SubmissionNotFoundError(SubmissionError):
    """Signals that no submission matched the request.

    Raised when a lookup is performed with criteria that match no
    existing Submission.
    """
class SubmissionRequestError(SubmissionError):
    """This error is raised when there was a request-specific error

    This error is reserved for problems specific to the use of the API.
    """

    def __init__(self, msg="", field_errors=None):
        """
        Configure the submission request error.

        Keyword Args:
            msg (unicode): The error message.
            field_errors (dict): A dictionary of errors (list of unicode)
                specific to a fields provided in the request.

        Example usage:

        >>> raise SubmissionRequestError(
        >>>     "An unexpected error occurred"
        >>>     {"answer": ["Maximum answer length exceeded."]}
        >>> )

        """
        super(SubmissionRequestError, self).__init__(msg)
        # Keep our own copy of the message text: Python 3 exceptions no
        # longer expose a `.message` attribute (removed per PEP 352), and
        # __repr__ below needs it.
        self.msg = msg
        # Deep-copy so later mutation of the caller's dict cannot change
        # the recorded errors.
        self.field_errors = (
            copy.deepcopy(field_errors)
            if field_errors is not None
            else {}
        )
        self.args += (self.field_errors,)

    def __repr__(self):
        """
        Show the message and field errors upon output.
        """
        # BUG FIX: previously referenced `self.message`, which raises
        # AttributeError under Python 3; use the explicitly stored msg.
        return '{}(msg="{}", field_errors={})'.format(
            self.__class__.__name__, self.msg, self.field_errors
        )
def create_submission(student_item_dict, answer, submitted_at=None, attempt_number=None):
    """Create a submission for assessment.

    Generic entry point for submitting an answer to be assessed.

    Args:
        student_item_dict (dict): Identifies the course, student, and item
            that this submission belongs to.
        answer (JSON-serializable): The answer given by the student.
        submitted_at (datetime): When this submission was submitted;
            defaults to the current date if omitted.
        attempt_number (int): Explicit attempt number for this submission.
            When omitted, the most recent submission's attempt_number plus
            one is used (or 1 when there are no prior submissions).

    Returns:
        dict: Serialized representation of the created Submission, carrying
        student_item, attempt_number, submitted_at, created_at, and answer.

    Raises:
        SubmissionRequestError: The student item or submission failed
            validation (missing values, answer too long, negative
            attempt_number, or an invalid submitted_at).
        SubmissionInternalError: A database error occurred while reading
            or writing submissions.
    """
    student_item_model = _get_or_create_student_item(student_item_dict)

    # Derive the attempt number from the latest existing submission.
    # NOTE(review): relies on the Submission model's default ordering
    # putting the most recent submission first — matches the documented
    # "most recent" contract; confirm against the model's Meta.
    if attempt_number is None:
        try:
            latest = Submission.objects.filter(
                student_item=student_item_model)[:1]
        except DatabaseError:
            error_message = u"An error occurred while filtering submissions for student item: {}".format(
                student_item_dict)
            logger.exception(error_message)
            raise SubmissionInternalError(error_message)
        attempt_number = latest[0].attempt_number + 1 if latest else 1

    model_kwargs = {
        "student_item": student_item_model.pk,
        "answer": answer,
        "attempt_number": attempt_number,
    }
    if submitted_at:
        model_kwargs["submitted_at"] = submitted_at

    try:
        serializer = SubmissionSerializer(data=model_kwargs)
        if not serializer.is_valid():
            # Validation failure is a caller error, not an internal one.
            raise SubmissionRequestError(field_errors=serializer.errors)
        serializer.save()
        sub_data = serializer.data
        _log_submission(sub_data, student_item_dict)
        return sub_data
    except DatabaseError:
        error_message = u"An error occurred while creating submission {} for student item: {}".format(
            model_kwargs,
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _get_submission_model(uuid, read_replica=False):
    """
    Helper to retrieve a given Submission object from the database. Helper is needed to centralize logic that fixes
    EDUCATOR-1090, because uuids are stored both with and without hyphens.

    Args:
        uuid (str): the submission uuid, with or without hyphens.
        read_replica (bool): route the primary lookup through the read
            replica when available.

    Raises:
        Submission.DoesNotExist: no row matches either uuid form.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)
    try:
        # Fast path: the uuid is stored exactly as passed in.
        submission = submission_qs.get(uuid=uuid)
    except Submission.DoesNotExist:
        try:
            # Fallback: the row may have been stored with a hyphenated uuid.
            # UUID() validates the value and yields the canonical hyphenated
            # form, so interpolating it into the raw SQL below cannot carry
            # arbitrary text (no injection risk from this value).
            # NOTE(review): a malformed uuid makes UUID() raise ValueError,
            # which is NOT caught here and propagates — confirm callers
            # expect that.
            hyphenated_value = six.text_type(UUID(uuid))
            query = """
                SELECT
                    `submissions_submission`.`id`,
                    `submissions_submission`.`uuid`,
                    `submissions_submission`.`student_item_id`,
                    `submissions_submission`.`attempt_number`,
                    `submissions_submission`.`submitted_at`,
                    `submissions_submission`.`created_at`,
                    `submissions_submission`.`raw_answer`,
                    `submissions_submission`.`status`
                FROM
                    `submissions_submission`
                WHERE (
                    NOT (`submissions_submission`.`status` = 'D')
                    AND `submissions_submission`.`uuid` = '{}'
                )
            """
            query = query.replace("{}", hyphenated_value)
            # We can use Submission.objects instead of the SoftDeletedManager, we'll include that logic manually
            submission = Submission.objects.raw(query)[0]
        except IndexError:
            # raw() returned no rows: the submission truly does not exist.
            raise Submission.DoesNotExist()
        # Avoid the extra hit next time
        submission.save(update_fields=['uuid'])
    return submission
def get_submission(submission_uuid, read_replica=False):
    """Retrieve a single submission by its uuid.

    Args:
        submission_uuid (str): Identifier for the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database; fall back to the default database otherwise.

    Returns:
        dict: The serialized submission.

    Raises:
        SubmissionNotFoundError: The submission does not exist.
        SubmissionRequestError: The search parameter is not a string.
        SubmissionInternalError: An unknown error occurred.
    """
    # Accept UUID instances for convenience; reject anything else.
    if not isinstance(submission_uuid, six.string_types):
        if isinstance(submission_uuid, UUID):
            submission_uuid = six.text_type(submission_uuid)
        else:
            raise SubmissionRequestError(
                msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
            )

    cache_key = Submission.get_cache_key(submission_uuid)

    # Try the cache first.  Some backends raise on unusual keys (e.g.
    # memcache keys containing spaces), so treat any failure as a miss.
    cached_submission_data = None
    try:
        cached_submission_data = cache.get(cache_key)
    except Exception:
        logger.exception("Error occurred while retrieving submission from the cache")

    if cached_submission_data:
        logger.info("Get submission {} (cached)".format(submission_uuid))
        return cached_submission_data

    # Cache miss: hit the database and repopulate the cache.
    try:
        submission = _get_submission_model(submission_uuid, read_replica)
        submission_data = SubmissionSerializer(submission).data
        cache.set(cache_key, submission_data)
    except Submission.DoesNotExist:
        logger.error("Submission {} not found.".format(submission_uuid))
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except Exception as exc:
        # Something very unexpected has just happened (like DB misconfig)
        err_msg = "Could not get submission due to error: {}".format(exc)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)

    logger.info("Get submission {}".format(submission_uuid))
    return submission_data
def get_submission_and_student(uuid, read_replica=False):
    """Retrieve a submission by uuid, including its student item.

    Args:
        uuid (str): the unique identifier of the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database; fall back to the default database otherwise.

    Returns:
        dict: Serialized Submission whose 'student_item' key holds the
        serialized StudentItem model.

    Raises:
        SubmissionNotFoundError: The submission does not exist.
        SubmissionRequestError: The search parameter is not a string.
        SubmissionInternalError: An unknown error occurred.
    """
    # This may raise API exceptions.
    submission = get_submission(uuid, read_replica=read_replica)

    # Look for the serialized student item in the cache; backends may
    # raise on odd keys, so any failure counts as a miss.
    cache_key = "submissions.student_item.{}".format(submission['student_item'])
    cached_student_item = None
    try:
        cached_student_item = cache.get(cache_key)
    except Exception:
        logger.exception("Error occurred while retrieving student item from the cache")

    if cached_student_item is not None:
        submission['student_item'] = cached_student_item
        return submission

    # Cache miss: fetch, serialize, and cache the student item.
    try:
        student_item_qs = StudentItem.objects
        if read_replica:
            student_item_qs = _use_read_replica(student_item_qs)
        student_item = student_item_qs.get(id=submission['student_item'])
        submission['student_item'] = StudentItemSerializer(student_item).data
        cache.set(cache_key, submission['student_item'])
    except Exception as ex:
        err_msg = "Could not get submission due to error: {}".format(ex)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)
    return submission
def get_submissions(student_item_dict, limit=None):
    """Return the submissions for a student item, most recent first.

    Args:
        student_item_dict (dict): Identifies the course, student, and item
            whose submissions should be returned.
        limit (int): Optional cap on the number of submissions returned.
            When omitted, all associated submissions are returned.

    Returns:
        list of dict: Serialized submissions, each carrying student_item,
        attempt_number, submitted_at, created_at, and answer attributes.

    Raises:
        SubmissionRequestError: The associated student item failed
            validation.
        SubmissionNotFoundError: Submissions could not be fetched for the
            associated student item.
    """
    item_model = _get_or_create_student_item(student_item_dict)
    try:
        queryset = Submission.objects.filter(
            student_item=item_model)
    except DatabaseError:
        error_message = (
            u"Error getting submission request for student item {}"
            .format(student_item_dict)
        )
        logger.exception(error_message)
        raise SubmissionNotFoundError(error_message)

    # A falsy limit (None or 0) means "no cap", matching the original API.
    if limit:
        queryset = queryset[:limit]

    return SubmissionSerializer(queryset, many=True).data
def get_all_course_submission_information(course_id, item_type, read_replica=True):
    """Yield (student item, submission, score) triples for a whole course.

    For the given course, walk every submission whose student item matches
    the requested item type.  Each submission is yielded together with its
    serialized student item and, when available, the latest score for that
    student item.  A submission whose score is not the latest for the
    student item is still yielded, but with an empty score dict.

    Args:
        course_id (str): The course to pull submissions from.
        item_type (str): The type of items to pull submissions for.
        read_replica (bool): Try to use the database's read replica if
            it's available.

    Yields:
        Tuple of three dicts: (serialized StudentItem, serialized
        Submission, serialized Score — or {} when absent or out of date).
    """
    queryset = Submission.objects
    if read_replica:
        queryset = _use_read_replica(queryset)

    submissions = queryset.select_related('student_item__scoresummary__latest__submission').filter(
        student_item__course_id=course_id,
        student_item__item_type=item_type,
    ).iterator()

    for submission in submissions:
        student_item = submission.student_item
        serialized_score = {}
        if hasattr(student_item, 'scoresummary'):
            latest_score = student_item.scoresummary.latest
            # Mirror get_score(): skip reset ("hidden") scores, and only
            # attach the score when it belongs to this very submission.
            if (not latest_score.is_hidden()) and latest_score.submission.uuid == submission.uuid:
                serialized_score = ScoreSerializer(latest_score).data
        yield (
            StudentItemSerializer(student_item).data,
            SubmissionSerializer(submission).data,
            serialized_score,
        )
def get_top_submissions(course_id, item_id, item_type, number_of_top_scores, use_cache=True, read_replica=True):
    """Return the highest-scoring submissions for one assessment item.

    Only the latest score per student item is considered, and scores of 0
    are excluded.  Because callers can tolerate slightly stale results,
    this uses a cache and the read replica (when available) by default.

    Args:
        course_id (str): The course to search for top scores.
        item_id (str): The item within the course.
        item_type (str): The type of item to retrieve.
        number_of_top_scores (int): How many scores to return; must be
            between 1 and MAX_TOP_SUBMISSIONS inclusive.

    Kwargs:
        use_cache (bool): If true, consult the cache before the database.
        read_replica (bool): If true, prefer the read replica database.

    Returns:
        list of dict: Each entry has 'score' (points earned) and 'content'
        (the submitted answer).  Empty when there are no non-zero scores.

    Raises:
        SubmissionRequestError: number_of_top_scores is out of range.
        SubmissionInternalError: The database query failed.
    """
    if not 1 <= number_of_top_scores <= MAX_TOP_SUBMISSIONS:
        error_msg = (
            u"Number of top scores must be a number between 1 and {}.".format(MAX_TOP_SUBMISSIONS)
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    # First check the cache (unless caching is disabled).
    cache_key = "submissions.top_submissions.{course}.{item}.{type}.{number}".format(
        course=course_id,
        item=item_id,
        type=item_type,
        number=number_of_top_scores
    )
    top_submissions = cache.get(cache_key) if use_cache else None
    if top_submissions is not None:
        return top_submissions

    # Cache miss (or caching disabled): query the database, preferring
    # the read replica.
    try:
        query = ScoreSummary.objects.filter(
            student_item__course_id=course_id,
            student_item__item_id=item_id,
            student_item__item_type=item_type,
            latest__points_earned__gt=0
        ).select_related('latest', 'latest__submission').order_by("-latest__points_earned")
        if read_replica:
            query = _use_read_replica(query)
        score_summaries = query[:number_of_top_scores]
    except DatabaseError:
        msg = u"Could not fetch top score summaries for course {}, item {} of type {}".format(
            course_id, item_id, item_type
        )
        logger.exception(msg)
        raise SubmissionInternalError(msg)

    # Pair each top score with its submitted answer.
    top_submissions = [
        {
            "score": summary.latest.points_earned,
            "content": SubmissionSerializer(summary.latest.submission).data['answer']
        }
        for summary in score_summaries
    ]

    # Always store the freshly computed list so later calls can hit the cache.
    cache.set(cache_key, top_submissions, TOP_SUBMISSIONS_CACHE_TIMEOUT)
    return top_submissions
def get_score(student_item):
    """Return the latest score for a student item, or None.

    Each student item has at most one current score, created once the
    assessment workflow for the item completes.

    Args:
        student_item (dict): Field lookups identifying the StudentItem
            whose score should be returned.

    Returns:
        dict: The serialized score, or None when no visible score exists.

    Raises:
        SubmissionInternalError: A score could not be retrieved because of
            an internal server error.
    """
    try:
        item = StudentItem.objects.get(**student_item)
        score = ScoreSummary.objects.get(student_item=item).latest
    except (ScoreSummary.DoesNotExist, StudentItem.DoesNotExist):
        return None

    # By convention, scores are hidden when "points possible" is 0 —
    # this happens when an instructor resets a student's score.
    if score.is_hidden():
        return None
    return ScoreSerializer(score).data
def get_scores(course_id, student_id):
    """Map item_ids to serialized scores for one student in one course.

    Used by an LMS to collect all of a student's scores for a course.
    Hidden scores (those reset by an instructor) are excluded.

    Args:
        course_id (str): Course ID, used to look up StudentItems.
        student_id (str): Student ID, used to look up StudentItems.

    Returns:
        dict: item_id -> serialized score (raw, unweighted points).  Empty
        when nothing matches; that is not an error, since progress pages
        are queried for students who may never have submitted anything.

    Raises:
        SubmissionInternalError: An unexpected database error occurred.
    """
    try:
        summaries = ScoreSummary.objects.filter(
            student_item__course_id=course_id,
            student_item__student_id=student_id,
        ).select_related('latest', 'latest__submission', 'student_item')
    except DatabaseError:
        msg = u"Could not fetch scores for course {}, student {}".format(
            course_id, student_id
        )
        logger.exception(msg)
        raise SubmissionInternalError(msg)

    return {
        summary.student_item.item_id: UnannotatedScoreSerializer(summary.latest).data
        for summary in summaries if not summary.latest.is_hidden()
    }
def get_latest_score_for_submission(submission_uuid, read_replica=False):
    """Return the most recent score for a particular submission, or None.

    Args:
        submission_uuid (str): The UUID of the submission whose score is
            wanted.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database; fall back to the default database otherwise.

    Returns:
        dict: The serialized score model, or None when the submission does
        not exist, has no score, or its latest score is hidden.
    """
    try:
        # Validate the uuid (and normalize hyphenation) before querying.
        submission_model = _get_submission_model(submission_uuid, read_replica)
        scores = Score.objects.filter(
            submission__uuid=submission_model.uuid
        ).order_by("-id").select_related("submission")
        if read_replica:
            scores = _use_read_replica(scores)
        latest = scores[0]
    except (IndexError, Submission.DoesNotExist):
        return None
    return None if latest.is_hidden() else ScoreSerializer(latest).data
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
    """Reset scores for a specific student on a specific problem.

    Score rows are immutable, so nothing is deleted: a new score with the
    "reset" flag set to True is created instead.

    Args:
        student_id (unicode): The student whose scores are reset.
        course_id (unicode): The course containing the item to reset.
        item_id (unicode): The item for which scores are reset.
        clear_state (bool): If True, soft-delete every submission attached
            to the StudentItem so they appear to be gone.
        emit_signal (bool): If True, broadcast a score_reset signal to any
            listeners waiting on scoring events.

    Returns:
        None

    Raises:
        SubmissionInternalError: An unexpected error occurred while
            resetting scores.
    """
    try:
        student_item = StudentItem.objects.get(
            student_id=student_id, course_id=course_id, item_id=item_id
        )
    except StudentItem.DoesNotExist:
        # No student item means there is no score to reset; nothing to do.
        return

    try:
        score = Score.create_reset_score(student_item)
        if emit_signal:
            score_reset.send(
                sender=None,
                anonymous_user_id=student_id,
                course_id=course_id,
                item_id=item_id,
                created_at=score.created_at,
            )
        if clear_state:
            for sub in student_item.submission_set.all():
                # Soft-delete: flip the status flag rather than removing
                # the row, then drop the cached copy of the submission.
                sub.status = Submission.DELETED
                sub.save(update_fields=["status"])
                cache.delete(Submission.get_cache_key(sub.uuid))
    except DatabaseError:
        msg = (
            u"Error occurred while reseting scores for"
            u" item {item_id} in course {course_id} for student {student_id}"
        ).format(item_id=item_id, course_id=course_id, student_id=student_id)
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    else:
        msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
            item_id=item_id, course_id=course_id, student_id=student_id
        )
        logger.info(msg)
def set_score(submission_uuid, points_earned, points_possible,
              annotation_creator=None, annotation_type=None, annotation_reason=None):
    """Set a score for a particular submission.
    Sets the score for a particular submission. This score is calculated
    externally to the API.
    Args:
        submission_uuid (str): UUID for the submission (must exist).
        points_earned (int): The earned points for this submission.
        points_possible (int): The total points possible for this particular student item.
        annotation_creator (str): An optional field for recording who gave this particular score
        annotation_type (str): An optional field for recording what type of annotation should be created,
            e.g. "staff_override".
        annotation_reason (str): An optional field for recording why this score was set to its value.
    Returns:
        None
    Raises:
        SubmissionNotFoundError: Thrown if no submission matches the given
            uuid.
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to save the score.
        SubmissionRequestError: Thrown if the given student item or submission
            are not found.
    Examples:
        >>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
        {
            'student_item': 2,
            'submission': 1,
            'points_earned': 11,
            'points_possible': 12,
            'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
        }
    """
    # Look up the submission, translating model-level errors into API errors.
    try:
        submission_model = _get_submission_model(submission_uuid)
    except Submission.DoesNotExist:
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except DatabaseError:
        error_msg = u"Could not retrieve submission {}.".format(
            submission_uuid
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)
    score = ScoreSerializer(
        data={
            "student_item": submission_model.student_item.pk,
            "submission": submission_model.pk,
            "points_earned": points_earned,
            "points_possible": points_possible,
        }
    )
    if not score.is_valid():
        logger.exception(score.errors)
        raise SubmissionInternalError(score.errors)
    # When we save the score, a score summary will be created if
    # it does not already exist.
    # When the database's isolation level is set to repeatable-read,
    # it's possible for a score summary to exist for this student item,
    # even though we cannot retrieve it.
    # In this case, we assume that someone else has already created
    # a score summary and ignore the error.
    # TODO: once we're using Django 1.8, use transactions to ensure that these
    # two models are saved at the same time.
    try:
        score_model = score.save()
        _log_score(score_model)
        # Persist the optional annotation (e.g. a staff override) alongside
        # the score.
        if annotation_creator is not None:
            score_annotation = ScoreAnnotation(
                score=score_model,
                creator=annotation_creator,
                annotation_type=annotation_type,
                reason=annotation_reason
            )
            score_annotation.save()
        # Send a signal out to any listeners who are waiting for scoring events.
        score_set.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            anonymous_user_id=submission_model.student_item.student_id,
            course_id=submission_model.student_item.course_id,
            item_id=submission_model.student_item.item_id,
            created_at=score_model.created_at,
        )
    except IntegrityError:
        # A score summary was created concurrently (see the repeatable-read
        # note above); safe to ignore.
        pass
def _log_submission(submission, student_item):
    """
    Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    message = (
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})"
    ).format(
        submission_uuid=submission["uuid"],
        course_id=student_item["course_id"],
        item_id=student_item["item_id"],
        anonymous_student_id=student_item["student_id"]
    )
    logger.info(message)
def _log_score(score):
    """
    Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    message = "Score of ({}/{}) set for submission {}".format(
        score.points_earned, score.points_possible, score.submission.uuid
    )
    logger.info(message)
def _get_or_create_student_item(student_item_dict):
    """Fetch the StudentItem matching the given values, creating it if needed.

    The dict must carry the student_id, item_id, course_id, and item_type
    that uniquely identify a student item.  When no matching row exists,
    the values are validated and a new StudentItem is created.

    Args:
        student_item_dict (dict): The uniquely identifying fields for the
            student item.

    Returns:
        StudentItem: The retrieved or newly created student item.

    Raises:
        SubmissionInternalError: A database error occurred while fetching
            or creating the student item.
        SubmissionRequestError: The given student item values failed
            validation.
    """
    try:
        try:
            return StudentItem.objects.get(**student_item_dict)
        except StudentItem.DoesNotExist:
            serializer = StudentItemSerializer(data=student_item_dict)
            if not serializer.is_valid():
                logger.error(
                    u"Invalid StudentItemSerializer: errors:{} data:{}".format(
                        serializer.errors,
                        student_item_dict
                    )
                )
                raise SubmissionRequestError(field_errors=serializer.errors)
            return serializer.save()
    except DatabaseError:
        error_message = u"An error occurred creating student item: {}".format(
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _use_read_replica(queryset):
    """
    Route the queryset to the read replica when one is configured.

    Args:
        queryset (QuerySet)

    Returns:
        QuerySet: Bound to the "read_replica" database when that alias is
        present in settings.DATABASES; otherwise the queryset unchanged.
    """
    if "read_replica" in settings.DATABASES:
        return queryset.using("read_replica")
    return queryset
|
edx/edx-submissions | submissions/api.py | get_all_course_submission_information | python | def get_all_course_submission_information(course_id, item_type, read_replica=True):
submission_qs = Submission.objects
if read_replica:
submission_qs = _use_read_replica(submission_qs)
query = submission_qs.select_related('student_item__scoresummary__latest__submission').filter(
student_item__course_id=course_id,
student_item__item_type=item_type,
).iterator()
for submission in query:
student_item = submission.student_item
serialized_score = {}
if hasattr(student_item, 'scoresummary'):
latest_score = student_item.scoresummary.latest
# Only include the score if it is not a reset score (is_hidden), and if the current submission is the same
# as the student_item's latest score's submission. This matches the behavior of the API's get_score method.
if (not latest_score.is_hidden()) and latest_score.submission.uuid == submission.uuid:
serialized_score = ScoreSerializer(latest_score).data
yield (
StudentItemSerializer(student_item).data,
SubmissionSerializer(submission).data,
serialized_score
) | For the given course, get all student items of the given item type, all the submissions for those itemes,
and the latest scores for each item. If a submission was given a score that is not the latest score for the
relevant student item, it will still be included but without score.
Args:
course_id (str): The course that we are getting submissions from.
item_type (str): The type of items that we are getting submissions for.
read_replica (bool): Try to use the database's read replica if it's available.
Yields:
A tuple of three dictionaries representing:
(1) a student item with the following fields:
student_id
course_id
student_item
item_type
(2) a submission with the following fields:
student_item
attempt_number
submitted_at
created_at
answer
(3) a score with the following fields, if one exists and it is the latest score:
(if both conditions are not met, an empty dict is returned here)
student_item
submission
points_earned
points_possible
created_at
submission_uuid | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/api.py#L467-L523 | [
"def _use_read_replica(queryset):\n \"\"\"\n Use the read replica if it's available.\n\n Args:\n queryset (QuerySet)\n\n Returns:\n QuerySet\n\n \"\"\"\n return (\n queryset.using(\"read_replica\")\n if \"read_replica\" in settings.DATABASES\n else queryset\n )\n"
] | """
Public interface for the submissions app.
"""
from __future__ import absolute_import
import copy
import itertools
import logging
import operator
import json
from uuid import UUID
from django.conf import settings
from django.core.cache import cache
from django.db import IntegrityError, DatabaseError
from submissions.serializers import (
SubmissionSerializer, StudentItemSerializer, ScoreSerializer, UnannotatedScoreSerializer
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary, ScoreAnnotation, score_set, score_reset
import six
logger = logging.getLogger("submissions.api")
# By default, limit the number of top submissions
# Anything above this limit will result in a request error
MAX_TOP_SUBMISSIONS = 100
# Set a relatively low cache timeout for top submissions.
TOP_SUBMISSIONS_CACHE_TIMEOUT = 300
class SubmissionError(Exception):
    """Base class for errors raised during submission actions.

    Raised (usually via one of its subclasses) whenever the submission API
    cannot carry out a requested action.
    """
class SubmissionInternalError(SubmissionError):
    """An error internal to the Submission API has occurred.

    Raised when a failure stems from the implementation of the underlying
    services (for example, the database), rather than from incorrect use of
    the API itself.
    """
class SubmissionNotFoundError(SubmissionError):
    """Raised when no submission matches the request.

    If a call to the API specifies a state that matches no Submission,
    this error may be raised.
    """
class SubmissionRequestError(SubmissionError):
    """This error is raised when there was a request-specific error

    This error is reserved for problems specific to the use of the API.
    """

    def __init__(self, msg="", field_errors=None):
        """
        Configure the submission request error.

        Keyword Args:
            msg (unicode): The error message.
            field_errors (dict): A dictionary of errors (list of unicode)
                specific to a fields provided in the request.

        Example usage:

        >>> raise SubmissionRequestError(
        >>>     "An unexpected error occurred"
        >>>     {"answer": ["Maximum answer length exceeded."]}
        >>> )

        """
        super(SubmissionRequestError, self).__init__(msg)
        # BUGFIX: Python 3 removed `BaseException.message`, so `__repr__`
        # below would raise AttributeError unless we store it explicitly.
        self.message = msg
        # Deep-copy so later mutation of the caller's dict cannot change
        # the recorded errors.
        self.field_errors = (
            copy.deepcopy(field_errors)
            if field_errors is not None
            else {}
        )
        self.args += (self.field_errors,)

    def __repr__(self):
        """
        Show the message and field errors upon output.
        """
        return '{}(msg="{}", field_errors={})'.format(
            self.__class__.__name__, self.message, self.field_errors
        )
def create_submission(student_item_dict, answer, submitted_at=None, attempt_number=None):
    """Creates a submission for assessment.

    Generic means by which to submit an answer for assessment.

    Args:
        student_item_dict (dict): The student_item this
            submission is associated with. This is used to determine which
            course, student, and location this submission belongs to.
        answer (JSON-serializable): The answer given by the student to be assessed.
        submitted_at (datetime): The date in which this submission was submitted.
            If not specified, defaults to the current date.
        attempt_number (int): A student may be able to submit multiple attempts
            per question. This allows the designated attempt to be overridden.
            If the attempt is not specified, it will take the most recent
            submission, as specified by the submitted_at time, and use its
            attempt_number plus one.

    Returns:
        dict: A representation of the created Submission. The submission
        contains five attributes: student_item, attempt_number, submitted_at,
        created_at, and answer. 'student_item' is the ID of the related student
        item for the submission. 'attempt_number' is the attempt this submission
        represents for this question. 'submitted_at' represents the time this
        submission was submitted, which can be configured, versus the
        'created_at' date, which is when the submission is first created.

    Raises:
        SubmissionRequestError: Raised when there are validation errors for the
            student item or submission. This can be caused by the student item
            missing required values, the submission being too long, the
            attempt_number is negative, or the given submitted_at time is invalid.
        SubmissionInternalError: Raised when submission access causes an
            internal error.

    Examples:
        >>> student_item_dict = dict(
        >>>    student_id="Tim",
        >>>    item_id="item_1",
        >>>    course_id="course_1",
        >>>    item_type="type_one"
        >>> )
        >>> create_submission(student_item_dict, "The answer is 42.", datetime.utcnow, 1)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284 tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }

    """
    student_item_model = _get_or_create_student_item(student_item_dict)
    if attempt_number is None:
        try:
            # BUGFIX: QuerySets are lazy, so without list() the database
            # would only be hit at `submissions[0]` below -- *outside* this
            # try block -- and a DatabaseError would escape uncaught instead
            # of being wrapped in SubmissionInternalError.
            submissions = list(Submission.objects.filter(
                student_item=student_item_model)[:1])
        except DatabaseError:
            error_message = u"An error occurred while filtering submissions for student item: {}".format(
                student_item_dict)
            logger.exception(error_message)
            raise SubmissionInternalError(error_message)
        # Submission.objects is ordered most-recent-first (per the model's
        # default ordering used throughout this module), so [0] is the
        # latest attempt.
        attempt_number = submissions[0].attempt_number + 1 if submissions else 1

    model_kwargs = {
        "student_item": student_item_model.pk,
        "answer": answer,
        "attempt_number": attempt_number,
    }
    if submitted_at:
        model_kwargs["submitted_at"] = submitted_at

    try:
        submission_serializer = SubmissionSerializer(data=model_kwargs)
        if not submission_serializer.is_valid():
            raise SubmissionRequestError(field_errors=submission_serializer.errors)
        submission_serializer.save()

        sub_data = submission_serializer.data
        _log_submission(sub_data, student_item_dict)

        return sub_data

    except DatabaseError:
        error_message = u"An error occurred while creating submission {} for student item: {}".format(
            model_kwargs,
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _get_submission_model(uuid, read_replica=False):
    """
    Helper to retrieve a given Submission object from the database. Helper is needed to centralize logic that fixes
    EDUCATOR-1090, because uuids are stored both with and without hyphens.

    First tries an exact lookup on the uuid as given; on a miss, retries a
    raw SQL lookup with the canonical hyphenated form, then persists that
    form so future lookups succeed on the first query.

    Raises:
        Submission.DoesNotExist: if neither form matches a row.
        ValueError: if `uuid` is not a valid UUID string (from UUID(uuid)).
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    try:
        submission = submission_qs.get(uuid=uuid)
    except Submission.DoesNotExist:
        try:
            # Canonicalize to the hyphenated form. UUID(uuid) also validates
            # the input, which is what makes the string substitution into the
            # raw SQL below safe -- only a well-formed UUID can get this far.
            hyphenated_value = six.text_type(UUID(uuid))
            query = """
                SELECT
                    `submissions_submission`.`id`,
                    `submissions_submission`.`uuid`,
                    `submissions_submission`.`student_item_id`,
                    `submissions_submission`.`attempt_number`,
                    `submissions_submission`.`submitted_at`,
                    `submissions_submission`.`created_at`,
                    `submissions_submission`.`raw_answer`,
                    `submissions_submission`.`status`
                FROM
                    `submissions_submission`
                WHERE (
                    NOT (`submissions_submission`.`status` = 'D')
                    AND `submissions_submission`.`uuid` = '{}'
                )
            """
            query = query.replace("{}", hyphenated_value)
            # We can use Submission.objects instead of the SoftDeletedManager, we'll include that logic manually
            # (the `status != 'D'` clause above replicates the soft-delete filter).
            submission = Submission.objects.raw(query)[0]
        except IndexError:
            # The raw query matched no rows: translate to the ORM's
            # conventional "not found" exception for callers.
            raise Submission.DoesNotExist()

        # Avoid the extra hit next time
        # NOTE(review): this writes the hyphenated uuid back to the default
        # database even when read_replica was requested -- presumably
        # intentional, since replicas are read-only; confirm.
        submission.save(update_fields=['uuid'])

    return submission
def get_submission(submission_uuid, read_replica=False):
    """Retrieves a single submission by uuid.

    Args:
        submission_uuid (str): Identifier for the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Raises:
        SubmissionNotFoundError: Raised if the submission does not exist.
        SubmissionRequestError: Raised if the search parameter is not a string.
        SubmissionInternalError: Raised for unknown errors.

    Examples:
        >>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }

    """
    if not isinstance(submission_uuid, six.string_types):
        # Accept UUID objects for convenience; any other type is a bad request.
        if isinstance(submission_uuid, UUID):
            submission_uuid = six.text_type(submission_uuid)
        else:
            raise SubmissionRequestError(
                msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
            )

    cache_key = Submission.get_cache_key(submission_uuid)
    try:
        cached_submission_data = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception
        # (for example, memcache keys that contain spaces)
        logger.exception("Error occurred while retrieving submission from the cache")
        cached_submission_data = None

    if cached_submission_data:
        logger.info("Get submission {} (cached)".format(submission_uuid))
        return cached_submission_data

    try:
        submission = _get_submission_model(submission_uuid, read_replica)
        submission_data = SubmissionSerializer(submission).data
        # Warm the cache so subsequent lookups skip the database.
        cache.set(cache_key, submission_data)
    except Submission.DoesNotExist:
        logger.error("Submission {} not found.".format(submission_uuid))
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except Exception as exc:
        # Something very unexpected has just happened (like DB misconfig)
        err_msg = "Could not get submission due to error: {}".format(exc)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)

    logger.info("Get submission {}".format(submission_uuid))
    return submission_data
def get_submission_and_student(uuid, read_replica=False):
    """
    Retrieve a submission by its unique identifier, including the associated student item.

    Args:
        uuid (str): the unique identifier of the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        Serialized Submission model (dict) containing a serialized StudentItem model

    Raises:
        SubmissionNotFoundError: Raised if the submission does not exist.
        SubmissionRequestError: Raised if the search parameter is not a string.
        SubmissionInternalError: Raised for unknown errors.

    """
    # This may raise API exceptions
    submission = get_submission(uuid, read_replica=read_replica)

    # Retrieve the student item from the cache
    # (`submission['student_item']` is the StudentItem primary key here;
    # below it is replaced by the full serialized student item).
    cache_key = "submissions.student_item.{}".format(submission['student_item'])
    try:
        cached_student_item = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception
        # (for example, memcache keys that contain spaces)
        logger.exception("Error occurred while retrieving student item from the cache")
        cached_student_item = None

    if cached_student_item is not None:
        submission['student_item'] = cached_student_item
    else:
        # There is probably a more idiomatic way to do this using the Django REST framework
        try:
            student_item_qs = StudentItem.objects
            if read_replica:
                student_item_qs = _use_read_replica(student_item_qs)
            student_item = student_item_qs.get(id=submission['student_item'])
            submission['student_item'] = StudentItemSerializer(student_item).data
            # Warm the cache with the serialized student item for next time.
            cache.set(cache_key, submission['student_item'])
        except Exception as ex:
            err_msg = "Could not get submission due to error: {}".format(ex)
            logger.exception(err_msg)
            raise SubmissionInternalError(err_msg)

    return submission
def get_submissions(student_item_dict, limit=None):
    """Retrieves the submissions for the specified student item,
    ordered by most recent submitted date.

    Returns the submissions relative to the specified student item. Exception
    thrown if no submission is found relative to this location.

    Args:
        student_item_dict (dict): The location of the problem this submission is
            associated with, as defined by a course, student, and item.
        limit (int): Optional parameter for limiting the returned number of
            submissions associated with this student item. If not specified, all
            associated submissions are returned. Note that a limit of 0 is
            treated the same as "no limit" (see the truthiness check below).

    Returns:
        List dict: A list of dicts for the associated student item. The submission
        contains five attributes: student_item, attempt_number, submitted_at,
        created_at, and answer. 'student_item' is the ID of the related student
        item for the submission. 'attempt_number' is the attempt this submission
        represents for this question. 'submitted_at' represents the time this
        submission was submitted, which can be configured, versus the
        'created_at' date, which is when the submission is first created.

    Raises:
        SubmissionRequestError: Raised when the associated student item fails
            validation.
        SubmissionNotFoundError: Raised when a submission cannot be found for
            the associated student item.

    Examples:
        >>> student_item_dict = dict(
        >>>    student_id="Tim",
        >>>    item_id="item_1",
        >>>    course_id="course_1",
        >>>    item_type="type_one"
        >>> )
        >>> get_submissions(student_item_dict, 3)
        [{
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }]

    """
    student_item_model = _get_or_create_student_item(student_item_dict)
    try:
        submission_models = Submission.objects.filter(
            student_item=student_item_model)
    except DatabaseError:
        error_message = (
            u"Error getting submission request for student item {}"
            .format(student_item_dict)
        )
        logger.exception(error_message)
        raise SubmissionNotFoundError(error_message)

    # NOTE(review): `if limit:` means limit=0 returns ALL submissions, not
    # zero -- confirm this is intentional before changing.
    if limit:
        submission_models = submission_models[:limit]

    return SubmissionSerializer(submission_models, many=True).data
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
    """For the given item, get the most recent submission for every student who has submitted.

    This may return a very large result set! It is implemented as a generator for efficiency.

    Args:
        course_id, item_id, item_type (string): The values of the respective student_item fields
            to filter the submissions by.
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Yields:
        Dicts representing the submissions with the following fields:
            student_item
            student_id
            attempt_number
            submitted_at
            created_at
            answer

    Raises:
        Cannot fail unless there's a database error, but may return an empty iterable.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    # We cannot use SELECT DISTINCT ON because it's PostgreSQL only, so unfortunately
    # our results will contain every entry of each student, not just the most recent.
    # We sort by student_id, then newest submitted_at, then highest primary key, so
    # the results will be grouped by student, with the most recent submission being
    # the first one in each group.
    query = submission_qs.select_related('student_item').filter(
        student_item__course_id=course_id,
        student_item__item_id=item_id,
        student_item__item_type=item_type,
    ).order_by('student_item__student_id', '-submitted_at', '-id').iterator()

    # itertools.groupby only groups *consecutive* rows, which is why the
    # query above must be ordered by student_id first.
    for unused_student_id, row_iter in itertools.groupby(query, operator.attrgetter('student_item.student_id')):
        submission = next(row_iter)
        data = SubmissionSerializer(submission).data
        data['student_id'] = submission.student_item.student_id
        yield data
def get_top_submissions(course_id, item_id, item_type, number_of_top_scores, use_cache=True, read_replica=True):
    """Get a number of top scores for an assessment based on a particular student item

    This function will return top scores for the piece of assessment.
    It will consider only the latest and greater than 0 score for a piece of assessment.
    A score is only calculated for a student item if it has completed the workflow for
    a particular assessment module.

    In general, users of top submissions can tolerate some latency
    in the search results, so by default this call uses
    a cache and the read replica (if available).

    Args:
        course_id (str): The course to retrieve for the top scores
        item_id (str): The item within the course to retrieve for the top scores
        item_type (str): The type of item to retrieve
        number_of_top_scores (int): The number of scores to return, greater than 0 and no
            more than 100.

    Kwargs:
        use_cache (bool): If true, check the cache before retrieving querying the database.
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        topscores (dict): The top scores for the assessment for the student item.
            An empty array if there are no scores or all scores are 0.

    Raises:
        SubmissionNotFoundError: Raised when a submission cannot be found for
            the associated student item.
        SubmissionRequestError: Raised when the number of top scores is higher than the
            MAX_TOP_SUBMISSIONS constant.

    Examples:
        >>> course_id = "TestCourse"
        >>> item_id = "u_67"
        >>> item_type = "openassessment"
        >>> number_of_top_scores = 10
        >>>
        >>> get_top_submissions(course_id, item_id, item_type, number_of_top_scores)
        [{
            'score': 20,
            'content': "Platypus"
        },{
            'score': 16,
            'content': "Frog"
        }]

    """
    if number_of_top_scores < 1 or number_of_top_scores > MAX_TOP_SUBMISSIONS:
        error_msg = (
            u"Number of top scores must be a number between 1 and {}.".format(MAX_TOP_SUBMISSIONS)
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    # First check the cache (unless caching is disabled)
    # The requested count is part of the key so different "top N" requests
    # do not collide.
    cache_key = "submissions.top_submissions.{course}.{item}.{type}.{number}".format(
        course=course_id,
        item=item_id,
        type=item_type,
        number=number_of_top_scores
    )
    top_submissions = cache.get(cache_key) if use_cache else None

    # If we can't find it in the cache (or caching is disabled), check the database
    # By default, prefer the read-replica.
    if top_submissions is None:
        try:
            # latest__points_earned__gt=0 excludes both zero scores and
            # "hidden" reset scores.
            query = ScoreSummary.objects.filter(
                student_item__course_id=course_id,
                student_item__item_id=item_id,
                student_item__item_type=item_type,
                latest__points_earned__gt=0
            ).select_related('latest', 'latest__submission').order_by("-latest__points_earned")
            if read_replica:
                query = _use_read_replica(query)
            score_summaries = query[:number_of_top_scores]
        except DatabaseError:
            msg = u"Could not fetch top score summaries for course {}, item {} of type {}".format(
                course_id, item_id, item_type
            )
            logger.exception(msg)
            raise SubmissionInternalError(msg)

        # Retrieve the submission content for each top score
        top_submissions = [
            {
                "score": score_summary.latest.points_earned,
                "content": SubmissionSerializer(score_summary.latest.submission).data['answer']
            }
            for score_summary in score_summaries
        ]

        # Always store the retrieved list in the cache
        cache.set(cache_key, top_submissions, TOP_SUBMISSIONS_CACHE_TIMEOUT)

    return top_submissions
def get_score(student_item):
    """Get the score for a particular student item

    Each student item should have a unique score. This function will return the
    score if it is available. A score is only calculated for a student item if
    it has completed the workflow for a particular assessment module.

    Args:
        student_item (dict): The dictionary representation of a student item.
            Function returns the score related to this student item.

    Returns:
        score (dict): The score associated with this student item. None if there
            is no score found.

    Raises:
        SubmissionInternalError: Raised if a score cannot be retrieved because
            of an internal server error.

    Examples:
        >>> student_item = {
        >>>     "student_id":"Tim",
        >>>     "course_id":"TestCourse",
        >>>     "item_id":"u_67",
        >>>     "item_type":"openassessment"
        >>> }
        >>>
        >>> get_score(student_item)
        [{
            'student_item': 2,
            'submission': 2,
            'points_earned': 8,
            'points_possible': 20,
            'created_at': datetime.datetime(2014, 2, 7, 18, 30, 1, 807911, tzinfo=<UTC>)
        }]

    """
    try:
        item_model = StudentItem.objects.get(**student_item)
        latest_score = ScoreSummary.objects.get(student_item=item_model).latest
    except (StudentItem.DoesNotExist, ScoreSummary.DoesNotExist):
        return None

    # By convention, scores are hidden if "points possible" is set to 0.
    # This can occur when an instructor has reset scores for a student.
    if latest_score.is_hidden():
        return None
    return ScoreSerializer(latest_score).data
def get_scores(course_id, student_id):
    """Return a dict mapping item_ids to scores.

    Scores are represented by serialized Score objects in JSON-like dict
    format.

    This method would be used by an LMS to find all the scores for a given
    student in a given course.

    Scores that are "hidden" (because they have points earned set to zero)
    are excluded from the results.

    Args:
        course_id (str): Course ID, used to do a lookup on the `StudentItem`.
        student_id (str): Student ID, used to do a lookup on the `StudentItem`.

    Returns:
        dict: The keys are `item_id`s (`str`) and the values are serialized
        score dicts. All points are integer values and represent the raw,
        unweighted scores. Submissions does not have any concept of weights.
        If there are no entries matching the `course_id` or `student_id`, we
        simply return an empty dictionary. This is not considered an error
        because there might be many queries for the progress page of a person
        who has never submitted anything.

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.
    """
    try:
        # select_related avoids one extra query per summary when building
        # the result dict below.
        score_summaries = ScoreSummary.objects.filter(
            student_item__course_id=course_id,
            student_item__student_id=student_id,
        ).select_related('latest', 'latest__submission', 'student_item')
    except DatabaseError:
        msg = u"Could not fetch scores for course {}, student {}".format(
            course_id, student_id
        )
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    # Hidden scores (e.g. instructor resets) are excluded from the result.
    scores = {
        summary.student_item.item_id: UnannotatedScoreSerializer(summary.latest).data
        for summary in score_summaries if not summary.latest.is_hidden()
    }
    return scores
def get_latest_score_for_submission(submission_uuid, read_replica=False):
    """
    Retrieve the latest score for a particular submission.

    Args:
        submission_uuid (str): The UUID of the submission to retrieve.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        dict: The serialized score model, or None if no score is available,
        or None if the latest score is a hidden (reset) score.

    """
    try:
        # Ensure that submission_uuid is valid before fetching score
        submission_model = _get_submission_model(submission_uuid, read_replica)
        # Scores are immutable; the most recently created row (highest id)
        # is the latest score for the submission.
        score_qs = Score.objects.filter(
            submission__uuid=submission_model.uuid
        ).order_by("-id").select_related("submission")
        if read_replica:
            score_qs = _use_read_replica(score_qs)
        score = score_qs[0]
        if score.is_hidden():
            # Hidden scores (points possible == 0, e.g. a reset) are
            # reported as "no score".
            return None
    except (IndexError, Submission.DoesNotExist):
        # IndexError: no Score rows exist for this submission.
        return None

    return ScoreSerializer(score).data
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
    """
    Reset scores for a specific student on a specific problem.

    Note: this does *not* delete `Score` models from the database,
    since these are immutable. It simply creates a new score with
    the "reset" flag set to True.

    Args:
        student_id (unicode): The ID of the student for whom to reset scores.
        course_id (unicode): The ID of the course containing the item to reset.
        item_id (unicode): The ID of the item for which to reset scores.
        clear_state (bool): If True, will appear to delete any submissions associated with the specified StudentItem
        emit_signal (bool): If True (the default), fire the `score_reset`
            signal so listeners can react to the reset.

    Returns:
        None

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.

    """
    # Retrieve the student item
    try:
        student_item = StudentItem.objects.get(
            student_id=student_id, course_id=course_id, item_id=item_id
        )
    except StudentItem.DoesNotExist:
        # If there is no student item, then there is no score to reset,
        # so we can return immediately.
        return

    # Create a "reset" score
    try:
        score = Score.create_reset_score(student_item)
        if emit_signal:
            # Send a signal out to any listeners who are waiting for scoring events.
            score_reset.send(
                sender=None,
                anonymous_user_id=student_id,
                course_id=course_id,
                item_id=item_id,
                created_at=score.created_at,
            )

        if clear_state:
            for sub in student_item.submission_set.all():
                # soft-delete the Submission (rows stay in the database but
                # are excluded by the soft-delete manager)
                sub.status = Submission.DELETED
                sub.save(update_fields=["status"])

                # Also clear out cached values
                cache_key = Submission.get_cache_key(sub.uuid)
                cache.delete(cache_key)

    except DatabaseError:
        msg = (
            u"Error occurred while reseting scores for"
            u" item {item_id} in course {course_id} for student {student_id}"
        ).format(item_id=item_id, course_id=course_id, student_id=student_id)
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    else:
        # Success path: log only when no DatabaseError was raised.
        msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
            item_id=item_id, course_id=course_id, student_id=student_id
        )
        logger.info(msg)
def set_score(submission_uuid, points_earned, points_possible,
              annotation_creator=None, annotation_type=None, annotation_reason=None):
    """Set a score for a particular submission.

    Sets the score for a particular submission. This score is calculated
    externally to the API.

    Args:
        submission_uuid (str): UUID for the submission (must exist).
        points_earned (int): The earned points for this submission.
        points_possible (int): The total points possible for this particular student item.
        annotation_creator (str): An optional field for recording who gave this particular score
        annotation_type (str): An optional field for recording what type of annotation should be created,
            e.g. "staff_override".
        annotation_reason (str): An optional field for recording why this score was set to its value.

    Returns:
        None

    Raises:
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to save the score.
        SubmissionRequestError: Thrown if the given student item or submission
            are not found.

    Examples:
        >>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
        {
            'student_item': 2,
            'submission': 1,
            'points_earned': 11,
            'points_possible': 12,
            'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
        }

    """
    try:
        submission_model = _get_submission_model(submission_uuid)
    except Submission.DoesNotExist:
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except DatabaseError:
        error_msg = u"Could not retrieve submission {}.".format(
            submission_uuid
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    # Validate the score fields before touching the database.
    score = ScoreSerializer(
        data={
            "student_item": submission_model.student_item.pk,
            "submission": submission_model.pk,
            "points_earned": points_earned,
            "points_possible": points_possible,
        }
    )
    if not score.is_valid():
        logger.exception(score.errors)
        raise SubmissionInternalError(score.errors)

    # When we save the score, a score summary will be created if
    # it does not already exist.
    # When the database's isolation level is set to repeatable-read,
    # it's possible for a score summary to exist for this student item,
    # even though we cannot retrieve it.
    # In this case, we assume that someone else has already created
    # a score summary and ignore the error.
    # TODO: once we're using Django 1.8, use transactions to ensure that these
    # two models are saved at the same time.
    try:
        score_model = score.save()
        _log_score(score_model)
        if annotation_creator is not None:
            # Record provenance for this score (e.g. a staff override).
            score_annotation = ScoreAnnotation(
                score=score_model,
                creator=annotation_creator,
                annotation_type=annotation_type,
                reason=annotation_reason
            )
            score_annotation.save()
        # Send a signal out to any listeners who are waiting for scoring events.
        score_set.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            anonymous_user_id=submission_model.student_item.student_id,
            course_id=submission_model.student_item.course_id,
            item_id=submission_model.student_item.item_id,
            created_at=score_model.created_at,
        )
    except IntegrityError:
        # See the repeatable-read note above: a concurrent writer already
        # created the score summary, which is an acceptable outcome.
        pass
def _log_submission(submission, student_item):
    """
    Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    message = (
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})"
    ).format(
        submission_uuid=submission["uuid"],
        course_id=student_item["course_id"],
        item_id=student_item["item_id"],
        anonymous_student_id=student_item["student_id"],
    )
    logger.info(message)
def _log_score(score):
    """
    Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    message = "Score of ({}/{}) set for submission {}".format(
        score.points_earned, score.points_possible, score.submission.uuid
    )
    logger.info(message)
def _get_or_create_student_item(student_item_dict):
    """Gets or creates a Student Item that matches the values specified.

    Attempts to get the specified Student Item. If it does not exist, the
    specified parameters are validated, and a new Student Item is created.

    Args:
        student_item_dict (dict): The dict containing the student_id, item_id,
            course_id, and item_type that uniquely defines a student item.

    Returns:
        StudentItem: The student item that was retrieved or created.

    Raises:
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to create or retrieve the specified student item.
        SubmissionRequestError: Thrown if the given student item parameters fail
            validation.

    Examples:
        >>> student_item_dict = dict(
        >>>    student_id="Tim",
        >>>    item_id="item_1",
        >>>    course_id="course_1",
        >>>    item_type="type_one"
        >>> )
        >>> _get_or_create_student_item(student_item_dict)
        {'item_id': 'item_1', 'item_type': 'type_one', 'course_id': 'course_1', 'student_id': 'Tim'}

    """
    try:
        try:
            return StudentItem.objects.get(**student_item_dict)
        except StudentItem.DoesNotExist:
            # No existing row: validate the inputs and create a new one.
            student_item_serializer = StudentItemSerializer(
                data=student_item_dict
            )
            if not student_item_serializer.is_valid():
                logger.error(
                    u"Invalid StudentItemSerializer: errors:{} data:{}".format(
                        student_item_serializer.errors,
                        student_item_dict
                    )
                )
                raise SubmissionRequestError(field_errors=student_item_serializer.errors)
            return student_item_serializer.save()
    except DatabaseError:
        # Wrap low-level database failures in the API's internal error type.
        error_message = u"An error occurred creating student item: {}".format(
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _use_read_replica(queryset):
    """
    Route the queryset through the read replica when one is configured.

    Args:
        queryset (QuerySet)

    Returns:
        QuerySet: the same queryset, bound to the "read_replica" database
        if that alias exists in settings.DATABASES; otherwise unchanged.
    """
    if "read_replica" in settings.DATABASES:
        return queryset.using("read_replica")
    return queryset
|
edx/edx-submissions | submissions/api.py | get_top_submissions | python | def get_top_submissions(course_id, item_id, item_type, number_of_top_scores, use_cache=True, read_replica=True):
if number_of_top_scores < 1 or number_of_top_scores > MAX_TOP_SUBMISSIONS:
error_msg = (
u"Number of top scores must be a number between 1 and {}.".format(MAX_TOP_SUBMISSIONS)
)
logger.exception(error_msg)
raise SubmissionRequestError(msg=error_msg)
# First check the cache (unless caching is disabled)
cache_key = "submissions.top_submissions.{course}.{item}.{type}.{number}".format(
course=course_id,
item=item_id,
type=item_type,
number=number_of_top_scores
)
top_submissions = cache.get(cache_key) if use_cache else None
# If we can't find it in the cache (or caching is disabled), check the database
# By default, prefer the read-replica.
if top_submissions is None:
try:
query = ScoreSummary.objects.filter(
student_item__course_id=course_id,
student_item__item_id=item_id,
student_item__item_type=item_type,
latest__points_earned__gt=0
).select_related('latest', 'latest__submission').order_by("-latest__points_earned")
if read_replica:
query = _use_read_replica(query)
score_summaries = query[:number_of_top_scores]
except DatabaseError:
msg = u"Could not fetch top score summaries for course {}, item {} of type {}".format(
course_id, item_id, item_type
)
logger.exception(msg)
raise SubmissionInternalError(msg)
# Retrieve the submission content for each top score
top_submissions = [
{
"score": score_summary.latest.points_earned,
"content": SubmissionSerializer(score_summary.latest.submission).data['answer']
}
for score_summary in score_summaries
]
# Always store the retrieved list in the cache
cache.set(cache_key, top_submissions, TOP_SUBMISSIONS_CACHE_TIMEOUT)
return top_submissions | Get a number of top scores for an assessment based on a particular student item
This function will return top scores for the piece of assessment.
It will consider only the latest and greater than 0 score for a piece of assessment.
A score is only calculated for a student item if it has completed the workflow for
a particular assessment module.
In general, users of top submissions can tolerate some latency
in the search results, so by default this call uses
a cache and the read replica (if available).
Args:
course_id (str): The course to retrieve for the top scores
item_id (str): The item within the course to retrieve for the top scores
item_type (str): The type of item to retrieve
number_of_top_scores (int): The number of scores to return, greater than 0 and no
more than 100.
Kwargs:
use_cache (bool): If true, check the cache before retrieving querying the database.
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Returns:
topscores (dict): The top scores for the assessment for the student item.
An empty array if there are no scores or all scores are 0.
Raises:
SubmissionNotFoundError: Raised when a submission cannot be found for
the associated student item.
SubmissionRequestError: Raised when the number of top scores is higher than the
MAX_TOP_SUBMISSIONS constant.
Examples:
>>> course_id = "TestCourse"
>>> item_id = "u_67"
>>> item_type = "openassessment"
>>> number_of_top_scores = 10
>>>
>>> get_top_submissions(course_id, item_id, item_type, number_of_top_scores)
[{
'score': 20,
'content': "Platypus"
},{
'score': 16,
'content': "Frog"
}] | train | https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/api.py#L526-L625 | [
"def _use_read_replica(queryset):\n \"\"\"\n Use the read replica if it's available.\n\n Args:\n queryset (QuerySet)\n\n Returns:\n QuerySet\n\n \"\"\"\n return (\n queryset.using(\"read_replica\")\n if \"read_replica\" in settings.DATABASES\n else queryset\n )\n"
] | """
Public interface for the submissions app.
"""
from __future__ import absolute_import
import copy
import itertools
import logging
import operator
import json
from uuid import UUID
from django.conf import settings
from django.core.cache import cache
from django.db import IntegrityError, DatabaseError
from submissions.serializers import (
SubmissionSerializer, StudentItemSerializer, ScoreSerializer, UnannotatedScoreSerializer
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary, ScoreAnnotation, score_set, score_reset
import six
logger = logging.getLogger("submissions.api")
# By default, limit the number of top submissions
# Anything above this limit will result in a request error
MAX_TOP_SUBMISSIONS = 100
# Set a relatively low cache timeout for top submissions.
TOP_SUBMISSIONS_CACHE_TIMEOUT = 300
class SubmissionError(Exception):
    """Base class for errors raised during submission actions.

    Raised whenever the submission API is unable to carry out a
    requested operation.
    """
    pass
class SubmissionInternalError(SubmissionError):
    """Signals a failure inside the Submission API itself.

    Raised when something goes wrong in the underlying implementation
    (e.g. database access), rather than through incorrect use of the API.
    """
    pass
class SubmissionNotFoundError(SubmissionError):
    """Signals that no submission matched the request.

    Raised when the state supplied to an API call yields no matching
    Submission records.
    """
    pass
class SubmissionRequestError(SubmissionError):
    """This error is raised when there was a request-specific error

    This error is reserved for problems specific to the use of the API.
    """

    def __init__(self, msg="", field_errors=None):
        """
        Configure the submission request error.

        Keyword Args:
            msg (unicode): The error message.
            field_errors (dict): A dictionary of errors (list of unicode)
                specific to a fields provided in the request.

        Example usage:

        >>> raise SubmissionRequestError(
        >>>     "An unexpected error occurred"
        >>>     {"answer": ["Maximum answer length exceeded."]}
        >>> )
        """
        super(SubmissionRequestError, self).__init__(msg)
        # Store the message explicitly: Python 3 removed the implicit
        # `Exception.message` attribute, so without this line __repr__
        # would raise AttributeError under Python 3.
        self.message = msg
        # Deep-copy caller-supplied errors so later mutation of the
        # caller's dict cannot change this exception's state.
        self.field_errors = (
            copy.deepcopy(field_errors)
            if field_errors is not None
            else {}
        )
        self.args += (self.field_errors,)

    def __repr__(self):
        """
        Show the field errors upon output.
        """
        return '{}(msg="{}", field_errors={})'.format(
            self.__class__.__name__, self.message, self.field_errors
        )
def create_submission(student_item_dict, answer, submitted_at=None, attempt_number=None):
    """Creates a submission for assessment.

    Generic means by which to submit an answer for assessment.

    Args:
        student_item_dict (dict): The student_item this
            submission is associated with. This is used to determine which
            course, student, and location this submission belongs to.

        answer (JSON-serializable): The answer given by the student to be assessed.

        submitted_at (datetime): The date in which this submission was submitted.
            If not specified, defaults to the current date.

        attempt_number (int): A student may be able to submit multiple attempts
            per question. This allows the designated attempt to be overridden.
            If the attempt is not specified, it will take the most recent
            submission, as specified by the submitted_at time, and use its
            attempt_number plus one.

    Returns:
        dict: A representation of the created Submission. The submission
        contains five attributes: student_item, attempt_number, submitted_at,
        created_at, and answer. 'student_item' is the ID of the related student
        item for the submission. 'attempt_number' is the attempt this submission
        represents for this question. 'submitted_at' represents the time this
        submission was submitted, which can be configured, versus the
        'created_at' date, which is when the submission is first created.

    Raises:
        SubmissionRequestError: Raised when there are validation errors for the
            student item or submission. This can be caused by the student item
            missing required values, the submission being too long, the
            attempt_number is negative, or the given submitted_at time is invalid.
        SubmissionInternalError: Raised when submission access causes an
            internal error.

    Examples:
        >>> student_item_dict = dict(
        >>>    student_id="Tim",
        >>>    item_id="item_1",
        >>>    course_id="course_1",
        >>>    item_type="type_one"
        >>> )
        >>> create_submission(student_item_dict, "The answer is 42.", datetime.utcnow, 1)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284 tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }
    """
    student_item_model = _get_or_create_student_item(student_item_dict)
    if attempt_number is None:
        try:
            # Only one row is needed to derive the next attempt number.
            # NOTE(review): this assumes the Submission model's default
            # ordering puts the most recent attempt first — confirm in
            # Submission.Meta.
            submissions = Submission.objects.filter(
                student_item=student_item_model)[:1]
        except DatabaseError:
            error_message = u"An error occurred while filtering submissions for student item: {}".format(
                student_item_dict)
            logger.exception(error_message)
            raise SubmissionInternalError(error_message)
        attempt_number = submissions[0].attempt_number + 1 if submissions else 1

    model_kwargs = {
        "student_item": student_item_model.pk,
        "answer": answer,
        "attempt_number": attempt_number,
    }
    # Omit submitted_at entirely when not given so the model's default
    # (current time) applies.
    if submitted_at:
        model_kwargs["submitted_at"] = submitted_at

    try:
        # Serializer validation covers answer size, attempt_number sign,
        # and submitted_at format; failures surface as field_errors.
        submission_serializer = SubmissionSerializer(data=model_kwargs)
        if not submission_serializer.is_valid():
            raise SubmissionRequestError(field_errors=submission_serializer.errors)
        submission_serializer.save()

        sub_data = submission_serializer.data
        _log_submission(sub_data, student_item_dict)

        return sub_data

    except DatabaseError:
        error_message = u"An error occurred while creating submission {} for student item: {}".format(
            model_kwargs,
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _get_submission_model(uuid, read_replica=False):
    """
    Helper to retrieve a given Submission object from the database. Helper is needed to centralize logic that fixes
    EDUCATOR-1090, because uuids are stored both with and without hyphens.

    Args:
        uuid (str): the submission uuid, with or without hyphens.
        read_replica (bool): when True, the initial lookup uses the read
            replica (if configured).

    Raises:
        Submission.DoesNotExist: if no row matches either uuid form.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    try:
        submission = submission_qs.get(uuid=uuid)
    except Submission.DoesNotExist:
        try:
            # Normalize to the hyphenated form; UUID() raises ValueError for
            # malformed input, which propagates to the caller unchanged.
            hyphenated_value = six.text_type(UUID(uuid))
            # Bind the uuid as a query parameter instead of splicing it into
            # the SQL text, so the driver performs quoting/escaping.
            query = """
                SELECT
                    `submissions_submission`.`id`,
                    `submissions_submission`.`uuid`,
                    `submissions_submission`.`student_item_id`,
                    `submissions_submission`.`attempt_number`,
                    `submissions_submission`.`submitted_at`,
                    `submissions_submission`.`created_at`,
                    `submissions_submission`.`raw_answer`,
                    `submissions_submission`.`status`
                FROM
                    `submissions_submission`
                WHERE (
                    NOT (`submissions_submission`.`status` = 'D')
                    AND `submissions_submission`.`uuid` = %s
                )
            """
            # We can use Submission.objects instead of the SoftDeletedManager, we'll include that logic manually
            submission = Submission.objects.raw(query, [hyphenated_value])[0]
        except IndexError:
            raise Submission.DoesNotExist()

        # Avoid the extra hit next time
        submission.save(update_fields=['uuid'])

    return submission
def get_submission(submission_uuid, read_replica=False):
    """Retrieves a single submission by uuid.

    Args:
        submission_uuid (str): Identifier for the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Raises:
        SubmissionNotFoundError: Raised if the submission does not exist.
        SubmissionRequestError: Raised if the search parameter is not a string.
        SubmissionInternalError: Raised for unknown errors.

    Examples:
        >>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }
    """
    if not isinstance(submission_uuid, six.string_types):
        if isinstance(submission_uuid, UUID):
            # Accept UUID objects for convenience; cache keys use the text form.
            submission_uuid = six.text_type(submission_uuid)
        else:
            raise SubmissionRequestError(
                msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
            )

    cache_key = Submission.get_cache_key(submission_uuid)
    try:
        cached_submission_data = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception
        # (for example, memcache keys that contain spaces)
        logger.exception("Error occurred while retrieving submission from the cache")
        cached_submission_data = None

    if cached_submission_data:
        logger.info("Get submission {} (cached)".format(submission_uuid))
        return cached_submission_data

    try:
        submission = _get_submission_model(submission_uuid, read_replica)
        submission_data = SubmissionSerializer(submission).data
        # Populate the cache for subsequent lookups; no explicit timeout,
        # so the backend's default applies.
        cache.set(cache_key, submission_data)
    except Submission.DoesNotExist:
        logger.error("Submission {} not found.".format(submission_uuid))
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except Exception as exc:
        # Something very unexpected has just happened (like DB misconfig)
        err_msg = "Could not get submission due to error: {}".format(exc)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)

    logger.info("Get submission {}".format(submission_uuid))
    return submission_data
def get_submission_and_student(uuid, read_replica=False):
    """
    Retrieve a submission by its unique identifier, including the associated student item.

    Args:
        uuid (str): the unique identifier of the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        Serialized Submission model (dict) containing a serialized StudentItem model

    Raises:
        SubmissionNotFoundError: Raised if the submission does not exist.
        SubmissionRequestError: Raised if the search parameter is not a string.
        SubmissionInternalError: Raised for unknown errors.

    """
    # This may raise API exceptions
    submission = get_submission(uuid, read_replica=read_replica)

    # Retrieve the student item from the cache
    cache_key = "submissions.student_item.{}".format(submission['student_item'])
    try:
        cached_student_item = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception
        # (for example, memcache keys that contain spaces)
        logger.exception("Error occurred while retrieving student item from the cache")
        cached_student_item = None

    if cached_student_item is not None:
        # Replace the numeric FK with the full serialized student item.
        submission['student_item'] = cached_student_item
    else:
        # There is probably a more idiomatic way to do this using the Django REST framework
        try:
            student_item_qs = StudentItem.objects
            if read_replica:
                student_item_qs = _use_read_replica(student_item_qs)

            student_item = student_item_qs.get(id=submission['student_item'])
            submission['student_item'] = StudentItemSerializer(student_item).data
            cache.set(cache_key, submission['student_item'])
        except Exception as ex:
            err_msg = "Could not get submission due to error: {}".format(ex)
            logger.exception(err_msg)
            raise SubmissionInternalError(err_msg)

    return submission
def get_submissions(student_item_dict, limit=None):
    """Retrieves the submissions for the specified student item,
    ordered by most recent submitted date.

    Returns the submissions relative to the specified student item. Exception
    thrown if no submission is found relative to this location.

    Args:
        student_item_dict (dict): The location of the problem this submission is
            associated with, as defined by a course, student, and item.
        limit (int): Optional parameter for limiting the returned number of
            submissions associated with this student item. If not specified, all
            associated submissions are returned.

    Returns:
        List dict: A list of dicts for the associated student item. The submission
        contains five attributes: student_item, attempt_number, submitted_at,
        created_at, and answer. 'student_item' is the ID of the related student
        item for the submission. 'attempt_number' is the attempt this submission
        represents for this question. 'submitted_at' represents the time this
        submission was submitted, which can be configured, versus the
        'created_at' date, which is when the submission is first created.

    Raises:
        SubmissionRequestError: Raised when the associated student item fails
            validation.
        SubmissionNotFoundError: Raised when a submission cannot be found for
            the associated student item.

    Examples:
        >>> student_item_dict = dict(
        >>>    student_id="Tim",
        >>>    item_id="item_1",
        >>>    course_id="course_1",
        >>>    item_type="type_one"
        >>> )
        >>> get_submissions(student_item_dict, 3)
        [{
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }]
    """
    # NOTE: this creates the student item if it does not already exist.
    student_item_model = _get_or_create_student_item(student_item_dict)
    try:
        submission_models = Submission.objects.filter(
            student_item=student_item_model)
    except DatabaseError:
        error_message = (
            u"Error getting submission request for student item {}"
            .format(student_item_dict)
        )
        logger.exception(error_message)
        raise SubmissionNotFoundError(error_message)

    if limit:
        # Slicing translates to a SQL LIMIT; the queryset is still lazy here.
        submission_models = submission_models[:limit]

    return SubmissionSerializer(submission_models, many=True).data
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
    """For the given item, get the most recent submission for every student who has submitted.

    This may return a very large result set! It is implemented as a generator for efficiency.

    Args:
        course_id, item_id, item_type (string): The values of the respective student_item fields
            to filter the submissions by.
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Yields:
        Dicts representing the submissions with the following fields:
            student_item
            student_id
            attempt_number
            submitted_at
            created_at
            answer

    Raises:
        Cannot fail unless there's a database error, but may return an empty iterable.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    # We cannot use SELECT DISTINCT ON because it's PostgreSQL only, so unfortunately
    # our results will contain every entry of each student, not just the most recent.
    # We sort by student_id, submission time, and primary key, so the results will be
    # grouped by student, with the most recent submission being the first one in each group.
    query = submission_qs.select_related('student_item').filter(
        student_item__course_id=course_id,
        student_item__item_id=item_id,
        student_item__item_type=item_type,
    ).order_by('student_item__student_id', '-submitted_at', '-id').iterator()

    # groupby relies on the ordering above: rows for each student are
    # contiguous, and next(row_iter) picks that student's latest submission.
    for unused_student_id, row_iter in itertools.groupby(query, operator.attrgetter('student_item.student_id')):
        submission = next(row_iter)
        data = SubmissionSerializer(submission).data
        data['student_id'] = submission.student_item.student_id
        yield data
def get_all_course_submission_information(course_id, item_type, read_replica=True):
    """ For the given course, get all student items of the given item type, all the submissions for those itemes,
    and the latest scores for each item. If a submission was given a score that is not the latest score for the
    relevant student item, it will still be included but without score.

    Args:
        course_id (str): The course that we are getting submissions from.
        item_type (str): The type of items that we are getting submissions for.
        read_replica (bool): Try to use the database's read replica if it's available.

    Yields:
        A tuple of three dictionaries representing:
        (1) a student item with the following fields:
            student_id
            course_id
            student_item
            item_type
        (2) a submission with the following fields:
            student_item
            attempt_number
            submitted_at
            created_at
            answer
        (3) a score with the following fields, if one exists and it is the latest score:
            (if both conditions are not met, an empty dict is returned here)
            student_item
            submission
            points_earned
            points_possible
            created_at
            submission_uuid
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)

    # select_related pulls student item + score summary in one query per batch,
    # avoiding an N+1 pattern over what may be a very large result set.
    query = submission_qs.select_related('student_item__scoresummary__latest__submission').filter(
        student_item__course_id=course_id,
        student_item__item_type=item_type,
    ).iterator()

    for submission in query:
        student_item = submission.student_item
        serialized_score = {}
        if hasattr(student_item, 'scoresummary'):
            latest_score = student_item.scoresummary.latest
            # Only include the score if it is not a reset score (is_hidden), and if the current submission is the same
            # as the student_item's latest score's submission. This matches the behavior of the API's get_score method.
            if (not latest_score.is_hidden()) and latest_score.submission.uuid == submission.uuid:
                serialized_score = ScoreSerializer(latest_score).data
        yield (
            StudentItemSerializer(student_item).data,
            SubmissionSerializer(submission).data,
            serialized_score
        )
def get_score(student_item):
    """Return the score for one student item, or None.

    Each student item has at most one current score. A score exists only
    once the student item has completed the workflow for a particular
    assessment module; "hidden" scores (points possible set to 0, the
    convention used when an instructor resets scores) are treated as
    absent.

    Args:
        student_item (dict): The dictionary representation of a student item.
            Function returns the score related to this student item.

    Returns:
        score (dict): The score associated with this student item. None if there
            is no score found.

    Raises:
        SubmissionInternalError: Raised if a score cannot be retrieved because
            of an internal server error.

    Examples:
        >>> student_item = {
        >>>     "student_id":"Tim",
        >>>     "course_id":"TestCourse",
        >>>     "item_id":"u_67",
        >>>     "item_type":"openassessment"
        >>> }
        >>>
        >>> get_score(student_item)
        [{
            'student_item': 2,
            'submission': 2,
            'points_earned': 8,
            'points_possible': 20,
            'created_at': datetime.datetime(2014, 2, 7, 18, 30, 1, 807911, tzinfo=<UTC>)
        }]
    """
    try:
        item_model = StudentItem.objects.get(**student_item)
        latest = ScoreSummary.objects.get(student_item=item_model).latest
    except (ScoreSummary.DoesNotExist, StudentItem.DoesNotExist):
        # No item or no summary means there is simply no score yet.
        return None

    # Reset scores are flagged via is_hidden() and reported as "no score".
    if latest.is_hidden():
        return None
    return ScoreSerializer(latest).data
def get_scores(course_id, student_id):
    """Return a dict mapping item_ids to scores.

    Scores are represented by serialized Score objects in JSON-like dict
    format.

    This method would be used by an LMS to find all the scores for a given
    student in a given course.

    Scores that are "hidden" (because they have points earned set to zero)
    are excluded from the results.

    Args:
        course_id (str): Course ID, used to do a lookup on the `StudentItem`.
        student_id (str): Student ID, used to do a lookup on the `StudentItem`.

    Returns:
        dict: The keys are `item_id`s (`str`) and the values are tuples of
        `(points_earned, points_possible)`. All points are integer values and
        represent the raw, unweighted scores. Submissions does not have any
        concept of weights. If there are no entries matching the `course_id` or
        `student_id`, we simply return an empty dictionary. This is not
        considered an error because there might be many queries for the progress
        page of a person who has never submitted anything.

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.
    """
    try:
        # select_related fetches each summary's latest score, its submission,
        # and the student item in one query instead of one query per summary.
        score_summaries = ScoreSummary.objects.filter(
            student_item__course_id=course_id,
            student_item__student_id=student_id,
        ).select_related('latest', 'latest__submission', 'student_item')
    except DatabaseError:
        msg = u"Could not fetch scores for course {}, student {}".format(
            course_id, student_id
        )
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    # Hidden (reset) scores are skipped, matching get_score's behavior.
    scores = {
        summary.student_item.item_id: UnannotatedScoreSerializer(summary.latest).data
        for summary in score_summaries if not summary.latest.is_hidden()
    }
    return scores
def get_latest_score_for_submission(submission_uuid, read_replica=False):
    """
    Retrieve the latest score for a particular submission.

    Args:
        submission_uuid (str): The UUID of the submission to retrieve.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        dict: The serialized score model, or None if no score is available.

    Raises:
        SubmissionNotFoundError / SubmissionInternalError: propagated from
            the submission lookup if the uuid is invalid or the DB errors.
    """
    try:
        # Ensure that submission_uuid is valid before fetching score
        submission_model = _get_submission_model(submission_uuid, read_replica)
        # Ordering by descending id makes index [0] the most recent score.
        score_qs = Score.objects.filter(
            submission__uuid=submission_model.uuid
        ).order_by("-id").select_related("submission")

        if read_replica:
            score_qs = _use_read_replica(score_qs)

        score = score_qs[0]
        # Hidden scores (instructor resets) are reported as "no score".
        if score.is_hidden():
            return None
    except (IndexError, Submission.DoesNotExist):
        # IndexError: no Score rows exist for this submission.
        return None

    return ScoreSerializer(score).data
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
    """
    Reset scores for a specific student on a specific problem.

    Note: this does *not* delete `Score` models from the database,
    since these are immutable. It simply creates a new score with
    the "reset" flag set to True.

    Args:
        student_id (unicode): The ID of the student for whom to reset scores.
        course_id (unicode): The ID of the course containing the item to reset.
        item_id (unicode): The ID of the item for which to reset scores.
        clear_state (bool): If True, will appear to delete any submissions associated with the specified StudentItem
        emit_signal (bool): If True, send the score_reset signal to listeners.

    Returns:
        None

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.
    """
    # Retrieve the student item
    try:
        student_item = StudentItem.objects.get(
            student_id=student_id, course_id=course_id, item_id=item_id
        )
    except StudentItem.DoesNotExist:
        # If there is no student item, then there is no score to reset,
        # so we can return immediately.
        return

    # Create a "reset" score
    try:
        score = Score.create_reset_score(student_item)
        if emit_signal:
            # Send a signal out to any listeners who are waiting for scoring events.
            score_reset.send(
                sender=None,
                anonymous_user_id=student_id,
                course_id=course_id,
                item_id=item_id,
                created_at=score.created_at,
            )

        if clear_state:
            for sub in student_item.submission_set.all():
                # soft-delete the Submission
                sub.status = Submission.DELETED
                sub.save(update_fields=["status"])

                # Also clear out cached values
                cache_key = Submission.get_cache_key(sub.uuid)
                cache.delete(cache_key)

    except DatabaseError:
        # NOTE(review): "reseting" below is misspelled in the emitted log
        # message; left untouched to avoid breaking log-based tooling.
        msg = (
            u"Error occurred while reseting scores for"
            u" item {item_id} in course {course_id} for student {student_id}"
        ).format(item_id=item_id, course_id=course_id, student_id=student_id)
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    else:
        msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
            item_id=item_id, course_id=course_id, student_id=student_id
        )
        logger.info(msg)
def set_score(submission_uuid, points_earned, points_possible,
              annotation_creator=None, annotation_type=None, annotation_reason=None):
    """Set a score for a particular submission.

    Sets the score for a particular submission. This score is calculated
    externally to the API.

    Args:
        submission_uuid (str): UUID for the submission (must exist).
        points_earned (int): The earned points for this submission.
        points_possible (int): The total points possible for this particular student item.

        annotation_creator (str): An optional field for recording who gave this particular score
        annotation_type (str): An optional field for recording what type of annotation should be created,
            e.g. "staff_override".
        annotation_reason (str): An optional field for recording why this score was set to its value.

    Returns:
        None

    Raises:
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to save the score.
        SubmissionRequestError: Thrown if the given student item or submission
            are not found.
        SubmissionNotFoundError: Thrown if the submission uuid does not match
            any existing submission.

    Examples:
        >>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
        {
            'student_item': 2,
            'submission': 1,
            'points_earned': 11,
            'points_possible': 12,
            'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
        }
    """
    try:
        submission_model = _get_submission_model(submission_uuid)
    except Submission.DoesNotExist:
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except DatabaseError:
        error_msg = u"Could not retrieve submission {}.".format(
            submission_uuid
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    score = ScoreSerializer(
        data={
            "student_item": submission_model.student_item.pk,
            "submission": submission_model.pk,
            "points_earned": points_earned,
            "points_possible": points_possible,
        }
    )
    if not score.is_valid():
        logger.exception(score.errors)
        raise SubmissionInternalError(score.errors)

    # When we save the score, a score summary will be created if
    # it does not already exist.
    # When the database's isolation level is set to repeatable-read,
    # it's possible for a score summary to exist for this student item,
    # even though we cannot retrieve it.
    # In this case, we assume that someone else has already created
    # a score summary and ignore the error.
    # TODO: once we're using Django 1.8, use transactions to ensure that these
    # two models are saved at the same time.
    try:
        score_model = score.save()
        _log_score(score_model)
        if annotation_creator is not None:
            # Annotations record provenance (who/why) for override scores.
            score_annotation = ScoreAnnotation(
                score=score_model,
                creator=annotation_creator,
                annotation_type=annotation_type,
                reason=annotation_reason
            )
            score_annotation.save()
        # Send a signal out to any listeners who are waiting for scoring events.
        score_set.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            anonymous_user_id=submission_model.student_item.student_id,
            course_id=submission_model.student_item.course_id,
            item_id=submission_model.student_item.item_id,
            created_at=score_model.created_at,
        )
    except IntegrityError:
        # See the repeatable-read note above: a concurrent writer already
        # created the score summary, which is an acceptable outcome.
        pass
def _log_submission(submission, student_item):
    """
    Emit an info-level log line recording a newly created submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    message = (
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})"
    ).format(
        submission_uuid=submission["uuid"],
        course_id=student_item["course_id"],
        item_id=student_item["item_id"],
        anonymous_student_id=student_item["student_id"],
    )
    logger.info(message)
def _log_score(score):
    """
    Emit an info-level log line recording a newly set score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    message = "Score of ({}/{}) set for submission {}".format(
        score.points_earned, score.points_possible, score.submission.uuid
    )
    logger.info(message)
def _get_or_create_student_item(student_item_dict):
    """Gets or creates a Student Item that matches the values specified.

    Attempts to get the specified Student Item. If it does not exist, the
    specified parameters are validated, and a new Student Item is created.

    Args:
        student_item_dict (dict): The dict containing the student_id, item_id,
            course_id, and item_type that uniquely defines a student item.

    Returns:
        StudentItem: The student item that was retrieved or created.

    Raises:
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to create or retrieve the specified student item.
        SubmissionRequestError: Thrown if the given student item parameters fail
            validation.

    Examples:
        >>> student_item_dict = dict(
        >>>    student_id="Tim",
        >>>    item_id="item_1",
        >>>    course_id="course_1",
        >>>    item_type="type_one"
        >>> )
        >>> _get_or_create_student_item(student_item_dict)
        {'item_id': 'item_1', 'item_type': 'type_one', 'course_id': 'course_1', 'student_id': 'Tim'}
    """
    try:
        try:
            return StudentItem.objects.get(**student_item_dict)
        except StudentItem.DoesNotExist:
            # EAFP: attempt the lookup first, validate + create on miss.
            student_item_serializer = StudentItemSerializer(
                data=student_item_dict
            )
            if not student_item_serializer.is_valid():
                logger.error(
                    u"Invalid StudentItemSerializer: errors:{} data:{}".format(
                        student_item_serializer.errors,
                        student_item_dict
                    )
                )
                raise SubmissionRequestError(field_errors=student_item_serializer.errors)
            return student_item_serializer.save()
    except DatabaseError:
        error_message = u"An error occurred creating student item: {}".format(
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _use_read_replica(queryset):
    """
    Bind *queryset* to the read replica when one is configured.

    Args:
        queryset (QuerySet)

    Returns:
        QuerySet: the queryset routed to "read_replica" if that alias
        exists in settings.DATABASES, otherwise the queryset unchanged.
    """
    if "read_replica" in settings.DATABASES:
        return queryset.using("read_replica")
    return queryset
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.