repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
AirtestProject/Airtest | benchmark/plot.py | PlotResult.extract_data | python | def extract_data(self):
self.time_axis = []
self.cpu_axis = []
self.mem_axis = []
self.timestamp_list = []
plot_data = self.data.get("plot_data", [])
# 按照时间分割线,划分成几段数据,取其中的最值
for i in plot_data:
timestamp = i["timestamp"]
self.timestamp_list.append(timestamp)
timestamp = round(timestamp, 1)
cpu_percent = i["cpu_percent"]
mem_gb_num = i["mem_gb_num"]
date = datetime.fromtimestamp(timestamp)
# 添加坐标轴
self.time_axis.append(date)
self.cpu_axis.append(cpu_percent)
self.mem_axis.append(mem_gb_num)
# 获取各种方法执行过程中的cpu和内存极值:
self.get_each_method_maximun_cpu_mem() | 从数据中获取到绘图相关的有用信息. | train | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/plot.py#L38-L59 | [
"def get_each_method_maximun_cpu_mem(self):\n \"\"\"获取每个方法中的cpu和内存耗费最值点.\"\"\"\n # 本函数用于丰富self.method_exec_info的信息:存入cpu、mem最值点\n self.method_exec_info = deepcopy(self.data.get(\"method_exec_info\", []))\n method_exec_info = deepcopy(self.method_exec_info) # 用来辅助循环\n method_index, cpu_max, cpu_max_time, mem_max, mem_max_time = 0, 0, 0, 0, 0 # 临时变量\n self.max_mem = 0\n for index, timestamp in enumerate(self.timestamp_list):\n # method_exec_info是按顺序的,逐个遍历找出每个method_exec_info中的cpu和mem的最值点和timestamp:\n start, end = method_exec_info[0][\"start_time\"], method_exec_info[0][\"end_time\"]\n if timestamp < start:\n # 方法正式start之前的数据,不能参与方法内的cpu、mem计算,直接忽略此条数据\n continue\n elif timestamp <= end:\n # 方法执行期间的数据,纳入最值比较:\n if self.cpu_axis[index] > cpu_max:\n cpu_max, cpu_max_time = self.cpu_axis[index], timestamp\n if self.mem_axis[index] > mem_max:\n mem_max, mem_max_time = self.mem_axis[index], timestamp\n continue\n else:\n # 本次方法筛选完毕,保存本方法的最值cpu和mem\n if cpu_max_time != 0 and mem_max_time != 0:\n self.method_exec_info[method_index].update({\"cpu_max\": cpu_max, \"mem_max\": mem_max, \"cpu_max_time\": cpu_max_time, \"mem_max_time\": mem_max_time})\n # 保存最大的内存,后面绘图时用\n if mem_max > self.max_mem:\n self.max_mem = mem_max\n cpu_max, mem_max = 0, 0 # 临时变量\n # 准备进行下一个方法的检查,发现已经检查完则正式结束\n del method_exec_info[0]\n if method_exec_info:\n method_index += 1 # 进行下一个方法时:当前方法的序号+1\n continue\n else:\n break\n"
] | class PlotResult(object):
"""绘制单张图片的方法对比结果."""
def __init__(self, dir_path="", file_name=""):
super(PlotResult, self).__init__()
# 提取数据:
if file_name:
file_path = os.path.join(dir_path, file_name)
self.data = self.load_file(file_path)
self.extract_data()
else:
raise Exception("Profile result file not exists..")
def load_file(self, file, print_info=True):
if print_info:
print("loading config from :", repr(file))
try:
config = anyconfig.load(file, ignore_missing=True)
return config
except ValueError:
print("loading config failed...")
return {}
def get_each_method_maximun_cpu_mem(self):
"""获取每个方法中的cpu和内存耗费最值点."""
# 本函数用于丰富self.method_exec_info的信息:存入cpu、mem最值点
self.method_exec_info = deepcopy(self.data.get("method_exec_info", []))
method_exec_info = deepcopy(self.method_exec_info) # 用来辅助循环
method_index, cpu_max, cpu_max_time, mem_max, mem_max_time = 0, 0, 0, 0, 0 # 临时变量
self.max_mem = 0
for index, timestamp in enumerate(self.timestamp_list):
# method_exec_info是按顺序的,逐个遍历找出每个method_exec_info中的cpu和mem的最值点和timestamp:
start, end = method_exec_info[0]["start_time"], method_exec_info[0]["end_time"]
if timestamp < start:
# 方法正式start之前的数据,不能参与方法内的cpu、mem计算,直接忽略此条数据
continue
elif timestamp <= end:
# 方法执行期间的数据,纳入最值比较:
if self.cpu_axis[index] > cpu_max:
cpu_max, cpu_max_time = self.cpu_axis[index], timestamp
if self.mem_axis[index] > mem_max:
mem_max, mem_max_time = self.mem_axis[index], timestamp
continue
else:
# 本次方法筛选完毕,保存本方法的最值cpu和mem
if cpu_max_time != 0 and mem_max_time != 0:
self.method_exec_info[method_index].update({"cpu_max": cpu_max, "mem_max": mem_max, "cpu_max_time": cpu_max_time, "mem_max_time": mem_max_time})
# 保存最大的内存,后面绘图时用
if mem_max > self.max_mem:
self.max_mem = mem_max
cpu_max, mem_max = 0, 0 # 临时变量
# 准备进行下一个方法的检查,发现已经检查完则正式结束
del method_exec_info[0]
if method_exec_info:
method_index += 1 # 进行下一个方法时:当前方法的序号+1
continue
else:
break
def _get_graph_title(self):
"""获取图像的title."""
start_time = datetime.fromtimestamp(int(self.timestamp_list[0]))
end_time = datetime.fromtimestamp(int(self.timestamp_list[-1]))
end_time = end_time.strftime('%H:%M:%S')
title = "Timespan: %s —— %s" % (start_time, end_time)
return title
def plot_cpu_mem_keypoints(self):
"""绘制CPU/Mem/特征点数量."""
plt.figure(1)
# 开始绘制子图:
plt.subplot(311)
title = self._get_graph_title()
plt.title(title, loc="center") # 设置绘图的标题
mem_ins = plt.plot(self.time_axis, self.mem_axis, "-", label="Mem(MB)", color='deepskyblue', linestyle='-', marker=',')
# 设置数字标签
plt.legend(mem_ins, ["Mem(MB)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["mem_max_time"])
text = "%s: %d MB" % (method_exec["name"], method_exec["mem_max"])
plt.text(x, method_exec["mem_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["mem_max"], 'bo', label="point") # 绘制点
# 绘制子图2
plt.subplot(312)
cpu_ins = plt.plot(self.time_axis, self.cpu_axis, "-", label="CPU(%)", color='red', linestyle='-', marker=',')
plt.legend(cpu_ins, ["CPU(%)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.xlabel("Time(s)")
plt.ylabel("CPU(%)")
plt.ylim(0, 120)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["cpu_max_time"])
text = "%s: %d%%" % (method_exec["name"], method_exec["cpu_max"])
plt.text(x, method_exec["cpu_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["cpu_max"], 'ro', label="point") # 绘制点
# 绘制子图3
plt.subplot(313) # 绘制一下柱状图(关键点)
# 设置轴向标签
plt.xlabel('methods')
plt.ylabel('keypoints number')
method_list, method_pts_length_list, color_list = [], [], []
for method_exec in self.method_exec_info:
for item in ["kp_sch", "kp_src", "good"]:
method_list.append("%s-%s" % (method_exec["name"], item))
method_pts_length_list.append(method_exec[item])
if method_exec["result"]:
color_list.append(["palegreen", "limegreen", "deepskyblue"][["kp_sch", "kp_src", "good"].index(item)])
else:
color_list.append("tomato")
method_x = np.arange(len(method_list)) + 1
plt.bar(method_x, method_pts_length_list, width=0.35, align='center', color=color_list, alpha=0.8)
plt.xticks(method_x, method_list, size='small', rotation=30)
# 设置数字标签
for x, y in zip(method_x, method_pts_length_list):
plt.text(x, y + 10, "%d" % y, ha="center", va="bottom", fontsize=7)
plt.ylim(0, max(method_pts_length_list) * 1.2)
# 显示图像
plt.show()
|
AirtestProject/Airtest | benchmark/plot.py | PlotResult.get_each_method_maximun_cpu_mem | python | def get_each_method_maximun_cpu_mem(self):
# 本函数用于丰富self.method_exec_info的信息:存入cpu、mem最值点
self.method_exec_info = deepcopy(self.data.get("method_exec_info", []))
method_exec_info = deepcopy(self.method_exec_info) # 用来辅助循环
method_index, cpu_max, cpu_max_time, mem_max, mem_max_time = 0, 0, 0, 0, 0 # 临时变量
self.max_mem = 0
for index, timestamp in enumerate(self.timestamp_list):
# method_exec_info是按顺序的,逐个遍历找出每个method_exec_info中的cpu和mem的最值点和timestamp:
start, end = method_exec_info[0]["start_time"], method_exec_info[0]["end_time"]
if timestamp < start:
# 方法正式start之前的数据,不能参与方法内的cpu、mem计算,直接忽略此条数据
continue
elif timestamp <= end:
# 方法执行期间的数据,纳入最值比较:
if self.cpu_axis[index] > cpu_max:
cpu_max, cpu_max_time = self.cpu_axis[index], timestamp
if self.mem_axis[index] > mem_max:
mem_max, mem_max_time = self.mem_axis[index], timestamp
continue
else:
# 本次方法筛选完毕,保存本方法的最值cpu和mem
if cpu_max_time != 0 and mem_max_time != 0:
self.method_exec_info[method_index].update({"cpu_max": cpu_max, "mem_max": mem_max, "cpu_max_time": cpu_max_time, "mem_max_time": mem_max_time})
# 保存最大的内存,后面绘图时用
if mem_max > self.max_mem:
self.max_mem = mem_max
cpu_max, mem_max = 0, 0 # 临时变量
# 准备进行下一个方法的检查,发现已经检查完则正式结束
del method_exec_info[0]
if method_exec_info:
method_index += 1 # 进行下一个方法时:当前方法的序号+1
continue
else:
break | 获取每个方法中的cpu和内存耗费最值点. | train | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/plot.py#L61-L95 | null | class PlotResult(object):
"""绘制单张图片的方法对比结果."""
def __init__(self, dir_path="", file_name=""):
super(PlotResult, self).__init__()
# 提取数据:
if file_name:
file_path = os.path.join(dir_path, file_name)
self.data = self.load_file(file_path)
self.extract_data()
else:
raise Exception("Profile result file not exists..")
def load_file(self, file, print_info=True):
if print_info:
print("loading config from :", repr(file))
try:
config = anyconfig.load(file, ignore_missing=True)
return config
except ValueError:
print("loading config failed...")
return {}
def extract_data(self):
"""从数据中获取到绘图相关的有用信息."""
self.time_axis = []
self.cpu_axis = []
self.mem_axis = []
self.timestamp_list = []
plot_data = self.data.get("plot_data", [])
# 按照时间分割线,划分成几段数据,取其中的最值
for i in plot_data:
timestamp = i["timestamp"]
self.timestamp_list.append(timestamp)
timestamp = round(timestamp, 1)
cpu_percent = i["cpu_percent"]
mem_gb_num = i["mem_gb_num"]
date = datetime.fromtimestamp(timestamp)
# 添加坐标轴
self.time_axis.append(date)
self.cpu_axis.append(cpu_percent)
self.mem_axis.append(mem_gb_num)
# 获取各种方法执行过程中的cpu和内存极值:
self.get_each_method_maximun_cpu_mem()
def _get_graph_title(self):
"""获取图像的title."""
start_time = datetime.fromtimestamp(int(self.timestamp_list[0]))
end_time = datetime.fromtimestamp(int(self.timestamp_list[-1]))
end_time = end_time.strftime('%H:%M:%S')
title = "Timespan: %s —— %s" % (start_time, end_time)
return title
def plot_cpu_mem_keypoints(self):
"""绘制CPU/Mem/特征点数量."""
plt.figure(1)
# 开始绘制子图:
plt.subplot(311)
title = self._get_graph_title()
plt.title(title, loc="center") # 设置绘图的标题
mem_ins = plt.plot(self.time_axis, self.mem_axis, "-", label="Mem(MB)", color='deepskyblue', linestyle='-', marker=',')
# 设置数字标签
plt.legend(mem_ins, ["Mem(MB)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["mem_max_time"])
text = "%s: %d MB" % (method_exec["name"], method_exec["mem_max"])
plt.text(x, method_exec["mem_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["mem_max"], 'bo', label="point") # 绘制点
# 绘制子图2
plt.subplot(312)
cpu_ins = plt.plot(self.time_axis, self.cpu_axis, "-", label="CPU(%)", color='red', linestyle='-', marker=',')
plt.legend(cpu_ins, ["CPU(%)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.xlabel("Time(s)")
plt.ylabel("CPU(%)")
plt.ylim(0, 120)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["cpu_max_time"])
text = "%s: %d%%" % (method_exec["name"], method_exec["cpu_max"])
plt.text(x, method_exec["cpu_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["cpu_max"], 'ro', label="point") # 绘制点
# 绘制子图3
plt.subplot(313) # 绘制一下柱状图(关键点)
# 设置轴向标签
plt.xlabel('methods')
plt.ylabel('keypoints number')
method_list, method_pts_length_list, color_list = [], [], []
for method_exec in self.method_exec_info:
for item in ["kp_sch", "kp_src", "good"]:
method_list.append("%s-%s" % (method_exec["name"], item))
method_pts_length_list.append(method_exec[item])
if method_exec["result"]:
color_list.append(["palegreen", "limegreen", "deepskyblue"][["kp_sch", "kp_src", "good"].index(item)])
else:
color_list.append("tomato")
method_x = np.arange(len(method_list)) + 1
plt.bar(method_x, method_pts_length_list, width=0.35, align='center', color=color_list, alpha=0.8)
plt.xticks(method_x, method_list, size='small', rotation=30)
# 设置数字标签
for x, y in zip(method_x, method_pts_length_list):
plt.text(x, y + 10, "%d" % y, ha="center", va="bottom", fontsize=7)
plt.ylim(0, max(method_pts_length_list) * 1.2)
# 显示图像
plt.show()
|
AirtestProject/Airtest | benchmark/plot.py | PlotResult._get_graph_title | python | def _get_graph_title(self):
start_time = datetime.fromtimestamp(int(self.timestamp_list[0]))
end_time = datetime.fromtimestamp(int(self.timestamp_list[-1]))
end_time = end_time.strftime('%H:%M:%S')
title = "Timespan: %s —— %s" % (start_time, end_time)
return title | 获取图像的title. | train | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/plot.py#L97-L104 | null | class PlotResult(object):
"""绘制单张图片的方法对比结果."""
def __init__(self, dir_path="", file_name=""):
super(PlotResult, self).__init__()
# 提取数据:
if file_name:
file_path = os.path.join(dir_path, file_name)
self.data = self.load_file(file_path)
self.extract_data()
else:
raise Exception("Profile result file not exists..")
def load_file(self, file, print_info=True):
if print_info:
print("loading config from :", repr(file))
try:
config = anyconfig.load(file, ignore_missing=True)
return config
except ValueError:
print("loading config failed...")
return {}
def extract_data(self):
"""从数据中获取到绘图相关的有用信息."""
self.time_axis = []
self.cpu_axis = []
self.mem_axis = []
self.timestamp_list = []
plot_data = self.data.get("plot_data", [])
# 按照时间分割线,划分成几段数据,取其中的最值
for i in plot_data:
timestamp = i["timestamp"]
self.timestamp_list.append(timestamp)
timestamp = round(timestamp, 1)
cpu_percent = i["cpu_percent"]
mem_gb_num = i["mem_gb_num"]
date = datetime.fromtimestamp(timestamp)
# 添加坐标轴
self.time_axis.append(date)
self.cpu_axis.append(cpu_percent)
self.mem_axis.append(mem_gb_num)
# 获取各种方法执行过程中的cpu和内存极值:
self.get_each_method_maximun_cpu_mem()
def get_each_method_maximun_cpu_mem(self):
"""获取每个方法中的cpu和内存耗费最值点."""
# 本函数用于丰富self.method_exec_info的信息:存入cpu、mem最值点
self.method_exec_info = deepcopy(self.data.get("method_exec_info", []))
method_exec_info = deepcopy(self.method_exec_info) # 用来辅助循环
method_index, cpu_max, cpu_max_time, mem_max, mem_max_time = 0, 0, 0, 0, 0 # 临时变量
self.max_mem = 0
for index, timestamp in enumerate(self.timestamp_list):
# method_exec_info是按顺序的,逐个遍历找出每个method_exec_info中的cpu和mem的最值点和timestamp:
start, end = method_exec_info[0]["start_time"], method_exec_info[0]["end_time"]
if timestamp < start:
# 方法正式start之前的数据,不能参与方法内的cpu、mem计算,直接忽略此条数据
continue
elif timestamp <= end:
# 方法执行期间的数据,纳入最值比较:
if self.cpu_axis[index] > cpu_max:
cpu_max, cpu_max_time = self.cpu_axis[index], timestamp
if self.mem_axis[index] > mem_max:
mem_max, mem_max_time = self.mem_axis[index], timestamp
continue
else:
# 本次方法筛选完毕,保存本方法的最值cpu和mem
if cpu_max_time != 0 and mem_max_time != 0:
self.method_exec_info[method_index].update({"cpu_max": cpu_max, "mem_max": mem_max, "cpu_max_time": cpu_max_time, "mem_max_time": mem_max_time})
# 保存最大的内存,后面绘图时用
if mem_max > self.max_mem:
self.max_mem = mem_max
cpu_max, mem_max = 0, 0 # 临时变量
# 准备进行下一个方法的检查,发现已经检查完则正式结束
del method_exec_info[0]
if method_exec_info:
method_index += 1 # 进行下一个方法时:当前方法的序号+1
continue
else:
break
def plot_cpu_mem_keypoints(self):
"""绘制CPU/Mem/特征点数量."""
plt.figure(1)
# 开始绘制子图:
plt.subplot(311)
title = self._get_graph_title()
plt.title(title, loc="center") # 设置绘图的标题
mem_ins = plt.plot(self.time_axis, self.mem_axis, "-", label="Mem(MB)", color='deepskyblue', linestyle='-', marker=',')
# 设置数字标签
plt.legend(mem_ins, ["Mem(MB)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["mem_max_time"])
text = "%s: %d MB" % (method_exec["name"], method_exec["mem_max"])
plt.text(x, method_exec["mem_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["mem_max"], 'bo', label="point") # 绘制点
# 绘制子图2
plt.subplot(312)
cpu_ins = plt.plot(self.time_axis, self.cpu_axis, "-", label="CPU(%)", color='red', linestyle='-', marker=',')
plt.legend(cpu_ins, ["CPU(%)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.xlabel("Time(s)")
plt.ylabel("CPU(%)")
plt.ylim(0, 120)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["cpu_max_time"])
text = "%s: %d%%" % (method_exec["name"], method_exec["cpu_max"])
plt.text(x, method_exec["cpu_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["cpu_max"], 'ro', label="point") # 绘制点
# 绘制子图3
plt.subplot(313) # 绘制一下柱状图(关键点)
# 设置轴向标签
plt.xlabel('methods')
plt.ylabel('keypoints number')
method_list, method_pts_length_list, color_list = [], [], []
for method_exec in self.method_exec_info:
for item in ["kp_sch", "kp_src", "good"]:
method_list.append("%s-%s" % (method_exec["name"], item))
method_pts_length_list.append(method_exec[item])
if method_exec["result"]:
color_list.append(["palegreen", "limegreen", "deepskyblue"][["kp_sch", "kp_src", "good"].index(item)])
else:
color_list.append("tomato")
method_x = np.arange(len(method_list)) + 1
plt.bar(method_x, method_pts_length_list, width=0.35, align='center', color=color_list, alpha=0.8)
plt.xticks(method_x, method_list, size='small', rotation=30)
# 设置数字标签
for x, y in zip(method_x, method_pts_length_list):
plt.text(x, y + 10, "%d" % y, ha="center", va="bottom", fontsize=7)
plt.ylim(0, max(method_pts_length_list) * 1.2)
# 显示图像
plt.show()
|
AirtestProject/Airtest | benchmark/plot.py | PlotResult.plot_cpu_mem_keypoints | python | def plot_cpu_mem_keypoints(self):
plt.figure(1)
# 开始绘制子图:
plt.subplot(311)
title = self._get_graph_title()
plt.title(title, loc="center") # 设置绘图的标题
mem_ins = plt.plot(self.time_axis, self.mem_axis, "-", label="Mem(MB)", color='deepskyblue', linestyle='-', marker=',')
# 设置数字标签
plt.legend(mem_ins, ["Mem(MB)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["mem_max_time"])
text = "%s: %d MB" % (method_exec["name"], method_exec["mem_max"])
plt.text(x, method_exec["mem_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["mem_max"], 'bo', label="point") # 绘制点
# 绘制子图2
plt.subplot(312)
cpu_ins = plt.plot(self.time_axis, self.cpu_axis, "-", label="CPU(%)", color='red', linestyle='-', marker=',')
plt.legend(cpu_ins, ["CPU(%)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.xlabel("Time(s)")
plt.ylabel("CPU(%)")
plt.ylim(0, 120)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["cpu_max_time"])
text = "%s: %d%%" % (method_exec["name"], method_exec["cpu_max"])
plt.text(x, method_exec["cpu_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["cpu_max"], 'ro', label="point") # 绘制点
# 绘制子图3
plt.subplot(313) # 绘制一下柱状图(关键点)
# 设置轴向标签
plt.xlabel('methods')
plt.ylabel('keypoints number')
method_list, method_pts_length_list, color_list = [], [], []
for method_exec in self.method_exec_info:
for item in ["kp_sch", "kp_src", "good"]:
method_list.append("%s-%s" % (method_exec["name"], item))
method_pts_length_list.append(method_exec[item])
if method_exec["result"]:
color_list.append(["palegreen", "limegreen", "deepskyblue"][["kp_sch", "kp_src", "good"].index(item)])
else:
color_list.append("tomato")
method_x = np.arange(len(method_list)) + 1
plt.bar(method_x, method_pts_length_list, width=0.35, align='center', color=color_list, alpha=0.8)
plt.xticks(method_x, method_list, size='small', rotation=30)
# 设置数字标签
for x, y in zip(method_x, method_pts_length_list):
plt.text(x, y + 10, "%d" % y, ha="center", va="bottom", fontsize=7)
plt.ylim(0, max(method_pts_length_list) * 1.2)
# 显示图像
plt.show() | 绘制CPU/Mem/特征点数量. | train | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/plot.py#L106-L172 | [
"def _get_graph_title(self):\n \"\"\"获取图像的title.\"\"\"\n start_time = datetime.fromtimestamp(int(self.timestamp_list[0]))\n end_time = datetime.fromtimestamp(int(self.timestamp_list[-1]))\n end_time = end_time.strftime('%H:%M:%S')\n title = \"Timespan: %s —— %s\" % (start_time, end_time)\n\n return title\n"
] | class PlotResult(object):
"""绘制单张图片的方法对比结果."""
def __init__(self, dir_path="", file_name=""):
super(PlotResult, self).__init__()
# 提取数据:
if file_name:
file_path = os.path.join(dir_path, file_name)
self.data = self.load_file(file_path)
self.extract_data()
else:
raise Exception("Profile result file not exists..")
def load_file(self, file, print_info=True):
if print_info:
print("loading config from :", repr(file))
try:
config = anyconfig.load(file, ignore_missing=True)
return config
except ValueError:
print("loading config failed...")
return {}
def extract_data(self):
"""从数据中获取到绘图相关的有用信息."""
self.time_axis = []
self.cpu_axis = []
self.mem_axis = []
self.timestamp_list = []
plot_data = self.data.get("plot_data", [])
# 按照时间分割线,划分成几段数据,取其中的最值
for i in plot_data:
timestamp = i["timestamp"]
self.timestamp_list.append(timestamp)
timestamp = round(timestamp, 1)
cpu_percent = i["cpu_percent"]
mem_gb_num = i["mem_gb_num"]
date = datetime.fromtimestamp(timestamp)
# 添加坐标轴
self.time_axis.append(date)
self.cpu_axis.append(cpu_percent)
self.mem_axis.append(mem_gb_num)
# 获取各种方法执行过程中的cpu和内存极值:
self.get_each_method_maximun_cpu_mem()
def get_each_method_maximun_cpu_mem(self):
"""获取每个方法中的cpu和内存耗费最值点."""
# 本函数用于丰富self.method_exec_info的信息:存入cpu、mem最值点
self.method_exec_info = deepcopy(self.data.get("method_exec_info", []))
method_exec_info = deepcopy(self.method_exec_info) # 用来辅助循环
method_index, cpu_max, cpu_max_time, mem_max, mem_max_time = 0, 0, 0, 0, 0 # 临时变量
self.max_mem = 0
for index, timestamp in enumerate(self.timestamp_list):
# method_exec_info是按顺序的,逐个遍历找出每个method_exec_info中的cpu和mem的最值点和timestamp:
start, end = method_exec_info[0]["start_time"], method_exec_info[0]["end_time"]
if timestamp < start:
# 方法正式start之前的数据,不能参与方法内的cpu、mem计算,直接忽略此条数据
continue
elif timestamp <= end:
# 方法执行期间的数据,纳入最值比较:
if self.cpu_axis[index] > cpu_max:
cpu_max, cpu_max_time = self.cpu_axis[index], timestamp
if self.mem_axis[index] > mem_max:
mem_max, mem_max_time = self.mem_axis[index], timestamp
continue
else:
# 本次方法筛选完毕,保存本方法的最值cpu和mem
if cpu_max_time != 0 and mem_max_time != 0:
self.method_exec_info[method_index].update({"cpu_max": cpu_max, "mem_max": mem_max, "cpu_max_time": cpu_max_time, "mem_max_time": mem_max_time})
# 保存最大的内存,后面绘图时用
if mem_max > self.max_mem:
self.max_mem = mem_max
cpu_max, mem_max = 0, 0 # 临时变量
# 准备进行下一个方法的检查,发现已经检查完则正式结束
del method_exec_info[0]
if method_exec_info:
method_index += 1 # 进行下一个方法时:当前方法的序号+1
continue
else:
break
def _get_graph_title(self):
"""获取图像的title."""
start_time = datetime.fromtimestamp(int(self.timestamp_list[0]))
end_time = datetime.fromtimestamp(int(self.timestamp_list[-1]))
end_time = end_time.strftime('%H:%M:%S')
title = "Timespan: %s —— %s" % (start_time, end_time)
return title
|
AirtestProject/Airtest | benchmark/profile_recorder.py | CheckKeypointResult.refresh_method_objects | python | def refresh_method_objects(self):
self.method_object_dict = {}
for key, method in self.MATCHING_METHODS.items():
method_object = method(self.im_search, self.im_source, self.threshold, self.rgb)
self.method_object_dict.update({key: method_object}) | 初始化方法对象. | train | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/profile_recorder.py#L44-L49 | null | class CheckKeypointResult(object):
"""查看基于特征点的图像结果."""
RGB = False
THRESHOLD = 0.7
MATCHING_METHODS = {
"kaze": KAZEMatching,
"brisk": BRISKMatching,
"akaze": AKAZEMatching,
"orb": ORBMatching,
"sift": SIFTMatching,
"surf": SURFMatching,
"brief": BRIEFMatching,
}
def __init__(self, im_search, im_source, threshold=0.8, rgb=True):
super(CheckKeypointResult, self).__init__()
self.im_source = im_source
self.im_search = im_search
self.threshold = threshold or self.THRESHOLD
self.rgb = rgb or self.RGB
# 初始化方法对象
self.refresh_method_objects()
def _get_result(self, method_name="kaze"):
"""获取特征点."""
method_object = self.method_object_dict.get(method_name)
# 提取结果和特征点:
try:
result = method_object.find_best_result()
except Exception:
import traceback
traceback.print_exc()
return [], [], [], None
return method_object.kp_sch, method_object.kp_src, method_object.good, result
def get_and_plot_keypoints(self, method_name, plot=False):
"""获取并且绘制出特征点匹配结果."""
if method_name not in self.method_object_dict.keys():
print("'%s' is not in MATCHING_METHODS" % method_name)
return None
kp_sch, kp_src, good, result = self._get_result(method_name)
if not plot or result is None:
return kp_sch, kp_src, good, result
else:
im_search, im_source = deepcopy(self.im_search), deepcopy(self.im_source)
# 绘制特征点识别情况、基于特征的图像匹配结果:
h_sch, w_sch = im_search.shape[:2]
h_src, w_src = im_source.shape[:2]
# init the plot image:
plot_img = np.zeros([max(h_sch, h_src), w_sch + w_src, 3], np.uint8)
plot_img[:h_sch, :w_sch, :] = im_search
plot_img[:h_src, w_sch:, :] = im_source
# plot good matche points:
for m in good:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画线
cv2.line(plot_img, (int(kp_sch[m.queryIdx].pt[0]), int(kp_sch[m.queryIdx].pt[1])), (int(kp_src[m.trainIdx].pt[0] + w_sch), int(kp_src[m.trainIdx].pt[1])), color)
# plot search_image
for kp in kp_sch:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画点
pos = (int(kp.pt[0]), int(kp.pt[1]))
mark_point(im_search, pos, circle=False, color=color, radius=5)
# plot source_image
for kp in kp_src:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画点
pos = (int(kp.pt[0]), int(kp.pt[1]))
mark_point(im_source, pos, circle=False, color=color, radius=10)
from airtest.aircv import show
show(plot_img)
show(im_search)
show(im_source)
|
AirtestProject/Airtest | benchmark/profile_recorder.py | CheckKeypointResult._get_result | python | def _get_result(self, method_name="kaze"):
method_object = self.method_object_dict.get(method_name)
# 提取结果和特征点:
try:
result = method_object.find_best_result()
except Exception:
import traceback
traceback.print_exc()
return [], [], [], None
return method_object.kp_sch, method_object.kp_src, method_object.good, result | 获取特征点. | train | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/profile_recorder.py#L51-L62 | null | class CheckKeypointResult(object):
"""查看基于特征点的图像结果."""
RGB = False
THRESHOLD = 0.7
MATCHING_METHODS = {
"kaze": KAZEMatching,
"brisk": BRISKMatching,
"akaze": AKAZEMatching,
"orb": ORBMatching,
"sift": SIFTMatching,
"surf": SURFMatching,
"brief": BRIEFMatching,
}
def __init__(self, im_search, im_source, threshold=0.8, rgb=True):
super(CheckKeypointResult, self).__init__()
self.im_source = im_source
self.im_search = im_search
self.threshold = threshold or self.THRESHOLD
self.rgb = rgb or self.RGB
# 初始化方法对象
self.refresh_method_objects()
def refresh_method_objects(self):
"""初始化方法对象."""
self.method_object_dict = {}
for key, method in self.MATCHING_METHODS.items():
method_object = method(self.im_search, self.im_source, self.threshold, self.rgb)
self.method_object_dict.update({key: method_object})
def get_and_plot_keypoints(self, method_name, plot=False):
"""获取并且绘制出特征点匹配结果."""
if method_name not in self.method_object_dict.keys():
print("'%s' is not in MATCHING_METHODS" % method_name)
return None
kp_sch, kp_src, good, result = self._get_result(method_name)
if not plot or result is None:
return kp_sch, kp_src, good, result
else:
im_search, im_source = deepcopy(self.im_search), deepcopy(self.im_source)
# 绘制特征点识别情况、基于特征的图像匹配结果:
h_sch, w_sch = im_search.shape[:2]
h_src, w_src = im_source.shape[:2]
# init the plot image:
plot_img = np.zeros([max(h_sch, h_src), w_sch + w_src, 3], np.uint8)
plot_img[:h_sch, :w_sch, :] = im_search
plot_img[:h_src, w_sch:, :] = im_source
# plot good matche points:
for m in good:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画线
cv2.line(plot_img, (int(kp_sch[m.queryIdx].pt[0]), int(kp_sch[m.queryIdx].pt[1])), (int(kp_src[m.trainIdx].pt[0] + w_sch), int(kp_src[m.trainIdx].pt[1])), color)
# plot search_image
for kp in kp_sch:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画点
pos = (int(kp.pt[0]), int(kp.pt[1]))
mark_point(im_search, pos, circle=False, color=color, radius=5)
# plot source_image
for kp in kp_src:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画点
pos = (int(kp.pt[0]), int(kp.pt[1]))
mark_point(im_source, pos, circle=False, color=color, radius=10)
from airtest.aircv import show
show(plot_img)
show(im_search)
show(im_source)
|
def get_and_plot_keypoints(self, method_name, plot=False):
    """Run one keypoint-matching method and optionally visualize its result.

    Returns the ``(kp_sch, kp_src, good, result)`` tuple from the matcher,
    or ``None`` when *method_name* is not a registered matching method.
    When *plot* is true and a match was found, also draws the match lines
    and keypoints and shows the images.
    """
    if method_name not in self.method_object_dict.keys():
        print("'%s' is not in MATCHING_METHODS" % method_name)
        return None
    kp_sch, kp_src, good, result = self._get_result(method_name)
    if not plot or result is None:
        return kp_sch, kp_src, good, result

    def rand_color():
        # a fresh random BGR triple for every drawn primitive
        return tuple(int(random() * 255) for _ in range(3))

    im_search, im_source = deepcopy(self.im_search), deepcopy(self.im_source)
    h_sch, w_sch = im_search.shape[:2]
    h_src, w_src = im_source.shape[:2]
    # side-by-side canvas: template on the left, screenshot on the right
    plot_img = np.zeros([max(h_sch, h_src), w_sch + w_src, 3], np.uint8)
    plot_img[:h_sch, :w_sch, :] = im_search
    plot_img[:h_src, w_sch:, :] = im_source
    # connect every good match across the two halves
    for match in good:
        sch_pt = kp_sch[match.queryIdx].pt
        src_pt = kp_src[match.trainIdx].pt
        cv2.line(plot_img,
                 (int(sch_pt[0]), int(sch_pt[1])),
                 (int(src_pt[0] + w_sch), int(src_pt[1])),
                 rand_color())
    # mark every detected keypoint on each image
    for kp in kp_sch:
        pos = (int(kp.pt[0]), int(kp.pt[1]))
        mark_point(im_search, pos, circle=False, color=rand_color(), radius=5)
    for kp in kp_src:
        pos = (int(kp.pt[0]), int(kp.pt[1]))
        mark_point(im_source, pos, circle=False, color=rand_color(), radius=10)
    from airtest.aircv import show
    show(plot_img)
    show(im_search)
    show(im_source)
"""查看基于特征点的图像结果."""
RGB = False
THRESHOLD = 0.7
MATCHING_METHODS = {
"kaze": KAZEMatching,
"brisk": BRISKMatching,
"akaze": AKAZEMatching,
"orb": ORBMatching,
"sift": SIFTMatching,
"surf": SURFMatching,
"brief": BRIEFMatching,
}
def __init__(self, im_search, im_source, threshold=0.8, rgb=True):
super(CheckKeypointResult, self).__init__()
self.im_source = im_source
self.im_search = im_search
self.threshold = threshold or self.THRESHOLD
self.rgb = rgb or self.RGB
# 初始化方法对象
self.refresh_method_objects()
def refresh_method_objects(self):
"""初始化方法对象."""
self.method_object_dict = {}
for key, method in self.MATCHING_METHODS.items():
method_object = method(self.im_search, self.im_source, self.threshold, self.rgb)
self.method_object_dict.update({key: method_object})
def _get_result(self, method_name="kaze"):
"""获取特征点."""
method_object = self.method_object_dict.get(method_name)
# 提取结果和特征点:
try:
result = method_object.find_best_result()
except Exception:
import traceback
traceback.print_exc()
return [], [], [], None
return method_object.kp_sch, method_object.kp_src, method_object.good, result
|
def run(self):
    """Sampling loop: poll this process's CPU and RSS until stop_flag is set."""
    while True:
        if self.stop_flag:
            break
        sampled_at = time.time()
        # normalize process CPU usage by the number of logical cores
        cpu_percent = self.process.cpu_percent() / self.cpu_num
        mem_info = dict(self.process.memory_info()._asdict())
        # rss is reported in bytes; convert to MB (key name is legacy)
        mem_gb_num = mem_info.get('rss', 0) / 1024 / 1024
        self.profile_data.append({
            "mem_gb_num": mem_gb_num,
            "cpu_percent": cpu_percent,
            "timestamp": sampled_at,
        })
        time.sleep(self.interval)
"""记录CPU和内存数据的thread."""
def __init__(self, interval=0.1):
super(RecordThread, self).__init__()
self.pid = os.getpid()
self.interval = interval
self.cpu_num = psutil.cpu_count()
self.process = psutil.Process(self.pid)
self.profile_data = []
self.stop_flag = False
def set_interval(self, interval):
"""设置数据采集间隔."""
self.interval = interval
|
def load_images(self, search_file, source_file):
    """Read both images from disk and build the matching helper object."""
    self.search_file = search_file
    self.source_file = source_file
    self.im_search = imread(self.search_file)
    self.im_source = imread(self.source_file)
    # the checker owns one matcher instance per supported algorithm
    self.check_macthing_object = CheckKeypointResult(self.im_search, self.im_source)
] | class ProfileRecorder(object):
"""帮助用户记录性能数据."""
def __init__(self, profile_interval=0.1):
super(ProfileRecorder, self).__init__()
self.record_thread = RecordThread()
self.record_thread.set_interval(profile_interval)
def profile_methods(self, method_list):
"""帮助函数执行时记录数据."""
self.method_exec_info = []
# 开始数据记录进程
self.record_thread.stop_flag = False
self.record_thread.start()
for name in method_list:
if name not in self.check_macthing_object.MATCHING_METHODS.keys():
continue
time.sleep(3) # 留出绘图空白区
start_time = time.time() # 记录开始时间
print("--->>> start '%s' matching:\n" % name)
kp_sch, kp_src, good, result = self.check_macthing_object.get_and_plot_keypoints(name) # 根据方法名绘制对应的识别结果
print("\n\n\n")
end_time = time.time() # 记录结束时间
time.sleep(3) # 留出绘图空白区
# 记录本次匹配的相关数据
ret_info = {
"name": name,
"start_time": start_time,
"end_time": end_time,
"result": result,
"kp_sch": len(kp_sch),
"kp_src": len(kp_src),
"good": len(good)}
self.method_exec_info.append(ret_info)
self.record_thread.stop_flag = True
def wite_to_json(self, dir_path="", file_name=""):
"""将性能数据写入文件."""
# 提取数据
data = {
"plot_data": self.record_thread.profile_data,
"method_exec_info": self.method_exec_info,
"search_file": self.search_file,
"source_file": self.source_file}
# 写入文件
file_path = os.path.join(dir_path, file_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
json.dump(data, open(file_path, "w+"), indent=4)
|
def profile_methods(self, method_list):
    """Run each requested matching method while the sampler thread records data.

    Unknown method names are skipped; per-method timing and match counts are
    accumulated in ``self.method_exec_info``.
    """
    self.method_exec_info = []
    # start background CPU/memory sampling
    self.record_thread.stop_flag = False
    self.record_thread.start()

    for name in method_list:
        if name not in self.check_macthing_object.MATCHING_METHODS.keys():
            continue
        time.sleep(3)  # blank margin before the run, for nicer plots
        start_time = time.time()
        print("--->>> start '%s' matching:\n" % name)
        # draw / compute the recognition result for this method
        kp_sch, kp_src, good, result = self.check_macthing_object.get_and_plot_keypoints(name)
        print("\n\n\n")
        end_time = time.time()
        time.sleep(3)  # blank margin after the run
        self.method_exec_info.append({
            "name": name,
            "start_time": start_time,
            "end_time": end_time,
            "result": result,
            "kp_sch": len(kp_sch),
            "kp_src": len(kp_src),
            "good": len(good),
        })

    self.record_thread.stop_flag = True
"""帮助用户记录性能数据."""
def __init__(self, profile_interval=0.1):
super(ProfileRecorder, self).__init__()
self.record_thread = RecordThread()
self.record_thread.set_interval(profile_interval)
def load_images(self, search_file, source_file):
"""加载待匹配图片."""
self.search_file, self.source_file = search_file, source_file
self.im_search, self.im_source = imread(self.search_file), imread(self.source_file)
# 初始化对象
self.check_macthing_object = CheckKeypointResult(self.im_search, self.im_source)
def wite_to_json(self, dir_path="", file_name=""):
"""将性能数据写入文件."""
# 提取数据
data = {
"plot_data": self.record_thread.profile_data,
"method_exec_info": self.method_exec_info,
"search_file": self.search_file,
"source_file": self.source_file}
# 写入文件
file_path = os.path.join(dir_path, file_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
json.dump(data, open(file_path, "w+"), indent=4)
|
def wite_to_json(self, dir_path="", file_name=""):
    """Dump the collected profiling data to ``dir_path/file_name`` as JSON.

    The payload bundles the sampler's raw plot data, the per-method
    execution records, and the two image paths that were matched.
    The directory is created if it does not exist yet.
    """
    data = {
        "plot_data": self.record_thread.profile_data,
        "method_exec_info": self.method_exec_info,
        "search_file": self.search_file,
        "source_file": self.source_file}
    file_path = os.path.join(dir_path, file_name)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    # Fix: the original passed a bare open() into json.dump, leaking the
    # file handle and risking an unflushed file; a context manager closes it.
    with open(file_path, "w+") as f:
        json.dump(data, f, indent=4)
"""帮助用户记录性能数据."""
def __init__(self, profile_interval=0.1):
super(ProfileRecorder, self).__init__()
self.record_thread = RecordThread()
self.record_thread.set_interval(profile_interval)
def load_images(self, search_file, source_file):
"""加载待匹配图片."""
self.search_file, self.source_file = search_file, source_file
self.im_search, self.im_source = imread(self.search_file), imread(self.source_file)
# 初始化对象
self.check_macthing_object = CheckKeypointResult(self.im_search, self.im_source)
def profile_methods(self, method_list):
"""帮助函数执行时记录数据."""
self.method_exec_info = []
# 开始数据记录进程
self.record_thread.stop_flag = False
self.record_thread.start()
for name in method_list:
if name not in self.check_macthing_object.MATCHING_METHODS.keys():
continue
time.sleep(3) # 留出绘图空白区
start_time = time.time() # 记录开始时间
print("--->>> start '%s' matching:\n" % name)
kp_sch, kp_src, good, result = self.check_macthing_object.get_and_plot_keypoints(name) # 根据方法名绘制对应的识别结果
print("\n\n\n")
end_time = time.time() # 记录结束时间
time.sleep(3) # 留出绘图空白区
# 记录本次匹配的相关数据
ret_info = {
"name": name,
"start_time": start_time,
"end_time": end_time,
"result": result,
"kp_sch": len(kp_sch),
"kp_src": len(kp_src),
"good": len(good)}
self.method_exec_info.append(ret_info)
self.record_thread.stop_flag = True
|
def translate_poco_step(self, step):
    """Merge a poco operation with the preceding screenshot step.

    Poco logs a screenshot entry and an action entry separately; this
    combines them into one report-step dict (position, swipe vector,
    description and title).

    Parameters
    ----------
    step : the complete poco operation record (e.g. a click)

    Returns
    -------
    dict describing the merged report step
    """
    ret = {}
    # start from the previous step (expected to be the screenshot)
    prev_step = self._steps[-1]
    if prev_step:
        ret.update(prev_step)
    ret['type'] = step[1].get("name", "")
    if step.get('trace'):
        ret['trace'] = step['trace']
        ret['traceback'] = step.get('traceback')
    step_type = ret['type']
    if step_type in ('touch', 'swipe'):
        # extract the target coordinate, when the call carried one
        call_args = step[1]['args']
        if call_args and len(call_args[0]) == 2:
            pos = call_args[0]
            ret['target_pos'] = [int(pos[0]), int(pos[1])]
            ret['top'] = ret['target_pos'][1]
            ret['left'] = ret['target_pos'][0]
        if step_type == 'swipe':
            # a swipe additionally needs a direction to display
            vector = step[1]["kwargs"].get("vector")
            if vector:
                ret['swipe'] = self.dis_vector(vector)
                ret['vector'] = vector
    ret['desc'] = self.func_desc_poco(ret)
    ret['title'] = self._translate_title(ret)
    return ret
] | class PocoReport(report.LogToHtml):
def translate(self, step):
if step["is_poco"] is True:
return self.translate_poco_step(step)
else:
return super(PocoReport, self).translate(step)
def func_desc_poco(self, step):
""" 把对应的poco操作显示成中文"""
desc = {
"touch": u"点击UI组件 {name}".format(name=step.get("text", "")),
}
if step['type'] in desc:
return desc.get(step['type'])
else:
return self._translate_desc(step)
|
def func_desc_poco(self, step):
    """Return a human-readable (Chinese) description for a poco step."""
    desc = {
        "touch": u"点击UI组件 {name}".format(name=step.get("text", "")),
    }
    step_type = step['type']
    if step_type in desc:
        return desc[step_type]
    # not a poco-specific step: fall back to the generic airtest description
    return self._translate_desc(step)
] | class PocoReport(report.LogToHtml):
def translate(self, step):
if step["is_poco"] is True:
return self.translate_poco_step(step)
else:
return super(PocoReport, self).translate(step)
def translate_poco_step(self, step):
"""
处理poco的相关操作,参数与airtest的不同,由一个截图和一个操作构成,需要合成一个步骤
Parameters
----------
step 一个完整的操作,如click
prev_step 前一个步骤,应该是截图
Returns
-------
"""
ret = {}
prev_step = self._steps[-1]
if prev_step:
ret.update(prev_step)
ret['type'] = step[1].get("name", "")
if step.get('trace'):
ret['trace'] = step['trace']
ret['traceback'] = step.get('traceback')
if ret['type'] == 'touch':
# 取出点击位置
if step[1]['args'] and len(step[1]['args'][0]) == 2:
pos = step[1]['args'][0]
ret['target_pos'] = [int(pos[0]), int(pos[1])]
ret['top'] = ret['target_pos'][1]
ret['left'] = ret['target_pos'][0]
elif ret['type'] == 'swipe':
if step[1]['args'] and len(step[1]['args'][0]) == 2:
pos = step[1]['args'][0]
ret['target_pos'] = [int(pos[0]), int(pos[1])]
ret['top'] = ret['target_pos'][1]
ret['left'] = ret['target_pos'][0]
# swipe 需要显示一个方向
vector = step[1]["kwargs"].get("vector")
if vector:
ret['swipe'] = self.dis_vector(vector)
ret['vector'] = vector
ret['desc'] = self.func_desc_poco(ret)
ret['title'] = self._translate_title(ret)
return ret
|
def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name):
    """Benchmark every method in *method_list* on one template/screen pair.

    CPU/memory samples plus per-method execution records are written to
    ``dir_path/file_name`` as JSON.
    """
    profiler = ProfileRecorder(0.05)
    profiler.load_images(search_file, screen_file)   # load the image pair
    profiler.profile_methods(method_list)            # run the benchmark
    profiler.wite_to_json(dir_path, file_name)       # persist the samples
] | # -*- coding: utf-8 -*-
"""This module test the Airtest keypoint matching methods."""
from random import random
import matplotlib.pyplot as plt
from plot import PlotResult
from profile_recorder import ProfileRecorder
def plot_one_image_result(dir_path, file_name):
"""绘制结果."""
plot_object = PlotResult(dir_path, file_name)
plot_object.plot_cpu_mem_keypoints()
def test_and_profile_and_plot(search_file, screen_file, dir_path, file_name, method_list):
"""单张图片:性能测试+绘制结果."""
# 写入性能数据
profile_different_methods(search_file, screen_file, method_list, dir_path, file_name)
# 绘制图形
plot_one_image_result(dir_path, file_name)
def test_and_profile_all_images(method_list):
"""测试各种images,作对比."""
# 生成性能数据1
search_file, screen_file = "sample\\high_dpi\\tpl1551940579340.png", "sample\\high_dpi\\tpl1551944272194.png"
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
profile_different_methods(search_file, screen_file, method_list, high_dpi_dir_path, high_dpi_file_name)
# 生成性能数据2
search_file, screen_file = "sample\\rich_texture\\search.png", "sample\\rich_texture\\screen.png"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
profile_different_methods(search_file, screen_file, method_list, rich_texture_dir_path, rich_texture_file_name)
# 生成性能数据3
search_file, screen_file = "sample\\text\\search.png", "sample\\text\\screen.png"
text_dir_path, text_file_name = "result", "text.json"
profile_different_methods(search_file, screen_file, method_list, text_dir_path, text_file_name)
def plot_profiled_all_images_table(method_list):
"""绘制多个图片的结果."""
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
text_dir_path, text_file_name = "result", "text.json"
image_list = ['high_dpi', 'rich_texture', 'text']
# high_dpi_method_exec_info
high_dpi_plot_object = PlotResult(high_dpi_dir_path, high_dpi_file_name)
high_dpi_method_exec_info = high_dpi_plot_object.method_exec_info
# rich_texture_method_exec_info
rich_texture_plot_object = PlotResult(rich_texture_dir_path, rich_texture_file_name)
rich_texture_method_exec_info = rich_texture_plot_object.method_exec_info
# text_method_exec_info
text_plot_object = PlotResult(text_dir_path, text_file_name)
text_method_exec_info = text_plot_object.method_exec_info
exec_info_list = [high_dpi_method_exec_info, rich_texture_method_exec_info, text_method_exec_info]
# 提取对应结果:
mem_compare_dict, cpu_compare_dict, succeed_compare_dict = {}, {}, {}
for index, method in enumerate(method_list):
mem_list, cpu_list, succeed_list = [], [], []
for exec_info in exec_info_list:
current_method_exec_info = exec_info[index]
mem_list.append(round(current_method_exec_info["mem_max"], 2)) # MB
# mem_list.append(round(current_method_exec_info["mem_max"] / 1024, 2)) # GB
cpu_list.append(round(current_method_exec_info["cpu_max"], 2))
succeed_ret = True if current_method_exec_info["result"] else False
succeed_list.append(succeed_ret)
mem_compare_dict.update({method: mem_list})
cpu_compare_dict.update({method: cpu_list})
succeed_compare_dict.update({method: succeed_list})
color_list = get_color_list(method_list)
# # 绘制三张表格
# plot_compare_table(image_list, method_list, color_list, mem_compare_dict, "memory (GB)", 311)
# plot_compare_table(image_list, method_list, color_list, cpu_compare_dict, "CPU (%)", 312)
# plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Result", 313)
# plt.show()
# 绘制两个曲线图、一个表格图:
plot_compare_curves(image_list, method_list, color_list, mem_compare_dict, "Title: Memory (GB)", 311)
plot_compare_curves(image_list, method_list, color_list, cpu_compare_dict, "Title: CPU (%)", 312)
plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Title: Result", 313)
plt.show()
def get_color_list(method_list):
"""获取method对应的color列表."""
color_list = []
for method in method_list:
color = tuple([random() for _ in range(3)]) # 随机颜色画线
color_list.append(color)
return color_list
def plot_compare_table(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制了对比表格."""
row_labels = image_list
# 写入值:
table_vals = []
for i in range(len(row_labels)):
row_vals = []
for method in method_list:
row_vals.append(compare_dict[method][i])
table_vals.append(row_vals)
# 绘制表格图
colors = [[(0.95, 0.95, 0.95) for c in range(len(method_list))] for r in range(len(row_labels))] # cell的颜色
# plt.figure(figsize=(8, 4), dpi=120)
plt.subplot(fig_num)
plt.title(fig_name) # 绘制标题
lightgrn = (0.5, 0.8, 0.5) # 这个是label的背景色
plt.table(cellText=table_vals,
rowLabels=row_labels,
colLabels=method_list,
rowColours=[lightgrn] * len(row_labels),
colColours=color_list,
cellColours=colors,
cellLoc='center',
loc='upper left')
plt.axis('off') # 关闭坐标轴
def plot_compare_curves(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制对比曲线."""
plt.subplot(fig_num)
plt.title(fig_name, loc="center") # 设置绘图的标题
mix_ins = []
for index, method in enumerate(method_list):
mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color=color_list[index], linestyle='-', marker='.')
# mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color='deepskyblue', linestyle='-', marker='.')
mix_ins.append(mem_ins)
plt.legend(loc='upper right') # 说明标签的位置
plt.grid() # 加网格
# plt.xlabel("Image")
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
if __name__ == '__main__':
method_list = ["kaze", "brisk", "akaze", "orb", "sift", "surf", "brief"]
# 针对一张图片,绘制该张图片的cpu和mem使用情况.截屏[2907, 1403] 截图[1079, 804]
search_file, screen_file = "sample\\high_dpi\\tpl1551940579340.png", "sample\\high_dpi\\tpl1551944272194.png"
dir_path, file_name = "result", "high_dpi.json"
test_and_profile_and_plot(search_file, screen_file, dir_path, file_name, method_list)
# 测试多张图片,写入性能测试数据
test_and_profile_all_images(method_list)
# 对比绘制多张图片的结果
plot_profiled_all_images_table(method_list)
|
def plot_profiled_all_images_table(method_list):
    """Plot an aggregate comparison of all methods across the sample images.

    Reads the three JSON result files written by the benchmark run and
    renders two curve charts (peak memory / peak CPU per method) plus a
    pass-fail table.
    """
    # the benchmark result files, in the same order as image_list
    result_files = [
        ("result", "high_dpi.json"),
        ("result", "rich_texture.json"),
        ("result", "text.json"),
    ]
    image_list = ['high_dpi', 'rich_texture', 'text']
    # one method_exec_info list per benchmarked image
    exec_info_list = [PlotResult(d, f).method_exec_info for d, f in result_files]

    # pivot the per-image records into per-method series
    mem_compare_dict, cpu_compare_dict, succeed_compare_dict = {}, {}, {}
    for index, method in enumerate(method_list):
        mem_list, cpu_list, succeed_list = [], [], []
        for exec_info in exec_info_list:
            info = exec_info[index]
            mem_list.append(round(info["mem_max"], 2))
            cpu_list.append(round(info["cpu_max"], 2))
            succeed_list.append(bool(info["result"]))
        mem_compare_dict[method] = mem_list
        cpu_compare_dict[method] = cpu_list
        succeed_compare_dict[method] = succeed_list

    color_list = get_color_list(method_list)
    # two curve charts plus one table, stacked vertically
    plot_compare_curves(image_list, method_list, color_list, mem_compare_dict, "Title: Memory (GB)", 311)
    plot_compare_curves(image_list, method_list, color_list, cpu_compare_dict, "Title: CPU (%)", 312)
    plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Title: Result", 313)
    plt.show()
] | # -*- coding: utf-8 -*-
"""This module test the Airtest keypoint matching methods."""
from random import random
import matplotlib.pyplot as plt
from plot import PlotResult
from profile_recorder import ProfileRecorder
def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name):
"""对指定的图片进行性能测试."""
profiler = ProfileRecorder(0.05)
# 加载图片
profiler.load_images(search_file, screen_file)
# 传入待测试的方法列表
profiler.profile_methods(method_list)
# 将性能数据写入文件
profiler.wite_to_json(dir_path, file_name)
def plot_one_image_result(dir_path, file_name):
"""绘制结果."""
plot_object = PlotResult(dir_path, file_name)
plot_object.plot_cpu_mem_keypoints()
def test_and_profile_and_plot(search_file, screen_file, dir_path, file_name, method_list):
"""单张图片:性能测试+绘制结果."""
# 写入性能数据
profile_different_methods(search_file, screen_file, method_list, dir_path, file_name)
# 绘制图形
plot_one_image_result(dir_path, file_name)
def test_and_profile_all_images(method_list):
"""测试各种images,作对比."""
# 生成性能数据1
search_file, screen_file = "sample\\high_dpi\\tpl1551940579340.png", "sample\\high_dpi\\tpl1551944272194.png"
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
profile_different_methods(search_file, screen_file, method_list, high_dpi_dir_path, high_dpi_file_name)
# 生成性能数据2
search_file, screen_file = "sample\\rich_texture\\search.png", "sample\\rich_texture\\screen.png"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
profile_different_methods(search_file, screen_file, method_list, rich_texture_dir_path, rich_texture_file_name)
# 生成性能数据3
search_file, screen_file = "sample\\text\\search.png", "sample\\text\\screen.png"
text_dir_path, text_file_name = "result", "text.json"
profile_different_methods(search_file, screen_file, method_list, text_dir_path, text_file_name)
def get_color_list(method_list):
"""获取method对应的color列表."""
color_list = []
for method in method_list:
color = tuple([random() for _ in range(3)]) # 随机颜色画线
color_list.append(color)
return color_list
def plot_compare_table(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制了对比表格."""
row_labels = image_list
# 写入值:
table_vals = []
for i in range(len(row_labels)):
row_vals = []
for method in method_list:
row_vals.append(compare_dict[method][i])
table_vals.append(row_vals)
# 绘制表格图
colors = [[(0.95, 0.95, 0.95) for c in range(len(method_list))] for r in range(len(row_labels))] # cell的颜色
# plt.figure(figsize=(8, 4), dpi=120)
plt.subplot(fig_num)
plt.title(fig_name) # 绘制标题
lightgrn = (0.5, 0.8, 0.5) # 这个是label的背景色
plt.table(cellText=table_vals,
rowLabels=row_labels,
colLabels=method_list,
rowColours=[lightgrn] * len(row_labels),
colColours=color_list,
cellColours=colors,
cellLoc='center',
loc='upper left')
plt.axis('off') # 关闭坐标轴
def plot_compare_curves(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制对比曲线."""
plt.subplot(fig_num)
plt.title(fig_name, loc="center") # 设置绘图的标题
mix_ins = []
for index, method in enumerate(method_list):
mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color=color_list[index], linestyle='-', marker='.')
# mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color='deepskyblue', linestyle='-', marker='.')
mix_ins.append(mem_ins)
plt.legend(loc='upper right') # 说明标签的位置
plt.grid() # 加网格
# plt.xlabel("Image")
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
if __name__ == '__main__':
method_list = ["kaze", "brisk", "akaze", "orb", "sift", "surf", "brief"]
# 针对一张图片,绘制该张图片的cpu和mem使用情况.截屏[2907, 1403] 截图[1079, 804]
search_file, screen_file = "sample\\high_dpi\\tpl1551940579340.png", "sample\\high_dpi\\tpl1551944272194.png"
dir_path, file_name = "result", "high_dpi.json"
test_and_profile_and_plot(search_file, screen_file, dir_path, file_name, method_list)
# 测试多张图片,写入性能测试数据
test_and_profile_all_images(method_list)
# 对比绘制多张图片的结果
plot_profiled_all_images_table(method_list)
|
def get_color_list(method_list):
    """Return one random color tuple (three floats in [0, 1)) per method."""
    return [tuple(random() for _ in range(3)) for _ in method_list]
"""This module test the Airtest keypoint matching methods."""
from random import random
import matplotlib.pyplot as plt
from plot import PlotResult
from profile_recorder import ProfileRecorder
def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name):
"""对指定的图片进行性能测试."""
profiler = ProfileRecorder(0.05)
# 加载图片
profiler.load_images(search_file, screen_file)
# 传入待测试的方法列表
profiler.profile_methods(method_list)
# 将性能数据写入文件
profiler.wite_to_json(dir_path, file_name)
def plot_one_image_result(dir_path, file_name):
"""绘制结果."""
plot_object = PlotResult(dir_path, file_name)
plot_object.plot_cpu_mem_keypoints()
def test_and_profile_and_plot(search_file, screen_file, dir_path, file_name, method_list):
"""单张图片:性能测试+绘制结果."""
# 写入性能数据
profile_different_methods(search_file, screen_file, method_list, dir_path, file_name)
# 绘制图形
plot_one_image_result(dir_path, file_name)
def test_and_profile_all_images(method_list):
"""测试各种images,作对比."""
# 生成性能数据1
search_file, screen_file = "sample\\high_dpi\\tpl1551940579340.png", "sample\\high_dpi\\tpl1551944272194.png"
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
profile_different_methods(search_file, screen_file, method_list, high_dpi_dir_path, high_dpi_file_name)
# 生成性能数据2
search_file, screen_file = "sample\\rich_texture\\search.png", "sample\\rich_texture\\screen.png"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
profile_different_methods(search_file, screen_file, method_list, rich_texture_dir_path, rich_texture_file_name)
# 生成性能数据3
search_file, screen_file = "sample\\text\\search.png", "sample\\text\\screen.png"
text_dir_path, text_file_name = "result", "text.json"
profile_different_methods(search_file, screen_file, method_list, text_dir_path, text_file_name)
def plot_profiled_all_images_table(method_list):
"""绘制多个图片的结果."""
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
text_dir_path, text_file_name = "result", "text.json"
image_list = ['high_dpi', 'rich_texture', 'text']
# high_dpi_method_exec_info
high_dpi_plot_object = PlotResult(high_dpi_dir_path, high_dpi_file_name)
high_dpi_method_exec_info = high_dpi_plot_object.method_exec_info
# rich_texture_method_exec_info
rich_texture_plot_object = PlotResult(rich_texture_dir_path, rich_texture_file_name)
rich_texture_method_exec_info = rich_texture_plot_object.method_exec_info
# text_method_exec_info
text_plot_object = PlotResult(text_dir_path, text_file_name)
text_method_exec_info = text_plot_object.method_exec_info
exec_info_list = [high_dpi_method_exec_info, rich_texture_method_exec_info, text_method_exec_info]
# 提取对应结果:
mem_compare_dict, cpu_compare_dict, succeed_compare_dict = {}, {}, {}
for index, method in enumerate(method_list):
mem_list, cpu_list, succeed_list = [], [], []
for exec_info in exec_info_list:
current_method_exec_info = exec_info[index]
mem_list.append(round(current_method_exec_info["mem_max"], 2)) # MB
# mem_list.append(round(current_method_exec_info["mem_max"] / 1024, 2)) # GB
cpu_list.append(round(current_method_exec_info["cpu_max"], 2))
succeed_ret = True if current_method_exec_info["result"] else False
succeed_list.append(succeed_ret)
mem_compare_dict.update({method: mem_list})
cpu_compare_dict.update({method: cpu_list})
succeed_compare_dict.update({method: succeed_list})
color_list = get_color_list(method_list)
# # 绘制三张表格
# plot_compare_table(image_list, method_list, color_list, mem_compare_dict, "memory (GB)", 311)
# plot_compare_table(image_list, method_list, color_list, cpu_compare_dict, "CPU (%)", 312)
# plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Result", 313)
# plt.show()
# 绘制两个曲线图、一个表格图:
plot_compare_curves(image_list, method_list, color_list, mem_compare_dict, "Title: Memory (GB)", 311)
plot_compare_curves(image_list, method_list, color_list, cpu_compare_dict, "Title: CPU (%)", 312)
plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Title: Result", 313)
plt.show()
def plot_compare_table(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制了对比表格."""
row_labels = image_list
# 写入值:
table_vals = []
for i in range(len(row_labels)):
row_vals = []
for method in method_list:
row_vals.append(compare_dict[method][i])
table_vals.append(row_vals)
# 绘制表格图
colors = [[(0.95, 0.95, 0.95) for c in range(len(method_list))] for r in range(len(row_labels))] # cell的颜色
# plt.figure(figsize=(8, 4), dpi=120)
plt.subplot(fig_num)
plt.title(fig_name) # 绘制标题
lightgrn = (0.5, 0.8, 0.5) # 这个是label的背景色
plt.table(cellText=table_vals,
rowLabels=row_labels,
colLabels=method_list,
rowColours=[lightgrn] * len(row_labels),
colColours=color_list,
cellColours=colors,
cellLoc='center',
loc='upper left')
plt.axis('off') # 关闭坐标轴
def plot_compare_curves(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制对比曲线."""
plt.subplot(fig_num)
plt.title(fig_name, loc="center") # 设置绘图的标题
mix_ins = []
for index, method in enumerate(method_list):
mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color=color_list[index], linestyle='-', marker='.')
# mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color='deepskyblue', linestyle='-', marker='.')
mix_ins.append(mem_ins)
plt.legend(loc='upper right') # 说明标签的位置
plt.grid() # 加网格
# plt.xlabel("Image")
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
if __name__ == '__main__':
method_list = ["kaze", "brisk", "akaze", "orb", "sift", "surf", "brief"]
# 针对一张图片,绘制该张图片的cpu和mem使用情况.截屏[2907, 1403] 截图[1079, 804]
search_file, screen_file = "sample\\high_dpi\\tpl1551940579340.png", "sample\\high_dpi\\tpl1551944272194.png"
dir_path, file_name = "result", "high_dpi.json"
test_and_profile_and_plot(search_file, screen_file, dir_path, file_name, method_list)
# 测试多张图片,写入性能测试数据
test_and_profile_all_images(method_list)
# 对比绘制多张图片的结果
plot_profiled_all_images_table(method_list)
|
def plot_compare_table(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
    """Draw a table comparing each method's result on every image.

    Rows are images, columns are methods; ``compare_dict`` maps a method name
    to its per-image value list.
    """
    row_labels = image_list
    # One table row per image, one column per method.
    table_vals = [[compare_dict[method][row] for method in method_list]
                  for row in range(len(row_labels))]
    # Uniform light-grey background for every data cell.
    cell_colors = [[(0.95, 0.95, 0.95)] * len(method_list) for _ in row_labels]
    plt.subplot(fig_num)
    plt.title(fig_name)
    lightgrn = (0.5, 0.8, 0.5)  # background color of the row-label cells
    plt.table(cellText=table_vals,
              rowLabels=row_labels,
              colLabels=method_list,
              rowColours=[lightgrn] * len(row_labels),
              colColours=color_list,
              cellColours=cell_colors,
              cellLoc='center',
              loc='upper left')
    plt.axis('off')  # a table subplot needs no axes
"""This module test the Airtest keypoint matching methods."""
from random import random
import matplotlib.pyplot as plt
from plot import PlotResult
from profile_recorder import ProfileRecorder
def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name):
"""对指定的图片进行性能测试."""
profiler = ProfileRecorder(0.05)
# 加载图片
profiler.load_images(search_file, screen_file)
# 传入待测试的方法列表
profiler.profile_methods(method_list)
# 将性能数据写入文件
profiler.wite_to_json(dir_path, file_name)
def plot_one_image_result(dir_path, file_name):
"""绘制结果."""
plot_object = PlotResult(dir_path, file_name)
plot_object.plot_cpu_mem_keypoints()
def test_and_profile_and_plot(search_file, screen_file, dir_path, file_name, method_list):
"""单张图片:性能测试+绘制结果."""
# 写入性能数据
profile_different_methods(search_file, screen_file, method_list, dir_path, file_name)
# 绘制图形
plot_one_image_result(dir_path, file_name)
def test_and_profile_all_images(method_list):
"""测试各种images,作对比."""
# 生成性能数据1
search_file, screen_file = "sample\\high_dpi\\tpl1551940579340.png", "sample\\high_dpi\\tpl1551944272194.png"
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
profile_different_methods(search_file, screen_file, method_list, high_dpi_dir_path, high_dpi_file_name)
# 生成性能数据2
search_file, screen_file = "sample\\rich_texture\\search.png", "sample\\rich_texture\\screen.png"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
profile_different_methods(search_file, screen_file, method_list, rich_texture_dir_path, rich_texture_file_name)
# 生成性能数据3
search_file, screen_file = "sample\\text\\search.png", "sample\\text\\screen.png"
text_dir_path, text_file_name = "result", "text.json"
profile_different_methods(search_file, screen_file, method_list, text_dir_path, text_file_name)
def plot_profiled_all_images_table(method_list):
"""绘制多个图片的结果."""
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
text_dir_path, text_file_name = "result", "text.json"
image_list = ['high_dpi', 'rich_texture', 'text']
# high_dpi_method_exec_info
high_dpi_plot_object = PlotResult(high_dpi_dir_path, high_dpi_file_name)
high_dpi_method_exec_info = high_dpi_plot_object.method_exec_info
# rich_texture_method_exec_info
rich_texture_plot_object = PlotResult(rich_texture_dir_path, rich_texture_file_name)
rich_texture_method_exec_info = rich_texture_plot_object.method_exec_info
# text_method_exec_info
text_plot_object = PlotResult(text_dir_path, text_file_name)
text_method_exec_info = text_plot_object.method_exec_info
exec_info_list = [high_dpi_method_exec_info, rich_texture_method_exec_info, text_method_exec_info]
# 提取对应结果:
mem_compare_dict, cpu_compare_dict, succeed_compare_dict = {}, {}, {}
for index, method in enumerate(method_list):
mem_list, cpu_list, succeed_list = [], [], []
for exec_info in exec_info_list:
current_method_exec_info = exec_info[index]
mem_list.append(round(current_method_exec_info["mem_max"], 2)) # MB
# mem_list.append(round(current_method_exec_info["mem_max"] / 1024, 2)) # GB
cpu_list.append(round(current_method_exec_info["cpu_max"], 2))
succeed_ret = True if current_method_exec_info["result"] else False
succeed_list.append(succeed_ret)
mem_compare_dict.update({method: mem_list})
cpu_compare_dict.update({method: cpu_list})
succeed_compare_dict.update({method: succeed_list})
color_list = get_color_list(method_list)
# # 绘制三张表格
# plot_compare_table(image_list, method_list, color_list, mem_compare_dict, "memory (GB)", 311)
# plot_compare_table(image_list, method_list, color_list, cpu_compare_dict, "CPU (%)", 312)
# plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Result", 313)
# plt.show()
# 绘制两个曲线图、一个表格图:
plot_compare_curves(image_list, method_list, color_list, mem_compare_dict, "Title: Memory (GB)", 311)
plot_compare_curves(image_list, method_list, color_list, cpu_compare_dict, "Title: CPU (%)", 312)
plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Title: Result", 313)
plt.show()
def get_color_list(method_list):
"""获取method对应的color列表."""
color_list = []
for method in method_list:
color = tuple([random() for _ in range(3)]) # 随机颜色画线
color_list.append(color)
return color_list
# 关闭坐标轴
def plot_compare_curves(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制对比曲线."""
plt.subplot(fig_num)
plt.title(fig_name, loc="center") # 设置绘图的标题
mix_ins = []
for index, method in enumerate(method_list):
mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color=color_list[index], linestyle='-', marker='.')
# mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color='deepskyblue', linestyle='-', marker='.')
mix_ins.append(mem_ins)
plt.legend(loc='upper right') # 说明标签的位置
plt.grid() # 加网格
# plt.xlabel("Image")
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
if __name__ == '__main__':
method_list = ["kaze", "brisk", "akaze", "orb", "sift", "surf", "brief"]
# 针对一张图片,绘制该张图片的cpu和mem使用情况.截屏[2907, 1403] 截图[1079, 804]
search_file, screen_file = "sample\\high_dpi\\tpl1551940579340.png", "sample\\high_dpi\\tpl1551944272194.png"
dir_path, file_name = "result", "high_dpi.json"
test_and_profile_and_plot(search_file, screen_file, dir_path, file_name, method_list)
# 测试多张图片,写入性能测试数据
test_and_profile_all_images(method_list)
# 对比绘制多张图片的结果
plot_profiled_all_images_table(method_list)
|
def plot_compare_curves(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
    """Draw one comparison curve per method across the given images."""
    plt.subplot(fig_num)
    plt.title(fig_name, loc="center")  # subplot title
    mix_ins = []
    for idx, method in enumerate(method_list):
        line = plt.plot(image_list, compare_dict[method], "-", label=method,
                        color=color_list[idx], linestyle='-', marker='.')
        mix_ins.append(line)
    plt.legend(loc='upper right')  # legend of method names
    plt.grid()
    # NOTE(review): the y-label is always "Mem(MB)" even when callers plot CPU
    # data with this helper -- confirm whether that is intentional.
    plt.ylabel("Mem(MB)")
    plt.ylim(bottom=0)
"""This module test the Airtest keypoint matching methods."""
from random import random
import matplotlib.pyplot as plt
from plot import PlotResult
from profile_recorder import ProfileRecorder
def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name):
"""对指定的图片进行性能测试."""
profiler = ProfileRecorder(0.05)
# 加载图片
profiler.load_images(search_file, screen_file)
# 传入待测试的方法列表
profiler.profile_methods(method_list)
# 将性能数据写入文件
profiler.wite_to_json(dir_path, file_name)
def plot_one_image_result(dir_path, file_name):
"""绘制结果."""
plot_object = PlotResult(dir_path, file_name)
plot_object.plot_cpu_mem_keypoints()
def test_and_profile_and_plot(search_file, screen_file, dir_path, file_name, method_list):
"""单张图片:性能测试+绘制结果."""
# 写入性能数据
profile_different_methods(search_file, screen_file, method_list, dir_path, file_name)
# 绘制图形
plot_one_image_result(dir_path, file_name)
def test_and_profile_all_images(method_list):
"""测试各种images,作对比."""
# 生成性能数据1
search_file, screen_file = "sample\\high_dpi\\tpl1551940579340.png", "sample\\high_dpi\\tpl1551944272194.png"
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
profile_different_methods(search_file, screen_file, method_list, high_dpi_dir_path, high_dpi_file_name)
# 生成性能数据2
search_file, screen_file = "sample\\rich_texture\\search.png", "sample\\rich_texture\\screen.png"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
profile_different_methods(search_file, screen_file, method_list, rich_texture_dir_path, rich_texture_file_name)
# 生成性能数据3
search_file, screen_file = "sample\\text\\search.png", "sample\\text\\screen.png"
text_dir_path, text_file_name = "result", "text.json"
profile_different_methods(search_file, screen_file, method_list, text_dir_path, text_file_name)
def plot_profiled_all_images_table(method_list):
"""绘制多个图片的结果."""
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
text_dir_path, text_file_name = "result", "text.json"
image_list = ['high_dpi', 'rich_texture', 'text']
# high_dpi_method_exec_info
high_dpi_plot_object = PlotResult(high_dpi_dir_path, high_dpi_file_name)
high_dpi_method_exec_info = high_dpi_plot_object.method_exec_info
# rich_texture_method_exec_info
rich_texture_plot_object = PlotResult(rich_texture_dir_path, rich_texture_file_name)
rich_texture_method_exec_info = rich_texture_plot_object.method_exec_info
# text_method_exec_info
text_plot_object = PlotResult(text_dir_path, text_file_name)
text_method_exec_info = text_plot_object.method_exec_info
exec_info_list = [high_dpi_method_exec_info, rich_texture_method_exec_info, text_method_exec_info]
# 提取对应结果:
mem_compare_dict, cpu_compare_dict, succeed_compare_dict = {}, {}, {}
for index, method in enumerate(method_list):
mem_list, cpu_list, succeed_list = [], [], []
for exec_info in exec_info_list:
current_method_exec_info = exec_info[index]
mem_list.append(round(current_method_exec_info["mem_max"], 2)) # MB
# mem_list.append(round(current_method_exec_info["mem_max"] / 1024, 2)) # GB
cpu_list.append(round(current_method_exec_info["cpu_max"], 2))
succeed_ret = True if current_method_exec_info["result"] else False
succeed_list.append(succeed_ret)
mem_compare_dict.update({method: mem_list})
cpu_compare_dict.update({method: cpu_list})
succeed_compare_dict.update({method: succeed_list})
color_list = get_color_list(method_list)
# # 绘制三张表格
# plot_compare_table(image_list, method_list, color_list, mem_compare_dict, "memory (GB)", 311)
# plot_compare_table(image_list, method_list, color_list, cpu_compare_dict, "CPU (%)", 312)
# plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Result", 313)
# plt.show()
# 绘制两个曲线图、一个表格图:
plot_compare_curves(image_list, method_list, color_list, mem_compare_dict, "Title: Memory (GB)", 311)
plot_compare_curves(image_list, method_list, color_list, cpu_compare_dict, "Title: CPU (%)", 312)
plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Title: Result", 313)
plt.show()
def get_color_list(method_list):
"""获取method对应的color列表."""
color_list = []
for method in method_list:
color = tuple([random() for _ in range(3)]) # 随机颜色画线
color_list.append(color)
return color_list
def plot_compare_table(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制了对比表格."""
row_labels = image_list
# 写入值:
table_vals = []
for i in range(len(row_labels)):
row_vals = []
for method in method_list:
row_vals.append(compare_dict[method][i])
table_vals.append(row_vals)
# 绘制表格图
colors = [[(0.95, 0.95, 0.95) for c in range(len(method_list))] for r in range(len(row_labels))] # cell的颜色
# plt.figure(figsize=(8, 4), dpi=120)
plt.subplot(fig_num)
plt.title(fig_name) # 绘制标题
lightgrn = (0.5, 0.8, 0.5) # 这个是label的背景色
plt.table(cellText=table_vals,
rowLabels=row_labels,
colLabels=method_list,
rowColours=[lightgrn] * len(row_labels),
colColours=color_list,
cellColours=colors,
cellLoc='center',
loc='upper left')
plt.axis('off') # 关闭坐标轴
if __name__ == '__main__':
method_list = ["kaze", "brisk", "akaze", "orb", "sift", "surf", "brief"]
# 针对一张图片,绘制该张图片的cpu和mem使用情况.截屏[2907, 1403] 截图[1079, 804]
search_file, screen_file = "sample\\high_dpi\\tpl1551940579340.png", "sample\\high_dpi\\tpl1551944272194.png"
dir_path, file_name = "result", "high_dpi.json"
test_and_profile_and_plot(search_file, screen_file, dir_path, file_name, method_list)
# 测试多张图片,写入性能测试数据
test_and_profile_all_images(method_list)
# 对比绘制多张图片的结果
plot_profiled_all_images_table(method_list)
|
joeyespo/gitpress | gitpress/helpers.py | remove_directory | python | def remove_directory(directory, show_warnings=True):
errors = []
def onerror(function, path, excinfo):
if show_warnings:
print 'Cannot delete %s: %s' % (os.path.relpath(directory), excinfo[1])
errors.append((function, path, excinfo))
if os.path.exists(directory):
if not os.path.isdir(directory):
raise NotADirectoryError(directory)
shutil.rmtree(directory, onerror=onerror)
return errors | Deletes a directory and its contents.
Returns a list of errors in form (function, path, excinfo). | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/helpers.py#L13-L28 | null | import os
import shutil
class NotADirectoryError(Exception):
"""Indicates a file was found when a directory was expected."""
def __init__(self, directory, message=None):
super(NotADirectoryError, self).__init__(
'Expected a directory, found a file instead at ' + directory)
self.directory = os.path.abspath(directory)
def copy_files(source_files, target_directory, source_directory=None):
"""Copies a list of files to the specified directory.
If source_directory is provided, it will be prepended to each source file."""
try:
os.makedirs(target_directory)
except: # TODO: specific exception?
pass
for f in source_files:
source = os.path.join(source_directory, f) if source_directory else f
target = os.path.join(target_directory, f)
shutil.copy2(source, target)
def yes_or_no(message):
"""Gets user input and returns True for yes and False for no."""
while True:
print message, '(yes/no)',
line = raw_input()
if line is None:
return None
line = line.lower()
if line == 'y' or line == 'ye' or line == 'yes':
return True
if line == 'n' or line == 'no':
return False
|
joeyespo/gitpress | gitpress/helpers.py | copy_files | python | def copy_files(source_files, target_directory, source_directory=None):
try:
os.makedirs(target_directory)
except: # TODO: specific exception?
pass
for f in source_files:
source = os.path.join(source_directory, f) if source_directory else f
target = os.path.join(target_directory, f)
shutil.copy2(source, target) | Copies a list of files to the specified directory.
If source_directory is provided, it will be prepended to each source file. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/helpers.py#L31-L41 | null | import os
import shutil
class NotADirectoryError(Exception):
"""Indicates a file was found when a directory was expected."""
def __init__(self, directory, message=None):
super(NotADirectoryError, self).__init__(
'Expected a directory, found a file instead at ' + directory)
self.directory = os.path.abspath(directory)
def remove_directory(directory, show_warnings=True):
"""Deletes a directory and its contents.
Returns a list of errors in form (function, path, excinfo)."""
errors = []
def onerror(function, path, excinfo):
if show_warnings:
print 'Cannot delete %s: %s' % (os.path.relpath(directory), excinfo[1])
errors.append((function, path, excinfo))
if os.path.exists(directory):
if not os.path.isdir(directory):
raise NotADirectoryError(directory)
shutil.rmtree(directory, onerror=onerror)
return errors
def yes_or_no(message):
"""Gets user input and returns True for yes and False for no."""
while True:
print message, '(yes/no)',
line = raw_input()
if line is None:
return None
line = line.lower()
if line == 'y' or line == 'ye' or line == 'yes':
return True
if line == 'n' or line == 'no':
return False
|
joeyespo/gitpress | gitpress/helpers.py | yes_or_no | python | def yes_or_no(message):
while True:
print message, '(yes/no)',
line = raw_input()
if line is None:
return None
line = line.lower()
if line == 'y' or line == 'ye' or line == 'yes':
return True
if line == 'n' or line == 'no':
return False | Gets user input and returns True for yes and False for no. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/helpers.py#L44-L55 | null | import os
import shutil
class NotADirectoryError(Exception):
"""Indicates a file was found when a directory was expected."""
def __init__(self, directory, message=None):
super(NotADirectoryError, self).__init__(
'Expected a directory, found a file instead at ' + directory)
self.directory = os.path.abspath(directory)
def remove_directory(directory, show_warnings=True):
"""Deletes a directory and its contents.
Returns a list of errors in form (function, path, excinfo)."""
errors = []
def onerror(function, path, excinfo):
if show_warnings:
print 'Cannot delete %s: %s' % (os.path.relpath(directory), excinfo[1])
errors.append((function, path, excinfo))
if os.path.exists(directory):
if not os.path.isdir(directory):
raise NotADirectoryError(directory)
shutil.rmtree(directory, onerror=onerror)
return errors
def copy_files(source_files, target_directory, source_directory=None):
"""Copies a list of files to the specified directory.
If source_directory is provided, it will be prepended to each source file."""
try:
os.makedirs(target_directory)
except: # TODO: specific exception?
pass
for f in source_files:
source = os.path.join(source_directory, f) if source_directory else f
target = os.path.join(target_directory, f)
shutil.copy2(source, target)
|
def list_plugins(directory=None):
    """Return the names of the configured plugins, or None when absent."""
    repo = require_repo(directory)
    plugins = get_value(repo, 'plugins')
    # Only a non-empty dict counts as a valid plugin mapping.
    if isinstance(plugins, dict) and plugins:
        return plugins.keys()
    return None
"def require_repo(directory=None):\n \"\"\"Checks for a presentation repository and raises an exception if not found.\"\"\"\n if directory and not os.path.isdir(directory):\n raise ValueError('Directory not found: ' + repr(directory))\n repo = repo_path(directory)\n if not os.path.isdir(repo):\n raise RepositoryNotFoundError(directory)\n return repo\n",
"def get_value(repo_directory, key, expect_type=None):\n \"\"\"Gets the value of the specified key in the config file.\"\"\"\n config = read_config(repo_directory)\n value = config.get(key)\n if expect_type and value is not None and not isinstance(value, expect_type):\n raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'\n % (repr(key), repr(expect_type), repr(type(value))))\n return value\n"
] | from .config import get_value, set_value
from .repository import require_repo
def add_plugin(plugin, directory=None):
"""Adds the specified plugin. This returns False if it was already added."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins', expect_type=dict)
if plugin in plugins:
return False
plugins[plugin] = {}
set_value(repo, 'plugins', plugins)
return True
def remove_plugin(plugin, directory=None):
"""Removes the specified plugin."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins', expect_type=dict)
if plugin not in plugins:
return False
del plugins[plugin]
set_value(repo, 'plugins', plugins)
return True
def get_plugin_settings(plugin, directory=None):
"""Gets the settings for the specified plugin."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins')
return plugins.get(plugin) if isinstance(plugins, dict) else None
|
def add_plugin(plugin, directory=None):
    """Register *plugin* with empty settings; returns False if already added."""
    repo = require_repo(directory)
    plugins = get_value(repo, 'plugins', expect_type=dict)
    already_added = plugin in plugins
    if not already_added:
        plugins[plugin] = {}  # new plugins start with empty settings
        set_value(repo, 'plugins', plugins)
    return not already_added
"def require_repo(directory=None):\n \"\"\"Checks for a presentation repository and raises an exception if not found.\"\"\"\n if directory and not os.path.isdir(directory):\n raise ValueError('Directory not found: ' + repr(directory))\n repo = repo_path(directory)\n if not os.path.isdir(repo):\n raise RepositoryNotFoundError(directory)\n return repo\n",
"def get_value(repo_directory, key, expect_type=None):\n \"\"\"Gets the value of the specified key in the config file.\"\"\"\n config = read_config(repo_directory)\n value = config.get(key)\n if expect_type and value is not None and not isinstance(value, expect_type):\n raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'\n % (repr(key), repr(expect_type), repr(type(value))))\n return value\n",
"def set_value(repo_directory, key, value, strict=True):\n \"\"\"Sets the value of a particular key in the config file. This has no effect when setting to the same value.\"\"\"\n if value is None:\n raise ValueError('Argument \"value\" must not be None.')\n\n # Read values and do nothing if not making any changes\n config = read_config(repo_directory)\n old = config.get(key)\n if old == value:\n return old\n\n # Check schema\n if strict and old is not None and not isinstance(old, type(value)):\n raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'\n % (repr(key), repr(type(value)), repr(type(old))))\n\n # Set new value and save results\n config[key] = value\n write_config(repo_directory, config)\n return old\n"
] | from .config import get_value, set_value
from .repository import require_repo
def list_plugins(directory=None):
"""Gets a list of the installed themes."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins')
if not plugins or not isinstance(plugins, dict):
return None
return plugins.keys()
def remove_plugin(plugin, directory=None):
"""Removes the specified plugin."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins', expect_type=dict)
if plugin not in plugins:
return False
del plugins[plugin]
set_value(repo, 'plugins', plugins)
return True
def get_plugin_settings(plugin, directory=None):
"""Gets the settings for the specified plugin."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins')
return plugins.get(plugin) if isinstance(plugins, dict) else None
|
def get_plugin_settings(plugin, directory=None):
    """Return the settings dict for *plugin*, or None when unavailable."""
    repo = require_repo(directory)
    plugins = get_value(repo, 'plugins')
    # Any malformed (non-dict) 'plugins' value yields None rather than an error.
    if isinstance(plugins, dict):
        return plugins.get(plugin)
    return None
"def require_repo(directory=None):\n \"\"\"Checks for a presentation repository and raises an exception if not found.\"\"\"\n if directory and not os.path.isdir(directory):\n raise ValueError('Directory not found: ' + repr(directory))\n repo = repo_path(directory)\n if not os.path.isdir(repo):\n raise RepositoryNotFoundError(directory)\n return repo\n",
"def get_value(repo_directory, key, expect_type=None):\n \"\"\"Gets the value of the specified key in the config file.\"\"\"\n config = read_config(repo_directory)\n value = config.get(key)\n if expect_type and value is not None and not isinstance(value, expect_type):\n raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'\n % (repr(key), repr(expect_type), repr(type(value))))\n return value\n"
] | from .config import get_value, set_value
from .repository import require_repo
def list_plugins(directory=None):
"""Gets a list of the installed themes."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins')
if not plugins or not isinstance(plugins, dict):
return None
return plugins.keys()
def add_plugin(plugin, directory=None):
"""Adds the specified plugin. This returns False if it was already added."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins', expect_type=dict)
if plugin in plugins:
return False
plugins[plugin] = {}
set_value(repo, 'plugins', plugins)
return True
def remove_plugin(plugin, directory=None):
"""Removes the specified plugin."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins', expect_type=dict)
if plugin not in plugins:
return False
del plugins[plugin]
set_value(repo, 'plugins', plugins)
return True
|
joeyespo/gitpress | gitpress/previewing.py | preview | python | def preview(directory=None, host=None, port=None, watch=True):
directory = directory or '.'
host = host or '127.0.0.1'
port = port or 5000
# TODO: admin interface
# TODO: use cache_only to keep from modifying output directly
out_directory = build(directory)
# Serve generated site
os.chdir(out_directory)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer((host, port), Handler)
print ' * Serving on http://%s:%s/' % (host, port)
httpd.serve_forever() | Runs a local server to preview the working directory of a repository. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/previewing.py#L7-L23 | [
"def build(content_directory=None, out_directory=None):\n \"\"\"Builds the site from its content and presentation repository.\"\"\"\n content_directory = content_directory or '.'\n out_directory = os.path.abspath(out_directory or default_out_directory)\n repo = require_repo(content_directory)\n\n # Prevent user mistakes\n if out_directory == '.':\n raise ValueError('Output directory must be different than the source directory: ' + repr(out_directory))\n if os.path.basename(os.path.relpath(out_directory, content_directory)) == '..':\n raise ValueError('Output directory must not contain the source directory: ' + repr(out_directory))\n\n # TODO: read config\n # TODO: use virtualenv\n # TODO: init and run plugins\n # TODO: process with active theme\n\n # Collect and copy static files\n files = presentation_files(repo)\n remove_directory(out_directory)\n copy_files(files, out_directory, repo)\n\n return out_directory\n"
] | import os
import SocketServer
import SimpleHTTPServer
from .building import build
|
joeyespo/gitpress | gitpress/repository.py | require_repo | python | def require_repo(directory=None):
if directory and not os.path.isdir(directory):
raise ValueError('Directory not found: ' + repr(directory))
repo = repo_path(directory)
if not os.path.isdir(repo):
raise RepositoryNotFoundError(directory)
return repo | Checks for a presentation repository and raises an exception if not found. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/repository.py#L30-L37 | [
"def repo_path(directory=None):\n \"\"\"Gets the presentation repository from the specified directory.\"\"\"\n return os.path.join(directory, repo_dir) if directory else repo_dir\n"
] | import os
import re
import shutil
import fnmatch
import subprocess
repo_dir = '.gitpress'
templates_path = os.path.join(os.path.dirname(__file__), 'templates')
default_template_path = os.path.join(templates_path, 'default')
specials = ['.*', '_*']
specials_re = re.compile('|'.join([fnmatch.translate(x) for x in specials]))
class RepositoryAlreadyExistsError(Exception):
"""Indicates 'repo_dir' already exists while attempting to create a new one."""
def __init__(self, directory=None, repo=None):
super(RepositoryAlreadyExistsError, self).__init__()
self.directory = os.path.abspath(directory if directory else os.getcwd())
self.repo = os.path.abspath(repo or repo_path(self.directory))
class RepositoryNotFoundError(Exception):
"""Indicates an existing 'present_dir' is required, but was not found."""
def __init__(self, directory=None):
super(RepositoryNotFoundError, self).__init__()
self.directory = os.path.abspath(directory if directory else os.getcwd())
def repo_path(directory=None):
"""Gets the presentation repository from the specified directory."""
return os.path.join(directory, repo_dir) if directory else repo_dir
def init(directory=None):
"""Initializes a Gitpress presentation repository at the specified directory."""
repo = repo_path(directory)
if os.path.isdir(repo):
raise RepositoryAlreadyExistsError(directory, repo)
# Initialize repository with default template
shutil.copytree(default_template_path, repo)
message = '"Default presentation content."'
subprocess.call(['git', 'init', '-q', repo])
subprocess.call(['git', 'add', '.'], cwd=repo)
subprocess.call(['git', 'commit', '-q', '-m', message], cwd=repo)
return repo
def presentation_files(path=None, excludes=None, includes=None):
"""Gets a list of the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority."""
return list(iterate_presentation_files(path, excludes, includes))
def iterate_presentation_files(path=None, excludes=None, includes=None):
"""Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority."""
# Defaults
if includes is None:
includes = []
if excludes is None:
excludes = []
# Transform glob patterns to regular expressions
includes_pattern = r'|'.join([fnmatch.translate(x) for x in includes]) or r'$.'
excludes_pattern = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
includes_re = re.compile(includes_pattern)
excludes_re = re.compile(excludes_pattern)
def included(root, name):
"""Returns True if the specified file is a presentation file."""
full_path = os.path.join(root, name)
# Explicitly included files takes priority
if includes_re.match(full_path):
return True
# Ignore special and excluded files
return (not specials_re.match(name)
and not excludes_re.match(full_path))
# Get a filtered list of paths to be built
for root, dirs, files in os.walk(path):
dirs[:] = [d for d in dirs if included(root, d)]
files = [f for f in files if included(root, f)]
for f in files:
yield os.path.relpath(os.path.join(root, f), path)
|
joeyespo/gitpress | gitpress/repository.py | init | python | def init(directory=None):
repo = repo_path(directory)
if os.path.isdir(repo):
raise RepositoryAlreadyExistsError(directory, repo)
# Initialize repository with default template
shutil.copytree(default_template_path, repo)
message = '"Default presentation content."'
subprocess.call(['git', 'init', '-q', repo])
subprocess.call(['git', 'add', '.'], cwd=repo)
subprocess.call(['git', 'commit', '-q', '-m', message], cwd=repo)
return repo | Initializes a Gitpress presentation repository at the specified directory. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/repository.py#L45-L59 | [
"def repo_path(directory=None):\n \"\"\"Gets the presentation repository from the specified directory.\"\"\"\n return os.path.join(directory, repo_dir) if directory else repo_dir\n"
] | import os
import re
import shutil
import fnmatch
import subprocess
repo_dir = '.gitpress'
templates_path = os.path.join(os.path.dirname(__file__), 'templates')
default_template_path = os.path.join(templates_path, 'default')
specials = ['.*', '_*']
specials_re = re.compile('|'.join([fnmatch.translate(x) for x in specials]))
class RepositoryAlreadyExistsError(Exception):
"""Indicates 'repo_dir' already exists while attempting to create a new one."""
def __init__(self, directory=None, repo=None):
super(RepositoryAlreadyExistsError, self).__init__()
self.directory = os.path.abspath(directory if directory else os.getcwd())
self.repo = os.path.abspath(repo or repo_path(self.directory))
class RepositoryNotFoundError(Exception):
"""Indicates an existing 'present_dir' is required, but was not found."""
def __init__(self, directory=None):
super(RepositoryNotFoundError, self).__init__()
self.directory = os.path.abspath(directory if directory else os.getcwd())
def require_repo(directory=None):
"""Checks for a presentation repository and raises an exception if not found."""
if directory and not os.path.isdir(directory):
raise ValueError('Directory not found: ' + repr(directory))
repo = repo_path(directory)
if not os.path.isdir(repo):
raise RepositoryNotFoundError(directory)
return repo
def repo_path(directory=None):
"""Gets the presentation repository from the specified directory."""
return os.path.join(directory, repo_dir) if directory else repo_dir
def presentation_files(path=None, excludes=None, includes=None):
"""Gets a list of the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority."""
return list(iterate_presentation_files(path, excludes, includes))
def iterate_presentation_files(path=None, excludes=None, includes=None):
"""Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority."""
# Defaults
if includes is None:
includes = []
if excludes is None:
excludes = []
# Transform glob patterns to regular expressions
includes_pattern = r'|'.join([fnmatch.translate(x) for x in includes]) or r'$.'
excludes_pattern = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
includes_re = re.compile(includes_pattern)
excludes_re = re.compile(excludes_pattern)
def included(root, name):
"""Returns True if the specified file is a presentation file."""
full_path = os.path.join(root, name)
# Explicitly included files takes priority
if includes_re.match(full_path):
return True
# Ignore special and excluded files
return (not specials_re.match(name)
and not excludes_re.match(full_path))
# Get a filtered list of paths to be built
for root, dirs, files in os.walk(path):
dirs[:] = [d for d in dirs if included(root, d)]
files = [f for f in files if included(root, f)]
for f in files:
yield os.path.relpath(os.path.join(root, f), path)
|
joeyespo/gitpress | gitpress/repository.py | iterate_presentation_files | python | def iterate_presentation_files(path=None, excludes=None, includes=None):
# Defaults
if includes is None:
includes = []
if excludes is None:
excludes = []
# Transform glob patterns to regular expressions
includes_pattern = r'|'.join([fnmatch.translate(x) for x in includes]) or r'$.'
excludes_pattern = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
includes_re = re.compile(includes_pattern)
excludes_re = re.compile(excludes_pattern)
def included(root, name):
"""Returns True if the specified file is a presentation file."""
full_path = os.path.join(root, name)
# Explicitly included files takes priority
if includes_re.match(full_path):
return True
# Ignore special and excluded files
return (not specials_re.match(name)
and not excludes_re.match(full_path))
# Get a filtered list of paths to be built
for root, dirs, files in os.walk(path):
dirs[:] = [d for d in dirs if included(root, d)]
files = [f for f in files if included(root, f)]
for f in files:
yield os.path.relpath(os.path.join(root, f), path) | Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/repository.py#L68-L99 | null | import os
import re
import shutil
import fnmatch
import subprocess
repo_dir = '.gitpress'
templates_path = os.path.join(os.path.dirname(__file__), 'templates')
default_template_path = os.path.join(templates_path, 'default')
specials = ['.*', '_*']
specials_re = re.compile('|'.join([fnmatch.translate(x) for x in specials]))
class RepositoryAlreadyExistsError(Exception):
"""Indicates 'repo_dir' already exists while attempting to create a new one."""
def __init__(self, directory=None, repo=None):
super(RepositoryAlreadyExistsError, self).__init__()
self.directory = os.path.abspath(directory if directory else os.getcwd())
self.repo = os.path.abspath(repo or repo_path(self.directory))
class RepositoryNotFoundError(Exception):
"""Indicates an existing 'present_dir' is required, but was not found."""
def __init__(self, directory=None):
super(RepositoryNotFoundError, self).__init__()
self.directory = os.path.abspath(directory if directory else os.getcwd())
def require_repo(directory=None):
"""Checks for a presentation repository and raises an exception if not found."""
if directory and not os.path.isdir(directory):
raise ValueError('Directory not found: ' + repr(directory))
repo = repo_path(directory)
if not os.path.isdir(repo):
raise RepositoryNotFoundError(directory)
return repo
def repo_path(directory=None):
"""Gets the presentation repository from the specified directory."""
return os.path.join(directory, repo_dir) if directory else repo_dir
def init(directory=None):
"""Initializes a Gitpress presentation repository at the specified directory."""
repo = repo_path(directory)
if os.path.isdir(repo):
raise RepositoryAlreadyExistsError(directory, repo)
# Initialize repository with default template
shutil.copytree(default_template_path, repo)
message = '"Default presentation content."'
subprocess.call(['git', 'init', '-q', repo])
subprocess.call(['git', 'add', '.'], cwd=repo)
subprocess.call(['git', 'commit', '-q', '-m', message], cwd=repo)
return repo
def presentation_files(path=None, excludes=None, includes=None):
"""Gets a list of the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority."""
return list(iterate_presentation_files(path, excludes, includes))
|
joeyespo/gitpress | gitpress/config.py | read_config_file | python | def read_config_file(path):
try:
with open(path, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
except IOError as ex:
if ex != errno.ENOENT:
raise
return {} | Returns the configuration from the specified file. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/config.py#L23-L31 | null | import os
import errno
from collections import OrderedDict
try:
import simplejson as json
except ImportError:
import json
config_file = '_config.json'
class ConfigSchemaError(Exception):
"""Indicates the config does not conform to the expected types."""
pass
def read_config(repo_directory):
"""Returns the configuration from the presentation repository."""
return read_config_file(os.path.join(repo_directory, config_file))
def write_config(repo_directory, config):
"""Writes the specified configuration to the presentation repository."""
return write_config_file(os.path.join(repo_directory, config_file), config)
def write_config_file(path, config):
"""Writes the specified configuration to the specified file."""
contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\n'
try:
with open(path, 'w') as f:
f.write(contents)
return True
except IOError as ex:
if ex != errno.ENOENT:
raise
return False
def get_value(repo_directory, key, expect_type=None):
"""Gets the value of the specified key in the config file."""
config = read_config(repo_directory)
value = config.get(key)
if expect_type and value is not None and not isinstance(value, expect_type):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(expect_type), repr(type(value))))
return value
def set_value(repo_directory, key, value, strict=True):
"""Sets the value of a particular key in the config file. This has no effect when setting to the same value."""
if value is None:
raise ValueError('Argument "value" must not be None.')
# Read values and do nothing if not making any changes
config = read_config(repo_directory)
old = config.get(key)
if old == value:
return old
# Check schema
if strict and old is not None and not isinstance(old, type(value)):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(type(value)), repr(type(old))))
# Set new value and save results
config[key] = value
write_config(repo_directory, config)
return old
|
joeyespo/gitpress | gitpress/config.py | write_config | python | def write_config(repo_directory, config):
return write_config_file(os.path.join(repo_directory, config_file), config) | Writes the specified configuration to the presentation repository. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/config.py#L34-L36 | [
"def write_config_file(path, config):\n \"\"\"Writes the specified configuration to the specified file.\"\"\"\n contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\\n'\n try:\n with open(path, 'w') as f:\n f.write(contents)\n return True\n except IOError as ex:\n if ex != errno.ENOENT:\n raise\n return False\n"
] | import os
import errno
from collections import OrderedDict
try:
import simplejson as json
except ImportError:
import json
config_file = '_config.json'
class ConfigSchemaError(Exception):
"""Indicates the config does not conform to the expected types."""
pass
def read_config(repo_directory):
"""Returns the configuration from the presentation repository."""
return read_config_file(os.path.join(repo_directory, config_file))
def read_config_file(path):
"""Returns the configuration from the specified file."""
try:
with open(path, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
except IOError as ex:
if ex != errno.ENOENT:
raise
return {}
def write_config_file(path, config):
"""Writes the specified configuration to the specified file."""
contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\n'
try:
with open(path, 'w') as f:
f.write(contents)
return True
except IOError as ex:
if ex != errno.ENOENT:
raise
return False
def get_value(repo_directory, key, expect_type=None):
"""Gets the value of the specified key in the config file."""
config = read_config(repo_directory)
value = config.get(key)
if expect_type and value is not None and not isinstance(value, expect_type):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(expect_type), repr(type(value))))
return value
def set_value(repo_directory, key, value, strict=True):
"""Sets the value of a particular key in the config file. This has no effect when setting to the same value."""
if value is None:
raise ValueError('Argument "value" must not be None.')
# Read values and do nothing if not making any changes
config = read_config(repo_directory)
old = config.get(key)
if old == value:
return old
# Check schema
if strict and old is not None and not isinstance(old, type(value)):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(type(value)), repr(type(old))))
# Set new value and save results
config[key] = value
write_config(repo_directory, config)
return old
|
joeyespo/gitpress | gitpress/config.py | write_config_file | python | def write_config_file(path, config):
contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\n'
try:
with open(path, 'w') as f:
f.write(contents)
return True
except IOError as ex:
if ex != errno.ENOENT:
raise
return False | Writes the specified configuration to the specified file. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/config.py#L39-L49 | null | import os
import errno
from collections import OrderedDict
try:
import simplejson as json
except ImportError:
import json
config_file = '_config.json'
class ConfigSchemaError(Exception):
"""Indicates the config does not conform to the expected types."""
pass
def read_config(repo_directory):
"""Returns the configuration from the presentation repository."""
return read_config_file(os.path.join(repo_directory, config_file))
def read_config_file(path):
"""Returns the configuration from the specified file."""
try:
with open(path, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
except IOError as ex:
if ex != errno.ENOENT:
raise
return {}
def write_config(repo_directory, config):
"""Writes the specified configuration to the presentation repository."""
return write_config_file(os.path.join(repo_directory, config_file), config)
def get_value(repo_directory, key, expect_type=None):
"""Gets the value of the specified key in the config file."""
config = read_config(repo_directory)
value = config.get(key)
if expect_type and value is not None and not isinstance(value, expect_type):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(expect_type), repr(type(value))))
return value
def set_value(repo_directory, key, value, strict=True):
"""Sets the value of a particular key in the config file. This has no effect when setting to the same value."""
if value is None:
raise ValueError('Argument "value" must not be None.')
# Read values and do nothing if not making any changes
config = read_config(repo_directory)
old = config.get(key)
if old == value:
return old
# Check schema
if strict and old is not None and not isinstance(old, type(value)):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(type(value)), repr(type(old))))
# Set new value and save results
config[key] = value
write_config(repo_directory, config)
return old
|
joeyespo/gitpress | gitpress/config.py | get_value | python | def get_value(repo_directory, key, expect_type=None):
config = read_config(repo_directory)
value = config.get(key)
if expect_type and value is not None and not isinstance(value, expect_type):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(expect_type), repr(type(value))))
return value | Gets the value of the specified key in the config file. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/config.py#L52-L59 | [
"def read_config(repo_directory):\n \"\"\"Returns the configuration from the presentation repository.\"\"\"\n return read_config_file(os.path.join(repo_directory, config_file))\n"
] | import os
import errno
from collections import OrderedDict
try:
import simplejson as json
except ImportError:
import json
config_file = '_config.json'
class ConfigSchemaError(Exception):
"""Indicates the config does not conform to the expected types."""
pass
def read_config(repo_directory):
"""Returns the configuration from the presentation repository."""
return read_config_file(os.path.join(repo_directory, config_file))
def read_config_file(path):
"""Returns the configuration from the specified file."""
try:
with open(path, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
except IOError as ex:
if ex != errno.ENOENT:
raise
return {}
def write_config(repo_directory, config):
"""Writes the specified configuration to the presentation repository."""
return write_config_file(os.path.join(repo_directory, config_file), config)
def write_config_file(path, config):
"""Writes the specified configuration to the specified file."""
contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\n'
try:
with open(path, 'w') as f:
f.write(contents)
return True
except IOError as ex:
if ex != errno.ENOENT:
raise
return False
def set_value(repo_directory, key, value, strict=True):
"""Sets the value of a particular key in the config file. This has no effect when setting to the same value."""
if value is None:
raise ValueError('Argument "value" must not be None.')
# Read values and do nothing if not making any changes
config = read_config(repo_directory)
old = config.get(key)
if old == value:
return old
# Check schema
if strict and old is not None and not isinstance(old, type(value)):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(type(value)), repr(type(old))))
# Set new value and save results
config[key] = value
write_config(repo_directory, config)
return old
|
joeyespo/gitpress | gitpress/config.py | set_value | python | def set_value(repo_directory, key, value, strict=True):
if value is None:
raise ValueError('Argument "value" must not be None.')
# Read values and do nothing if not making any changes
config = read_config(repo_directory)
old = config.get(key)
if old == value:
return old
# Check schema
if strict and old is not None and not isinstance(old, type(value)):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(type(value)), repr(type(old))))
# Set new value and save results
config[key] = value
write_config(repo_directory, config)
return old | Sets the value of a particular key in the config file. This has no effect when setting to the same value. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/config.py#L62-L81 | [
"def read_config(repo_directory):\n \"\"\"Returns the configuration from the presentation repository.\"\"\"\n return read_config_file(os.path.join(repo_directory, config_file))\n",
"def write_config(repo_directory, config):\n \"\"\"Writes the specified configuration to the presentation repository.\"\"\"\n return write_config_file(os.path.join(repo_directory, config_file), config)\n"
] | import os
import errno
from collections import OrderedDict
try:
import simplejson as json
except ImportError:
import json
config_file = '_config.json'
class ConfigSchemaError(Exception):
"""Indicates the config does not conform to the expected types."""
pass
def read_config(repo_directory):
"""Returns the configuration from the presentation repository."""
return read_config_file(os.path.join(repo_directory, config_file))
def read_config_file(path):
"""Returns the configuration from the specified file."""
try:
with open(path, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
except IOError as ex:
if ex != errno.ENOENT:
raise
return {}
def write_config(repo_directory, config):
"""Writes the specified configuration to the presentation repository."""
return write_config_file(os.path.join(repo_directory, config_file), config)
def write_config_file(path, config):
"""Writes the specified configuration to the specified file."""
contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\n'
try:
with open(path, 'w') as f:
f.write(contents)
return True
except IOError as ex:
if ex != errno.ENOENT:
raise
return False
def get_value(repo_directory, key, expect_type=None):
"""Gets the value of the specified key in the config file."""
config = read_config(repo_directory)
value = config.get(key)
if expect_type and value is not None and not isinstance(value, expect_type):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(expect_type), repr(type(value))))
return value
|
joeyespo/gitpress | gitpress/building.py | build | python | def build(content_directory=None, out_directory=None):
content_directory = content_directory or '.'
out_directory = os.path.abspath(out_directory or default_out_directory)
repo = require_repo(content_directory)
# Prevent user mistakes
if out_directory == '.':
raise ValueError('Output directory must be different than the source directory: ' + repr(out_directory))
if os.path.basename(os.path.relpath(out_directory, content_directory)) == '..':
raise ValueError('Output directory must not contain the source directory: ' + repr(out_directory))
# TODO: read config
# TODO: use virtualenv
# TODO: init and run plugins
# TODO: process with active theme
# Collect and copy static files
files = presentation_files(repo)
remove_directory(out_directory)
copy_files(files, out_directory, repo)
return out_directory | Builds the site from its content and presentation repository. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/building.py#L9-L31 | [
"def require_repo(directory=None):\n \"\"\"Checks for a presentation repository and raises an exception if not found.\"\"\"\n if directory and not os.path.isdir(directory):\n raise ValueError('Directory not found: ' + repr(directory))\n repo = repo_path(directory)\n if not os.path.isdir(repo):\n raise RepositoryNotFoundError(directory)\n return repo\n",
"def presentation_files(path=None, excludes=None, includes=None):\n \"\"\"Gets a list of the repository presentation files relative to 'path',\n not including themes. Note that 'includes' take priority.\"\"\"\n return list(iterate_presentation_files(path, excludes, includes))\n",
"def copy_files(source_files, target_directory, source_directory=None):\n \"\"\"Copies a list of files to the specified directory.\n If source_directory is provided, it will be prepended to each source file.\"\"\"\n try:\n os.makedirs(target_directory)\n except: # TODO: specific exception?\n pass\n for f in source_files:\n source = os.path.join(source_directory, f) if source_directory else f\n target = os.path.join(target_directory, f)\n shutil.copy2(source, target)\n",
"def remove_directory(directory, show_warnings=True):\n \"\"\"Deletes a directory and its contents.\n Returns a list of errors in form (function, path, excinfo).\"\"\"\n errors = []\n\n def onerror(function, path, excinfo):\n if show_warnings:\n print 'Cannot delete %s: %s' % (os.path.relpath(directory), excinfo[1])\n errors.append((function, path, excinfo))\n\n if os.path.exists(directory):\n if not os.path.isdir(directory):\n raise NotADirectoryError(directory)\n shutil.rmtree(directory, onerror=onerror)\n\n return errors\n"
] | import os
from .repository import require_repo, presentation_files
from .helpers import copy_files, remove_directory
default_out_directory = '_site'
|
joeyespo/gitpress | gitpress/command.py | main | python | def main(argv=None):
if argv is None:
argv = sys.argv[1:]
usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
version = 'Gitpress ' + __version__
# Parse options
args = docopt(usage, argv=argv, version=version)
# Execute command
try:
return execute(args)
except RepositoryNotFoundError as ex:
error('No Gitpress repository found at', ex.directory) | The entry point of the application. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/command.py#L40-L54 | [
"def error(*message):\n sys.exit('Error: ' + ' '.join(map(str, message)))\n",
"def execute(args):\n \"\"\"Executes the command indicated by the specified parsed arguments.\"\"\"\n\n def info(*message):\n \"\"\"Displays a message unless -q was specified.\"\"\"\n if not args['-q']:\n print ' '.join(map(str, message))\n\n if args['init']:\n try:\n repo = init(args['<directory>'])\n info('Initialized Gitpress repository in', repo)\n except RepositoryAlreadyExistsError as ex:\n info('Gitpress repository already exists in', ex.repo)\n return 0\n\n if args['preview']:\n directory, address = resolve(args['<directory>'], args['<address>'])\n host, port = split_address(address)\n if address and not host and not port:\n error('Invalid address', repr(address))\n return preview(directory, host=host, port=port)\n\n if args['build']:\n require_repo(args['<directory>'])\n info('Building site', os.path.abspath(args['<directory>'] or '.'))\n try:\n out_directory = build(args['<directory>'], args['--out'])\n except NotADirectoryError as ex:\n error(ex)\n info('Site built in', os.path.abspath(out_directory))\n return 0\n\n if args['themes']:\n theme = args['<theme>']\n if args['use']:\n try:\n switched = use_theme(theme)\n except ConfigSchemaError as ex:\n error('Could not modify config:', ex)\n return 1\n except ThemeNotFoundError as ex:\n error('Theme %s is not currently installed.' 
% repr(theme))\n return 1\n info('Switched to theme %s' if switched else 'Already using %s' % repr(theme))\n elif args['install']:\n # TODO: implement\n raise NotImplementedError()\n elif args['uninstall']:\n # TODO: implement\n raise NotImplementedError()\n else:\n themes = list_themes()\n if themes:\n info('Installed themes:')\n info(' ' + '\\n '.join(themes))\n else:\n info('No themes installed.')\n return 0\n\n if args['plugins']:\n plugin = args['<plugin>']\n if args['add']:\n try:\n added = add_plugin(plugin)\n except ConfigSchemaError as ex:\n error('Could not modify config:', ex)\n return 1\n info(('Added plugin %s' if added else\n 'Plugin %s has already been added.') % repr(plugin))\n elif args['remove']:\n settings = get_plugin_settings(plugin)\n if not args['-f'] and settings and isinstance(settings, dict):\n warning = 'Plugin %s contains settings. Remove?' % repr(plugin)\n if not yes_or_no(warning):\n return 0\n try:\n removed = remove_plugin(plugin)\n except ConfigSchemaError as ex:\n error('Error: Could not modify config:', ex)\n info(('Removed plugin %s' if removed else\n 'Plugin %s has already been removed.') % repr(plugin))\n else:\n plugins = list_plugins()\n info('Installed plugins:\\n ' + '\\n '.join(plugins) if plugins else\n 'No plugins installed.')\n return 0\n\n return 1\n"
] | """\
gitpress.command
~~~~~~~~~~~~~~~~
Implements the command-line interface of Gitpress.
Usage:
gitpress preview [<directory>] [<address>]
gitpress build [-q] [--out <dir>] [<directory>]
gitpress init [-q] [<directory>]
gitpress themes [use <theme> | install <theme> | uninstall <theme>]
gitpress plugins [add <plugin> | remove [-f] <plugin>]
Options:
-h --help Show this help.
--version Show version.
-o --out <dir> The directory to output the rendered site.
-f Force the command to continue without prompting.
-q Quiet mode, suppress all messages except errors.
Notes:
<address> can take the form <host>[:<port>] or just <port>.
"""
import os
import sys
from docopt import docopt
from path_and_address import resolve, split_address
from .config import ConfigSchemaError
from .repository import init, require_repo, RepositoryAlreadyExistsError, RepositoryNotFoundError
from .previewing import preview
from .building import build
from .themes import list_themes, use_theme, ThemeNotFoundError
from .plugins import list_plugins, add_plugin, remove_plugin, get_plugin_settings
from .helpers import yes_or_no, NotADirectoryError
from . import __version__
def execute(args):
"""Executes the command indicated by the specified parsed arguments."""
def info(*message):
"""Displays a message unless -q was specified."""
if not args['-q']:
print ' '.join(map(str, message))
if args['init']:
try:
repo = init(args['<directory>'])
info('Initialized Gitpress repository in', repo)
except RepositoryAlreadyExistsError as ex:
info('Gitpress repository already exists in', ex.repo)
return 0
if args['preview']:
directory, address = resolve(args['<directory>'], args['<address>'])
host, port = split_address(address)
if address and not host and not port:
error('Invalid address', repr(address))
return preview(directory, host=host, port=port)
if args['build']:
require_repo(args['<directory>'])
info('Building site', os.path.abspath(args['<directory>'] or '.'))
try:
out_directory = build(args['<directory>'], args['--out'])
except NotADirectoryError as ex:
error(ex)
info('Site built in', os.path.abspath(out_directory))
return 0
if args['themes']:
theme = args['<theme>']
if args['use']:
try:
switched = use_theme(theme)
except ConfigSchemaError as ex:
error('Could not modify config:', ex)
return 1
except ThemeNotFoundError as ex:
error('Theme %s is not currently installed.' % repr(theme))
return 1
info('Switched to theme %s' if switched else 'Already using %s' % repr(theme))
elif args['install']:
# TODO: implement
raise NotImplementedError()
elif args['uninstall']:
# TODO: implement
raise NotImplementedError()
else:
themes = list_themes()
if themes:
info('Installed themes:')
info(' ' + '\n '.join(themes))
else:
info('No themes installed.')
return 0
if args['plugins']:
plugin = args['<plugin>']
if args['add']:
try:
added = add_plugin(plugin)
except ConfigSchemaError as ex:
error('Could not modify config:', ex)
return 1
info(('Added plugin %s' if added else
'Plugin %s has already been added.') % repr(plugin))
elif args['remove']:
settings = get_plugin_settings(plugin)
if not args['-f'] and settings and isinstance(settings, dict):
warning = 'Plugin %s contains settings. Remove?' % repr(plugin)
if not yes_or_no(warning):
return 0
try:
removed = remove_plugin(plugin)
except ConfigSchemaError as ex:
error('Error: Could not modify config:', ex)
info(('Removed plugin %s' if removed else
'Plugin %s has already been removed.') % repr(plugin))
else:
plugins = list_plugins()
info('Installed plugins:\n ' + '\n '.join(plugins) if plugins else
'No plugins installed.')
return 0
return 1
def error(*message):
sys.exit('Error: ' + ' '.join(map(str, message)))
def gpp(argv=None):
"""Shortcut function for running the previewing command."""
if argv is None:
argv = sys.argv[1:]
argv.insert(0, 'preview')
return main(argv)
|
joeyespo/gitpress | gitpress/command.py | execute | python | def execute(args):
def info(*message):
"""Displays a message unless -q was specified."""
if not args['-q']:
print ' '.join(map(str, message))
if args['init']:
try:
repo = init(args['<directory>'])
info('Initialized Gitpress repository in', repo)
except RepositoryAlreadyExistsError as ex:
info('Gitpress repository already exists in', ex.repo)
return 0
if args['preview']:
directory, address = resolve(args['<directory>'], args['<address>'])
host, port = split_address(address)
if address and not host and not port:
error('Invalid address', repr(address))
return preview(directory, host=host, port=port)
if args['build']:
require_repo(args['<directory>'])
info('Building site', os.path.abspath(args['<directory>'] or '.'))
try:
out_directory = build(args['<directory>'], args['--out'])
except NotADirectoryError as ex:
error(ex)
info('Site built in', os.path.abspath(out_directory))
return 0
if args['themes']:
theme = args['<theme>']
if args['use']:
try:
switched = use_theme(theme)
except ConfigSchemaError as ex:
error('Could not modify config:', ex)
return 1
except ThemeNotFoundError as ex:
error('Theme %s is not currently installed.' % repr(theme))
return 1
info('Switched to theme %s' if switched else 'Already using %s' % repr(theme))
elif args['install']:
# TODO: implement
raise NotImplementedError()
elif args['uninstall']:
# TODO: implement
raise NotImplementedError()
else:
themes = list_themes()
if themes:
info('Installed themes:')
info(' ' + '\n '.join(themes))
else:
info('No themes installed.')
return 0
if args['plugins']:
plugin = args['<plugin>']
if args['add']:
try:
added = add_plugin(plugin)
except ConfigSchemaError as ex:
error('Could not modify config:', ex)
return 1
info(('Added plugin %s' if added else
'Plugin %s has already been added.') % repr(plugin))
elif args['remove']:
settings = get_plugin_settings(plugin)
if not args['-f'] and settings and isinstance(settings, dict):
warning = 'Plugin %s contains settings. Remove?' % repr(plugin)
if not yes_or_no(warning):
return 0
try:
removed = remove_plugin(plugin)
except ConfigSchemaError as ex:
error('Error: Could not modify config:', ex)
info(('Removed plugin %s' if removed else
'Plugin %s has already been removed.') % repr(plugin))
else:
plugins = list_plugins()
info('Installed plugins:\n ' + '\n '.join(plugins) if plugins else
'No plugins installed.')
return 0
return 1 | Executes the command indicated by the specified parsed arguments. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/command.py#L57-L145 | [
"def error(*message):\n sys.exit('Error: ' + ' '.join(map(str, message)))\n",
"def init(directory=None):\n \"\"\"Initializes a Gitpress presentation repository at the specified directory.\"\"\"\n repo = repo_path(directory)\n if os.path.isdir(repo):\n raise RepositoryAlreadyExistsError(directory, repo)\n\n # Initialize repository with default template\n shutil.copytree(default_template_path, repo)\n\n message = '\"Default presentation content.\"'\n subprocess.call(['git', 'init', '-q', repo])\n subprocess.call(['git', 'add', '.'], cwd=repo)\n subprocess.call(['git', 'commit', '-q', '-m', message], cwd=repo)\n\n return repo\n",
"def require_repo(directory=None):\n \"\"\"Checks for a presentation repository and raises an exception if not found.\"\"\"\n if directory and not os.path.isdir(directory):\n raise ValueError('Directory not found: ' + repr(directory))\n repo = repo_path(directory)\n if not os.path.isdir(repo):\n raise RepositoryNotFoundError(directory)\n return repo\n",
"def build(content_directory=None, out_directory=None):\n \"\"\"Builds the site from its content and presentation repository.\"\"\"\n content_directory = content_directory or '.'\n out_directory = os.path.abspath(out_directory or default_out_directory)\n repo = require_repo(content_directory)\n\n # Prevent user mistakes\n if out_directory == '.':\n raise ValueError('Output directory must be different than the source directory: ' + repr(out_directory))\n if os.path.basename(os.path.relpath(out_directory, content_directory)) == '..':\n raise ValueError('Output directory must not contain the source directory: ' + repr(out_directory))\n\n # TODO: read config\n # TODO: use virtualenv\n # TODO: init and run plugins\n # TODO: process with active theme\n\n # Collect and copy static files\n files = presentation_files(repo)\n remove_directory(out_directory)\n copy_files(files, out_directory, repo)\n\n return out_directory\n",
"def preview(directory=None, host=None, port=None, watch=True):\n \"\"\"Runs a local server to preview the working directory of a repository.\"\"\"\n directory = directory or '.'\n host = host or '127.0.0.1'\n port = port or 5000\n\n # TODO: admin interface\n\n # TODO: use cache_only to keep from modifying output directly\n out_directory = build(directory)\n\n # Serve generated site\n os.chdir(out_directory)\n Handler = SimpleHTTPServer.SimpleHTTPRequestHandler\n httpd = SocketServer.TCPServer((host, port), Handler)\n print ' * Serving on http://%s:%s/' % (host, port)\n httpd.serve_forever()\n",
"def list_themes(directory=None):\n \"\"\"Gets a list of the installed themes.\"\"\"\n repo = require_repo(directory)\n path = os.path.join(repo, themes_dir)\n return os.listdir(path) if os.path.isdir(path) else None\n",
"def use_theme(theme, directory=None):\n \"\"\"Switches to the specified theme. This returns False if switching to the already active theme.\"\"\"\n repo = require_repo(directory)\n if theme not in list_themes(directory):\n raise ThemeNotFoundError(theme)\n\n old_theme = set_value(repo, 'theme', theme)\n return old_theme != theme\n",
"def list_plugins(directory=None):\n \"\"\"Gets a list of the installed themes.\"\"\"\n repo = require_repo(directory)\n plugins = get_value(repo, 'plugins')\n if not plugins or not isinstance(plugins, dict):\n return None\n return plugins.keys()\n",
"def add_plugin(plugin, directory=None):\n \"\"\"Adds the specified plugin. This returns False if it was already added.\"\"\"\n repo = require_repo(directory)\n plugins = get_value(repo, 'plugins', expect_type=dict)\n if plugin in plugins:\n return False\n\n plugins[plugin] = {}\n set_value(repo, 'plugins', plugins)\n return True\n",
"def remove_plugin(plugin, directory=None):\n \"\"\"Removes the specified plugin.\"\"\"\n repo = require_repo(directory)\n plugins = get_value(repo, 'plugins', expect_type=dict)\n if plugin not in plugins:\n return False\n\n del plugins[plugin]\n set_value(repo, 'plugins', plugins)\n return True\n",
"def get_plugin_settings(plugin, directory=None):\n \"\"\"Gets the settings for the specified plugin.\"\"\"\n repo = require_repo(directory)\n plugins = get_value(repo, 'plugins')\n return plugins.get(plugin) if isinstance(plugins, dict) else None\n",
"def yes_or_no(message):\n \"\"\"Gets user input and returns True for yes and False for no.\"\"\"\n while True:\n print message, '(yes/no)',\n line = raw_input()\n if line is None:\n return None\n line = line.lower()\n if line == 'y' or line == 'ye' or line == 'yes':\n return True\n if line == 'n' or line == 'no':\n return False\n",
"def info(*message):\n \"\"\"Displays a message unless -q was specified.\"\"\"\n if not args['-q']:\n print ' '.join(map(str, message))\n"
] | """\
gitpress.command
~~~~~~~~~~~~~~~~
Implements the command-line interface of Gitpress.
Usage:
gitpress preview [<directory>] [<address>]
gitpress build [-q] [--out <dir>] [<directory>]
gitpress init [-q] [<directory>]
gitpress themes [use <theme> | install <theme> | uninstall <theme>]
gitpress plugins [add <plugin> | remove [-f] <plugin>]
Options:
-h --help Show this help.
--version Show version.
-o --out <dir> The directory to output the rendered site.
-f Force the command to continue without prompting.
-q Quiet mode, suppress all messages except errors.
Notes:
<address> can take the form <host>[:<port>] or just <port>.
"""
import os
import sys
from docopt import docopt
from path_and_address import resolve, split_address
from .config import ConfigSchemaError
from .repository import init, require_repo, RepositoryAlreadyExistsError, RepositoryNotFoundError
from .previewing import preview
from .building import build
from .themes import list_themes, use_theme, ThemeNotFoundError
from .plugins import list_plugins, add_plugin, remove_plugin, get_plugin_settings
from .helpers import yes_or_no, NotADirectoryError
from . import __version__
def main(argv=None):
    """The entry point of the application.

    Parses the command line with docopt and dispatches to execute().
    """
    cli_args = sys.argv[1:] if argv is None else argv

    # Drop the module header paragraph; docopt only needs the usage text.
    usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
    parsed = docopt(usage, argv=cli_args, version='Gitpress ' + __version__)

    try:
        return execute(parsed)
    except RepositoryNotFoundError as ex:
        error('No Gitpress repository found at', ex.directory)
def error(*message):
    # Abort the process; all message parts are stringified and space-joined.
    parts = [str(piece) for piece in message]
    sys.exit('Error: ' + ' '.join(parts))
def gpp(argv=None):
    """Shortcut for running the ``preview`` command directly."""
    cli_args = sys.argv[1:] if argv is None else argv
    # Prepend the subcommand so main() parses "gitpress preview ...".
    cli_args.insert(0, 'preview')
    return main(cli_args)
|
joeyespo/gitpress | gitpress/command.py | gpp | python | def gpp(argv=None):
if argv is None:
argv = sys.argv[1:]
argv.insert(0, 'preview')
return main(argv) | Shortcut function for running the previewing command. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/command.py#L152-L157 | [
"def main(argv=None):\n \"\"\"The entry point of the application.\"\"\"\n if argv is None:\n argv = sys.argv[1:]\n usage = '\\n\\n\\n'.join(__doc__.split('\\n\\n\\n')[1:])\n version = 'Gitpress ' + __version__\n\n # Parse options\n args = docopt(usage, argv=argv, version=version)\n\n # Execute command\n try:\n return execute(args)\n except RepositoryNotFoundError as ex:\n error('No Gitpress repository found at', ex.directory)\n"
] | """\
gitpress.command
~~~~~~~~~~~~~~~~
Implements the command-line interface of Gitpress.
Usage:
gitpress preview [<directory>] [<address>]
gitpress build [-q] [--out <dir>] [<directory>]
gitpress init [-q] [<directory>]
gitpress themes [use <theme> | install <theme> | uninstall <theme>]
gitpress plugins [add <plugin> | remove [-f] <plugin>]
Options:
-h --help Show this help.
--version Show version.
-o --out <dir> The directory to output the rendered site.
-f Force the command to continue without prompting.
-q Quiet mode, suppress all messages except errors.
Notes:
<address> can take the form <host>[:<port>] or just <port>.
"""
import os
import sys
from docopt import docopt
from path_and_address import resolve, split_address
from .config import ConfigSchemaError
from .repository import init, require_repo, RepositoryAlreadyExistsError, RepositoryNotFoundError
from .previewing import preview
from .building import build
from .themes import list_themes, use_theme, ThemeNotFoundError
from .plugins import list_plugins, add_plugin, remove_plugin, get_plugin_settings
from .helpers import yes_or_no, NotADirectoryError
from . import __version__
def main(argv=None):
"""The entry point of the application."""
if argv is None:
argv = sys.argv[1:]
usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
version = 'Gitpress ' + __version__
# Parse options
args = docopt(usage, argv=argv, version=version)
# Execute command
try:
return execute(args)
except RepositoryNotFoundError as ex:
error('No Gitpress repository found at', ex.directory)
def execute(args):
"""Executes the command indicated by the specified parsed arguments."""
def info(*message):
"""Displays a message unless -q was specified."""
if not args['-q']:
print ' '.join(map(str, message))
if args['init']:
try:
repo = init(args['<directory>'])
info('Initialized Gitpress repository in', repo)
except RepositoryAlreadyExistsError as ex:
info('Gitpress repository already exists in', ex.repo)
return 0
if args['preview']:
directory, address = resolve(args['<directory>'], args['<address>'])
host, port = split_address(address)
if address and not host and not port:
error('Invalid address', repr(address))
return preview(directory, host=host, port=port)
if args['build']:
require_repo(args['<directory>'])
info('Building site', os.path.abspath(args['<directory>'] or '.'))
try:
out_directory = build(args['<directory>'], args['--out'])
except NotADirectoryError as ex:
error(ex)
info('Site built in', os.path.abspath(out_directory))
return 0
if args['themes']:
theme = args['<theme>']
if args['use']:
try:
switched = use_theme(theme)
except ConfigSchemaError as ex:
error('Could not modify config:', ex)
return 1
except ThemeNotFoundError as ex:
error('Theme %s is not currently installed.' % repr(theme))
return 1
info('Switched to theme %s' if switched else 'Already using %s' % repr(theme))
elif args['install']:
# TODO: implement
raise NotImplementedError()
elif args['uninstall']:
# TODO: implement
raise NotImplementedError()
else:
themes = list_themes()
if themes:
info('Installed themes:')
info(' ' + '\n '.join(themes))
else:
info('No themes installed.')
return 0
if args['plugins']:
plugin = args['<plugin>']
if args['add']:
try:
added = add_plugin(plugin)
except ConfigSchemaError as ex:
error('Could not modify config:', ex)
return 1
info(('Added plugin %s' if added else
'Plugin %s has already been added.') % repr(plugin))
elif args['remove']:
settings = get_plugin_settings(plugin)
if not args['-f'] and settings and isinstance(settings, dict):
warning = 'Plugin %s contains settings. Remove?' % repr(plugin)
if not yes_or_no(warning):
return 0
try:
removed = remove_plugin(plugin)
except ConfigSchemaError as ex:
error('Error: Could not modify config:', ex)
info(('Removed plugin %s' if removed else
'Plugin %s has already been removed.') % repr(plugin))
else:
plugins = list_plugins()
info('Installed plugins:\n ' + '\n '.join(plugins) if plugins else
'No plugins installed.')
return 0
return 1
def error(*message):
sys.exit('Error: ' + ' '.join(map(str, message)))
|
joeyespo/gitpress | gitpress/themes.py | list_themes | python | def list_themes(directory=None):
repo = require_repo(directory)
path = os.path.join(repo, themes_dir)
return os.listdir(path) if os.path.isdir(path) else None | Gets a list of the installed themes. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/themes.py#L17-L21 | [
"def require_repo(directory=None):\n \"\"\"Checks for a presentation repository and raises an exception if not found.\"\"\"\n if directory and not os.path.isdir(directory):\n raise ValueError('Directory not found: ' + repr(directory))\n repo = repo_path(directory)\n if not os.path.isdir(repo):\n raise RepositoryNotFoundError(directory)\n return repo\n"
] | import os
from .repository import require_repo
from .config import set_value
themes_dir = '_themes'
default_theme = 'default'
class ThemeNotFoundError(Exception):
"""Indicates the requested theme was not found."""
def __init__(self, theme):
super(ThemeNotFoundError, self).__init__()
self.theme = theme
def use_theme(theme, directory=None):
    """Switch the repository to *theme*.

    Returns False if that theme was already active. Raises
    ThemeNotFoundError when the theme is not installed.
    """
    repo = require_repo(directory)
    installed = list_themes(directory)
    if theme not in installed:
        raise ThemeNotFoundError(theme)
    previous = set_value(repo, 'theme', theme)
    return previous != theme
|
joeyespo/gitpress | gitpress/themes.py | use_theme | python | def use_theme(theme, directory=None):
repo = require_repo(directory)
if theme not in list_themes(directory):
raise ThemeNotFoundError(theme)
old_theme = set_value(repo, 'theme', theme)
return old_theme != theme | Switches to the specified theme. This returns False if switching to the already active theme. | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/themes.py#L24-L31 | [
"def require_repo(directory=None):\n \"\"\"Checks for a presentation repository and raises an exception if not found.\"\"\"\n if directory and not os.path.isdir(directory):\n raise ValueError('Directory not found: ' + repr(directory))\n repo = repo_path(directory)\n if not os.path.isdir(repo):\n raise RepositoryNotFoundError(directory)\n return repo\n",
"def list_themes(directory=None):\n \"\"\"Gets a list of the installed themes.\"\"\"\n repo = require_repo(directory)\n path = os.path.join(repo, themes_dir)\n return os.listdir(path) if os.path.isdir(path) else None\n",
"def set_value(repo_directory, key, value, strict=True):\n \"\"\"Sets the value of a particular key in the config file. This has no effect when setting to the same value.\"\"\"\n if value is None:\n raise ValueError('Argument \"value\" must not be None.')\n\n # Read values and do nothing if not making any changes\n config = read_config(repo_directory)\n old = config.get(key)\n if old == value:\n return old\n\n # Check schema\n if strict and old is not None and not isinstance(old, type(value)):\n raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'\n % (repr(key), repr(type(value)), repr(type(old))))\n\n # Set new value and save results\n config[key] = value\n write_config(repo_directory, config)\n return old\n"
] | import os
from .repository import require_repo
from .config import set_value
themes_dir = '_themes'
default_theme = 'default'
class ThemeNotFoundError(Exception):
    """Raised when the requested theme is not installed."""

    def __init__(self, theme):
        # Keep the missing theme's name available to handlers.
        self.theme = theme
        super(ThemeNotFoundError, self).__init__()
def list_themes(directory=None):
    """Gets a list of the installed themes, or None when none exist."""
    repo = require_repo(directory)
    themes_path = os.path.join(repo, themes_dir)
    if not os.path.isdir(themes_path):
        return None
    return os.listdir(themes_path)
|
redcap-tools/PyCap | redcap/request.py | RCRequest.validate | python | def validate(self):
required = ['token', 'content']
valid_data = {
'exp_record': (['type', 'format'], 'record',
'Exporting record but content is not record'),
'imp_record': (['type', 'overwriteBehavior', 'data', 'format'],
'record', 'Importing record but content is not record'),
'metadata': (['format'], 'metadata',
'Requesting metadata but content != metadata'),
'exp_file': (['action', 'record', 'field'], 'file',
'Exporting file but content is not file'),
'imp_file': (['action', 'record', 'field'], 'file',
'Importing file but content is not file'),
'del_file': (['action', 'record', 'field'], 'file',
'Deleteing file but content is not file'),
'exp_event': (['format'], 'event',
'Exporting events but content is not event'),
'exp_arm': (['format'], 'arm',
'Exporting arms but content is not arm'),
'exp_fem': (['format'], 'formEventMapping',
'Exporting form-event mappings but content != formEventMapping'),
'exp_user': (['format'], 'user',
'Exporting users but content is not user'),
'exp_survey_participant_list': (['instrument'], 'participantList',
'Exporting Survey Participant List but content != participantList'),
'version': (['format'], 'version',
'Requesting version but content != version')
}
extra, req_content, err_msg = valid_data[self.type]
required.extend(extra)
required = set(required)
pl_keys = set(self.payload.keys())
# if req is not subset of payload keys, this call is wrong
if not set(required) <= pl_keys:
# what is not in pl_keys?
not_pre = required - pl_keys
raise RCAPIError("Required keys: %s" % ', '.join(not_pre))
# Check content, raise with err_msg if not good
try:
if self.payload['content'] != req_content:
raise RCAPIError(err_msg)
except KeyError:
raise RCAPIError('content not in payload') | Checks that at least required params exist | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/request.py#L64-L107 | null | class RCRequest(object):
"""
Private class wrapping the REDCap API. Decodes response from redcap
and returns it.
References
----------
https://redcap.vanderbilt.edu/api/help/
Users shouldn't really need to use this, the Project class is the
biggest consumer.
"""
def __init__(self, url, payload, qtype):
"""
Constructor
Parameters
----------
url : str
REDCap API URL
payload : dict
key,values corresponding to the REDCap API
qtype : str
Used to validate payload contents against API
"""
self.url = url
self.payload = payload
self.type = qtype
if qtype:
self.validate()
fmt_key = 'returnFormat' if 'returnFormat' in payload else 'format'
self.fmt = payload[fmt_key]
def execute(self, **kwargs):
"""Execute the API request and return data
Parameters
----------
kwargs :
passed to requests.post()
Returns
-------
response : list, str
data object from JSON decoding process if format=='json',
else return raw string (ie format=='csv'|'xml')
"""
r = post(self.url, data=self.payload, **kwargs)
# Raise if we need to
self.raise_for_status(r)
content = self.get_content(r)
return content, r.headers
def get_content(self, r):
"""Abstraction for grabbing content from a returned response"""
if self.type == 'exp_file':
# don't use the decoded r.text
return r.content
elif self.type == 'version':
return r.content
else:
if self.fmt == 'json':
content = {}
# Decode
try:
# Watch out for bad/empty json
content = json.loads(r.text, strict=False)
except ValueError as e:
if not self.expect_empty_json():
# reraise for requests that shouldn't send empty json
raise ValueError(e)
finally:
return content
else:
return r.text
def expect_empty_json(self):
"""Some responses are known to send empty responses"""
return self.type in ('imp_file', 'del_file')
def raise_for_status(self, r):
"""Given a response, raise for bad status for certain actions
Some redcap api methods don't return error messages
that the user could test for or otherwise use. Therefore, we
need to do the testing ourself
Raising for everything wouldn't let the user see the
(hopefully helpful) error message"""
if self.type in ('metadata', 'exp_file', 'imp_file', 'del_file'):
r.raise_for_status()
# see http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
# specifically 10.5
if 500 <= r.status_code < 600:
raise RedcapError(r.content)
|
redcap-tools/PyCap | redcap/request.py | RCRequest.execute | python | def execute(self, **kwargs):
r = post(self.url, data=self.payload, **kwargs)
# Raise if we need to
self.raise_for_status(r)
content = self.get_content(r)
return content, r.headers | Execute the API request and return data
Parameters
----------
kwargs :
passed to requests.post()
Returns
-------
response : list, str
data object from JSON decoding process if format=='json',
else return raw string (ie format=='csv'|'xml') | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/request.py#L109-L127 | [
"def get_content(self, r):\n \"\"\"Abstraction for grabbing content from a returned response\"\"\"\n if self.type == 'exp_file':\n # don't use the decoded r.text\n return r.content\n elif self.type == 'version':\n return r.content\n else:\n if self.fmt == 'json':\n content = {}\n # Decode\n try:\n # Watch out for bad/empty json\n content = json.loads(r.text, strict=False)\n except ValueError as e:\n if not self.expect_empty_json():\n # reraise for requests that shouldn't send empty json\n raise ValueError(e)\n finally:\n return content\n else:\n return r.text\n",
"def raise_for_status(self, r):\n \"\"\"Given a response, raise for bad status for certain actions\n\n Some redcap api methods don't return error messages\n that the user could test for or otherwise use. Therefore, we\n need to do the testing ourself\n\n Raising for everything wouldn't let the user see the\n (hopefully helpful) error message\"\"\"\n if self.type in ('metadata', 'exp_file', 'imp_file', 'del_file'):\n r.raise_for_status()\n # see http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html\n # specifically 10.5\n if 500 <= r.status_code < 600:\n raise RedcapError(r.content)\n"
] | class RCRequest(object):
"""
Private class wrapping the REDCap API. Decodes response from redcap
and returns it.
References
----------
https://redcap.vanderbilt.edu/api/help/
Users shouldn't really need to use this, the Project class is the
biggest consumer.
"""
def __init__(self, url, payload, qtype):
"""
Constructor
Parameters
----------
url : str
REDCap API URL
payload : dict
key,values corresponding to the REDCap API
qtype : str
Used to validate payload contents against API
"""
self.url = url
self.payload = payload
self.type = qtype
if qtype:
self.validate()
fmt_key = 'returnFormat' if 'returnFormat' in payload else 'format'
self.fmt = payload[fmt_key]
    def validate(self):
        """Checks that at least required params exist

        Raises RCAPIError when a required payload key is missing for this
        request type, or when the payload's 'content' value does not match
        what the request type requires.
        """
        required = ['token', 'content']
        # Maps request type -> (extra required payload keys, expected
        # 'content' value, error message used when 'content' mismatches).
        valid_data = {
            'exp_record': (['type', 'format'], 'record',
                'Exporting record but content is not record'),
            'imp_record': (['type', 'overwriteBehavior', 'data', 'format'],
                'record', 'Importing record but content is not record'),
            'metadata': (['format'], 'metadata',
                'Requesting metadata but content != metadata'),
            'exp_file': (['action', 'record', 'field'], 'file',
                'Exporting file but content is not file'),
            'imp_file': (['action', 'record', 'field'], 'file',
                'Importing file but content is not file'),
            'del_file': (['action', 'record', 'field'], 'file',
                'Deleteing file but content is not file'),
            'exp_event': (['format'], 'event',
                'Exporting events but content is not event'),
            'exp_arm': (['format'], 'arm',
                'Exporting arms but content is not arm'),
            'exp_fem': (['format'], 'formEventMapping',
                'Exporting form-event mappings but content != formEventMapping'),
            'exp_user': (['format'], 'user',
                'Exporting users but content is not user'),
            'exp_survey_participant_list': (['instrument'], 'participantList',
                'Exporting Survey Participant List but content != participantList'),
            'version': (['format'], 'version',
                'Requesting version but content != version')
        }
        extra, req_content, err_msg = valid_data[self.type]
        required.extend(extra)
        required = set(required)
        pl_keys = set(self.payload.keys())
        # if req is not subset of payload keys, this call is wrong
        if not set(required) <= pl_keys:
            # what is not in pl_keys?
            not_pre = required - pl_keys
            raise RCAPIError("Required keys: %s" % ', '.join(not_pre))
        # Check content, raise with err_msg if not good
        try:
            if self.payload['content'] != req_content:
                raise RCAPIError(err_msg)
        except KeyError:
            raise RCAPIError('content not in payload')
def get_content(self, r):
"""Abstraction for grabbing content from a returned response"""
if self.type == 'exp_file':
# don't use the decoded r.text
return r.content
elif self.type == 'version':
return r.content
else:
if self.fmt == 'json':
content = {}
# Decode
try:
# Watch out for bad/empty json
content = json.loads(r.text, strict=False)
except ValueError as e:
if not self.expect_empty_json():
# reraise for requests that shouldn't send empty json
raise ValueError(e)
finally:
return content
else:
return r.text
def expect_empty_json(self):
"""Some responses are known to send empty responses"""
return self.type in ('imp_file', 'del_file')
def raise_for_status(self, r):
"""Given a response, raise for bad status for certain actions
Some redcap api methods don't return error messages
that the user could test for or otherwise use. Therefore, we
need to do the testing ourself
Raising for everything wouldn't let the user see the
(hopefully helpful) error message"""
if self.type in ('metadata', 'exp_file', 'imp_file', 'del_file'):
r.raise_for_status()
# see http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
# specifically 10.5
if 500 <= r.status_code < 600:
raise RedcapError(r.content)
|
redcap-tools/PyCap | redcap/request.py | RCRequest.get_content | python | def get_content(self, r):
if self.type == 'exp_file':
# don't use the decoded r.text
return r.content
elif self.type == 'version':
return r.content
else:
if self.fmt == 'json':
content = {}
# Decode
try:
# Watch out for bad/empty json
content = json.loads(r.text, strict=False)
except ValueError as e:
if not self.expect_empty_json():
# reraise for requests that shouldn't send empty json
raise ValueError(e)
finally:
return content
else:
return r.text | Abstraction for grabbing content from a returned response | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/request.py#L129-L150 | [
"def expect_empty_json(self):\n \"\"\"Some responses are known to send empty responses\"\"\"\n return self.type in ('imp_file', 'del_file')\n"
] | class RCRequest(object):
"""
Private class wrapping the REDCap API. Decodes response from redcap
and returns it.
References
----------
https://redcap.vanderbilt.edu/api/help/
Users shouldn't really need to use this, the Project class is the
biggest consumer.
"""
def __init__(self, url, payload, qtype):
"""
Constructor
Parameters
----------
url : str
REDCap API URL
payload : dict
key,values corresponding to the REDCap API
qtype : str
Used to validate payload contents against API
"""
self.url = url
self.payload = payload
self.type = qtype
if qtype:
self.validate()
fmt_key = 'returnFormat' if 'returnFormat' in payload else 'format'
self.fmt = payload[fmt_key]
def validate(self):
"""Checks that at least required params exist"""
required = ['token', 'content']
valid_data = {
'exp_record': (['type', 'format'], 'record',
'Exporting record but content is not record'),
'imp_record': (['type', 'overwriteBehavior', 'data', 'format'],
'record', 'Importing record but content is not record'),
'metadata': (['format'], 'metadata',
'Requesting metadata but content != metadata'),
'exp_file': (['action', 'record', 'field'], 'file',
'Exporting file but content is not file'),
'imp_file': (['action', 'record', 'field'], 'file',
'Importing file but content is not file'),
'del_file': (['action', 'record', 'field'], 'file',
'Deleteing file but content is not file'),
'exp_event': (['format'], 'event',
'Exporting events but content is not event'),
'exp_arm': (['format'], 'arm',
'Exporting arms but content is not arm'),
'exp_fem': (['format'], 'formEventMapping',
'Exporting form-event mappings but content != formEventMapping'),
'exp_user': (['format'], 'user',
'Exporting users but content is not user'),
'exp_survey_participant_list': (['instrument'], 'participantList',
'Exporting Survey Participant List but content != participantList'),
'version': (['format'], 'version',
'Requesting version but content != version')
}
extra, req_content, err_msg = valid_data[self.type]
required.extend(extra)
required = set(required)
pl_keys = set(self.payload.keys())
# if req is not subset of payload keys, this call is wrong
if not set(required) <= pl_keys:
# what is not in pl_keys?
not_pre = required - pl_keys
raise RCAPIError("Required keys: %s" % ', '.join(not_pre))
# Check content, raise with err_msg if not good
try:
if self.payload['content'] != req_content:
raise RCAPIError(err_msg)
except KeyError:
raise RCAPIError('content not in payload')
def execute(self, **kwargs):
"""Execute the API request and return data
Parameters
----------
kwargs :
passed to requests.post()
Returns
-------
response : list, str
data object from JSON decoding process if format=='json',
else return raw string (ie format=='csv'|'xml')
"""
r = post(self.url, data=self.payload, **kwargs)
# Raise if we need to
self.raise_for_status(r)
content = self.get_content(r)
return content, r.headers
def expect_empty_json(self):
"""Some responses are known to send empty responses"""
return self.type in ('imp_file', 'del_file')
def raise_for_status(self, r):
"""Given a response, raise for bad status for certain actions
Some redcap api methods don't return error messages
that the user could test for or otherwise use. Therefore, we
need to do the testing ourself
Raising for everything wouldn't let the user see the
(hopefully helpful) error message"""
if self.type in ('metadata', 'exp_file', 'imp_file', 'del_file'):
r.raise_for_status()
# see http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
# specifically 10.5
if 500 <= r.status_code < 600:
raise RedcapError(r.content)
|
redcap-tools/PyCap | redcap/request.py | RCRequest.raise_for_status | python | def raise_for_status(self, r):
if self.type in ('metadata', 'exp_file', 'imp_file', 'del_file'):
r.raise_for_status()
# see http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
# specifically 10.5
if 500 <= r.status_code < 600:
raise RedcapError(r.content) | Given a response, raise for bad status for certain actions
Some redcap api methods don't return error messages
that the user could test for or otherwise use. Therefore, we
need to do the testing ourself
Raising for everything wouldn't let the user see the
(hopefully helpful) error message | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/request.py#L156-L170 | null | class RCRequest(object):
"""
Private class wrapping the REDCap API. Decodes response from redcap
and returns it.
References
----------
https://redcap.vanderbilt.edu/api/help/
Users shouldn't really need to use this, the Project class is the
biggest consumer.
"""
def __init__(self, url, payload, qtype):
"""
Constructor
Parameters
----------
url : str
REDCap API URL
payload : dict
key,values corresponding to the REDCap API
qtype : str
Used to validate payload contents against API
"""
self.url = url
self.payload = payload
self.type = qtype
if qtype:
self.validate()
fmt_key = 'returnFormat' if 'returnFormat' in payload else 'format'
self.fmt = payload[fmt_key]
def validate(self):
"""Checks that at least required params exist"""
required = ['token', 'content']
valid_data = {
'exp_record': (['type', 'format'], 'record',
'Exporting record but content is not record'),
'imp_record': (['type', 'overwriteBehavior', 'data', 'format'],
'record', 'Importing record but content is not record'),
'metadata': (['format'], 'metadata',
'Requesting metadata but content != metadata'),
'exp_file': (['action', 'record', 'field'], 'file',
'Exporting file but content is not file'),
'imp_file': (['action', 'record', 'field'], 'file',
'Importing file but content is not file'),
'del_file': (['action', 'record', 'field'], 'file',
'Deleteing file but content is not file'),
'exp_event': (['format'], 'event',
'Exporting events but content is not event'),
'exp_arm': (['format'], 'arm',
'Exporting arms but content is not arm'),
'exp_fem': (['format'], 'formEventMapping',
'Exporting form-event mappings but content != formEventMapping'),
'exp_user': (['format'], 'user',
'Exporting users but content is not user'),
'exp_survey_participant_list': (['instrument'], 'participantList',
'Exporting Survey Participant List but content != participantList'),
'version': (['format'], 'version',
'Requesting version but content != version')
}
extra, req_content, err_msg = valid_data[self.type]
required.extend(extra)
required = set(required)
pl_keys = set(self.payload.keys())
# if req is not subset of payload keys, this call is wrong
if not set(required) <= pl_keys:
# what is not in pl_keys?
not_pre = required - pl_keys
raise RCAPIError("Required keys: %s" % ', '.join(not_pre))
# Check content, raise with err_msg if not good
try:
if self.payload['content'] != req_content:
raise RCAPIError(err_msg)
except KeyError:
raise RCAPIError('content not in payload')
def execute(self, **kwargs):
"""Execute the API request and return data
Parameters
----------
kwargs :
passed to requests.post()
Returns
-------
response : list, str
data object from JSON decoding process if format=='json',
else return raw string (ie format=='csv'|'xml')
"""
r = post(self.url, data=self.payload, **kwargs)
# Raise if we need to
self.raise_for_status(r)
content = self.get_content(r)
return content, r.headers
def get_content(self, r):
"""Abstraction for grabbing content from a returned response"""
if self.type == 'exp_file':
# don't use the decoded r.text
return r.content
elif self.type == 'version':
return r.content
else:
if self.fmt == 'json':
content = {}
# Decode
try:
# Watch out for bad/empty json
content = json.loads(r.text, strict=False)
except ValueError as e:
if not self.expect_empty_json():
# reraise for requests that shouldn't send empty json
raise ValueError(e)
finally:
return content
else:
return r.text
def expect_empty_json(self):
"""Some responses are known to send empty responses"""
return self.type in ('imp_file', 'del_file')
|
redcap-tools/PyCap | redcap/project.py | Project.__basepl | python | def __basepl(self, content, rec_type='flat', format='json'):
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d | Return a dictionary which can be used as is or added to for
payloads | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L95-L101 | null | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata sttructure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.is_longitudinal | python | def is_longitudinal(self):
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0 | Returns
-------
boolean :
longitudinal status of this project | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L114-L123 | null | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
                    df_kwargs=None):
    """Export the project's metadata (data dictionary).

    Args:
        fields (list): limit the export to these fields.
        forms (list): limit the export to these forms.
        format (str): 'json' (default), 'csv', 'xml', or 'df' for a
            ``pandas.DataFrame``.
        df_kwargs (dict): forwarded to ``pandas.read_csv`` when
            ``format='df'``; defaults to ``{'index_col': 'field_name'}``.

    Returns:
        list, str or pandas.DataFrame: the metadata structure.
    """
    want_df = format == 'df'
    if want_df:
        from pandas import read_csv
    pl = self.__basepl('metadata', format='csv' if want_df else format)
    # Comma-join each provided filter list into the API's expected form.
    for api_key, values in (('fields', fields), ('forms', forms)):
        if values:
            pl[api_key] = ','.join(values)
    response, _ = self._call_api(pl, 'metadata')
    if format in ('json', 'csv', 'xml'):
        return response
    if want_df:
        return read_csv(StringIO(response),
                        **(df_kwargs or {'index_col': 'field_name'}))
def export_records(self, records=None, fields=None, forms=None,
                   events=None, raw_or_label='raw', event_name='label',
                   format='json', export_survey_fields=False,
                   export_data_access_groups=False, df_kwargs=None,
                   export_checkbox_labels=False, filter_logic=None):
    """Export data from the REDCap project.

    Args:
        records (list): record names to export (default: all).
        fields (list): field names to export (default: all).
        forms (list): form names to export; replace spaces in web-UI
            names with underscores (default: all).
        events (list): unique event names (longitudinal projects only).
        raw_or_label (str): 'raw' (default), 'label', or 'both' for
            multiple-choice fields.
        event_name (str): 'label' (default) or 'unique'.
        format (str): 'json' (default), 'csv', 'xml', or 'df' for a
            ``pandas.DataFrame``.
        export_survey_fields (bool): include survey identifier/timestamp
            fields when the project uses surveys.
        export_data_access_groups (bool): include the
            'redcap_data_access_group' field; ignored by the server when
            the requesting user is in a data access group.
        df_kwargs (dict): forwarded to ``pandas.read_csv`` when
            ``format='df'``; defaults to indexing by the project's id
            field (plus 'redcap_event_name' when longitudinal).
        export_checkbox_labels (bool): export checkbox values as labels.
        filter_logic (str): server-side filterLogic expression.

    Returns:
        list, str or pandas.DataFrame: the exported data.
    """
    want_df = format == 'df'
    if want_df:
        from pandas import read_csv
    pl = self.__basepl('record', format='csv' if want_df else format)
    # Ensure the id field is always present (>=6.x servers return only
    # the requested fields).
    fields = self.backfill_fields(fields, forms)
    # List-valued filters get comma-joined; scalar flags pass through.
    for key, value in (('records', records), ('fields', fields),
                       ('forms', forms), ('events', events)):
        if value:
            pl[key] = ','.join(value)
    for key, value in (('rawOrLabel', raw_or_label),
                       ('eventName', event_name),
                       ('exportSurveyFields', export_survey_fields),
                       ('exportDataAccessGroups', export_data_access_groups),
                       ('exportCheckboxLabel', export_checkbox_labels)):
        if value:
            pl[key] = value
    if filter_logic:
        pl['filterLogic'] = filter_logic
    response, _ = self._call_api(pl, 'exp_record')
    if format in ('json', 'csv', 'xml'):
        return response
    if want_df:
        if not df_kwargs:
            if self.is_longitudinal():
                df_kwargs = {'index_col': [self.def_field,
                                           'redcap_event_name']}
            else:
                df_kwargs = {'index_col': self.def_field}
        buf = StringIO(response)
        frame = read_csv(buf, **df_kwargs)
        buf.close()
        return frame
def metadata_type(self, field_name):
    """Return the REDCap validation type configured for *field_name*.

    Empty string when the field has no validation entry.
    """
    validation_key = 'text_validation_type_or_show_slider_number'
    return self.__meta_metadata(field_name, validation_key)
def __meta_metadata(self, field, key):
    """Return the metadata value of *key* for *field*, or '' if absent.

    Prints a diagnostic (instead of raising) when the field has no
    entry for the key — callers treat '' as "not configured".
    """
    matches = [entry[key] for entry in self.metadata
               if entry['field_name'] == field]
    if matches:
        return str(matches[0])
    print("%s not in metadata field:%s" % (key, field))
    return ''
def backfill_fields(self, fields, forms):
    """Return the requested fields, guaranteed to include the id field.

    REDCap >=6.x servers return *only* the requested fields, so the
    project's default id field is appended when missing to keep older
    PyCap client code working.

    Args:
        fields (list): requested fields (may be None/empty).
        forms (list): requested forms (may be None/empty).

    Returns:
        list: field names to request. Note: only fields are returned
        (the original docstring incorrectly claimed "fields, forms").
    """
    # Forms requested without fields: only the id field is needed
    # explicitly; the forms drive the rest of the export.
    if forms and not fields:
        return [self.def_field]
    # Nothing requested at all: fall back to every project field.
    if not fields:
        return self.field_names
    # Explicit fields: copy, then backfill the id field if absent.
    # (The original had a redundant second `not in fields` check here.)
    new_fields = list(fields)
    if self.def_field not in new_fields:
        new_fields.append(self.def_field)
    return new_fields
def filter(self, query, output_fields=None):
    """Return rows for subjects matching *query*.

    Args:
        query: Query or QueryGroup object to evaluate.
        output_fields (list or str): fields to include for matching
            subjects; the project's id field is always used when empty.

    Returns:
        list of dicts, one per surviving row; empty list when nothing
        matches.

    Raises:
        ValueError: when a query key is not a project field.
    """
    query_keys = query.fields()
    if not set(query_keys).issubset(set(self.field_names)):
        raise ValueError("One or more query keys not in project keys")
    query_keys.append(self.def_field)
    data = self.export_records(fields=query_keys)
    matches = query.filter(data, self.def_field)
    if not matches:
        # An empty `records` list would export *every* row, so
        # short-circuit instead of calling export_records again.
        return []
    # Without explicit output fields, limit the download to the id
    # field rather than pulling the whole record.
    if not output_fields:
        output_fields = [self.def_field]
    # Listify a bare string argument.
    # NOTE(review): `basestring` is py2; assumes a py2/3 shim is
    # defined at module top — confirm against the file header.
    if isinstance(output_fields, basestring):
        output_fields = [output_fields]
    return self.export_records(records=matches, fields=output_fields)
def names_labels(self, do_print=False):
    """Return (field_names, field_labels), optionally printing pairs."""
    if do_print:
        for pair in zip(self.field_names, self.field_labels):
            print('%s --> %s' % (str(pair[0]), str(pair[1])))
    return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
                   return_format='json', return_content='count',
                   date_format='YMD', force_auto_number=False):
    """Import data into the REDCap project.

    Args:
        to_import: list of dicts, csv/xml string, or a
            ``pandas.DataFrame`` (anything with ``to_csv``). For
            csv/xml strings set ``format`` accordingly. Keys that are
            not project fields yield an 'error' response.
        overwrite (str): 'normal' (default) or 'overwrite' (erase
            values missing from to_import).
        format (str): 'json' (default), 'xml', or 'csv'.
        return_format (str): 'json' (default), 'csv', or 'xml'.
        return_content (str): 'count' (default), 'ids', or 'nothing'.
        date_format (str): 'YMD' (default), 'DMY', or 'MDY'.
        force_auto_number (bool): let REDCap auto-assign record ids.

    Returns:
        dict or str: API response, json-decoded when
        ``return_format='json'``.

    Raises:
        RedcapError: when the API response reports an error.
    """
    pl = self.__basepl('record')
    if hasattr(to_import, 'to_csv'):
        # DataFrame-like: serialise to CSV, labelling the index with
        # the id field (plus event name when longitudinal).
        buf = StringIO()
        if self.is_longitudinal():
            index_label = [self.def_field, 'redcap_event_name']
        else:
            index_label = self.def_field
        to_import.to_csv(buf, index_label=index_label)
        pl['data'] = buf.getvalue()
        buf.close()
        format = 'csv'
    elif format == 'json':
        pl['data'] = json.dumps(to_import, separators=(',', ':'))
    else:
        # csv/xml strings are forwarded untouched
        pl['data'] = to_import
    pl['overwriteBehavior'] = overwrite
    pl['format'] = format
    pl['returnFormat'] = return_format
    pl['returnContent'] = return_content
    pl['dateFormat'] = date_format
    pl['forceAutoNumber'] = force_auto_number
    response = self._call_api(pl, 'imp_record')[0]
    if 'error' in response:
        raise RedcapError(str(response))
    return response
def export_file(self, record, field, event=None, return_format='json'):
    """Export the file stored in *field* for a single *record*.

    Unlike the other export methods this operates on one record only.

    Args:
        record (str): record ID.
        field (str): 'file'-type field holding the file.
        event (str): unique event name for longitudinal projects.
        return_format (str): 'json' (default), 'csv', or 'xml' for
            error messages.

    Returns:
        tuple: (file content as bytes, dict of content-type metadata
        such as name/charset).
    """
    self._check_file_field(field)
    pl = self.__basepl(content='file', format=return_format)
    # File calls take no 'format' field; errors use returnFormat.
    del pl['format']
    pl['returnFormat'] = return_format
    pl['action'] = 'export'
    pl['field'] = field
    pl['record'] = record
    if event:
        pl['event'] = event
    content, headers = self._call_api(pl, 'exp_file')
    # REDCap packs extras (name, charset, ...) into content-type as
    # semicolon-separated key=value pairs.
    content_map = {}
    if 'content-type' in headers:
        for part in headers['content-type'].split(';'):
            part = part.strip()
            if '=' in part:
                pieces = part.split('=')
                content_map[pieces[0]] = pieces[1].replace('"', '')
    return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
                return_format='json'):
    """Upload the open file object *fobj* into *field* on *record*.

    Args:
        record (str): record ID.
        field (str): 'file'-type field receiving the file.
        fname (str): file name shown in the REDCap UI.
        fobj: file object as returned by ``open``.
        event (str): unique event name for longitudinal projects.
        return_format (str): 'json' (default), 'csv', or 'xml' for
            error messages.

    Returns:
        server response in the requested ``return_format``.
    """
    self._check_file_field(field)
    pl = self.__basepl(content='file', format=return_format)
    # File calls take no 'format' field; errors use returnFormat.
    del pl['format']
    pl['returnFormat'] = return_format
    pl['action'] = 'import'
    pl['field'] = field
    pl['record'] = record
    if event:
        pl['event'] = event
    return self._call_api(
        pl, 'imp_file', files={'file': (fname, fobj)})[0]
def delete_file(self, record, field, return_format='json', event=None):
    """Delete the file stored in *field* on *record*. Irreversible.

    Args:
        record (str): record ID.
        field (str): 'file'-type field to clear.
        return_format (str): 'json' (default), 'csv', or 'xml' for
            error messages.
        event (str): unique event name for longitudinal projects.

    Returns:
        dict or str: API response after deletion.
    """
    self._check_file_field(field)
    pl = self.__basepl(content='file', format=return_format)
    # File calls take no 'format' field; errors use returnFormat.
    del pl['format']
    pl['returnFormat'] = return_format
    pl['action'] = 'delete'
    pl['record'] = record
    pl['field'] = field
    if event:
        pl['event'] = event
    return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
    """Raise ValueError unless *field* is a defined 'file' field.

    Returns True when the field passes both checks.
    """
    # Evaluate both checks up front (mirrors the metadata lookup's
    # diagnostic print for unknown fields).
    is_known_field = field in self.field_names
    is_file_type = self.__meta_metadata(field, 'field_type') == 'file'
    if not (is_known_field and is_file_type):
        raise ValueError("'%s' is not a field or not a 'file' field" % field)
    return True
def export_users(self, format='json'):
    """Export the project's users.

    Each user dict carries: 'firstname', 'lastname', 'email',
    'username', 'expiration', 'data_access_group', 'data_export'
    (0=none, 2=de-identified, 1=full), and 'forms' — a list of
    {form_name: rights} entries where 0=no access, 1=view/edit,
    2=read only, 3=edit survey responses.

    Args:
        format (str): 'json' (default), 'csv', or 'xml'.

    Returns:
        list of user dicts for 'json', otherwise a string.
    """
    payload = self.__basepl(content='user', format=format)
    return self._call_api(payload, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
    """Export the survey participant list for *instrument*.

    The instrument must be configured as a survey.

    Args:
        instrument (str): instrument name (second column of the data
            dictionary).
        event (str): unique event name, longitudinal projects only.
        format (str): 'json' (default), 'xml', or 'csv'.
    """
    payload = self.__basepl(content='participantList', format=format)
    payload['instrument'] = instrument
    if event:
        payload['event'] = event
    # Note: unlike most exports this returns the full (response,
    # headers) tuple from the API call.
    return self._call_api(payload, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.filter_metadata | python | def filter_metadata(self, key):
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered | Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L125-L143 | null | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata sttructure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
    """
    Export the users of the Project.

    Notes
    -----
    Each user dict carries the following keys:

    * ``'firstname'`` : User's first name
    * ``'lastname'`` : User's last name
    * ``'email'`` : Email address
    * ``'username'`` : User's username
    * ``'expiration'`` : Project access expiration date
    * ``'data_access_group'`` : data access group ID
    * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
    * ``'forms'`` : a list of dicts with a single key as the form name and
      value is an integer describing that user's form rights,
      where: 0=no access, 1=view records/responses and edit
      records (survey responses are read-only), 2=read only, and
      3=edit survey responses

    Parameters
    ----------
    format : (``'json'``), ``'csv'``, ``'xml'``
        response return format

    Returns
    -------
    users: list, str
        list of user dicts when ``'format'='json'``, otherwise a string
    """
    payload = self.__basepl(content='user', format=format)
    response, _ = self._call_api(payload, 'exp_user')
    return response
def export_survey_participant_list(self, instrument, event=None, format='json'):
    """
    Export the Survey Participant List.

    Notes
    -----
    The passed instrument must be set up as a survey instrument.

    Parameters
    ----------
    instrument: str
        Name of instrument as seen in second column of Data Dictionary.
    event: str
        Unique event name, only used in longitudinal projects.
    format: (json, xml, csv), json by default
        Format of returned data.
    """
    payload = self.__basepl(content='participantList', format=format)
    payload['instrument'] = instrument
    if event:
        payload['event'] = event
    # NOTE(review): unlike sibling export methods, this returns the full
    # (response, headers) tuple from _call_api rather than response alone —
    # presumably callers rely on that; confirm before normalizing.
    return self._call_api(payload, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.export_fem | python | def export_fem(self, arms=None, format='json', df_kwargs=None):
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs) | Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L157-L194 | [
"def __basepl(self, content, rec_type='flat', format='json'):\n \"\"\"Return a dictionary which can be used as is or added to for\n payloads\"\"\"\n d = {'token': self.token, 'content': content, 'format': format}\n if content not in ['metadata', 'file']:\n d['type'] = rec_type\n return d\n",
"def _call_api(self, payload, typpe, **kwargs):\n request_kwargs = self._kwargs()\n request_kwargs.update(kwargs)\n rcr = RCRequest(self.url, payload, typpe)\n return rcr.execute(**request_kwargs)\n"
] | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata structure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return its type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatibility for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.export_metadata | python | def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs) | Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata structure for the project.
"def __basepl(self, content, rec_type='flat', format='json'):\n \"\"\"Return a dictionary which can be used as is or added to for\n payloads\"\"\"\n d = {'token': self.token, 'content': content, 'format': format}\n if content not in ['metadata', 'file']:\n d['type'] = rec_type\n return d\n",
"def _call_api(self, payload, typpe, **kwargs):\n request_kwargs = self._kwargs()\n request_kwargs.update(kwargs)\n rcr = RCRequest(self.url, payload, typpe)\n return rcr.execute(**request_kwargs)\n"
] | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return its type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
    """Query the database and return subject information for those
    who match the query logic

    Parameters
    ----------
    query: Query or QueryGroup
        Query(Group) object to process
    output_fields: list
        The fields desired for matching subjects

    Returns
    -------
    A list of dictionaries whose keys contains at least the default field
    and at most each key passed in with output_fields, each dictionary
    representing a surviving row in the database.
    """
    query_keys = query.fields()
    if not set(query_keys).issubset(set(self.field_names)):
        raise ValueError("One or more query keys not in project keys")
    # Always fetch the id field so matches can be keyed back to records.
    query_keys.append(self.def_field)
    data = self.export_records(fields=query_keys)
    matches = query.filter(data, self.def_field)
    if matches:
        # if output_fields is empty, we'll download all fields, which is
        # not desired, so we limit download to def_field
        if not output_fields:
            output_fields = [self.def_field]
        # But if caller passed a string and not list, we need to listify
        # NOTE(review): ``basestring`` is Python 2 only — presumably a
        # compat alias is defined at module level; confirm for Python 3,
        # where this line would raise NameError.
        if isinstance(output_fields, basestring):
            output_fields = [output_fields]
        return self.export_records(records=matches, fields=output_fields)
    else:
        # If there are no matches, then sending an empty list to
        # export_records will actually return all rows, which is not
        # what we want
        return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
                   return_format='json', return_content='count',
                   date_format='YMD', force_auto_number=False):
    """
    Import data into the RedCap Project

    Parameters
    ----------
    to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
        :note:
            If you pass a csv or xml string, you should use the
            ``format`` parameter appropriately.
        :note:
            Keys of the dictionaries should be subset of project's,
            fields, but this isn't a requirement. If you provide keys
            that aren't defined fields, the returned response will
            contain an ``'error'`` key.
    overwrite : ('normal'), 'overwrite'
        ``'overwrite'`` will erase values previously stored in the
        database if not specified in the to_import dictionaries.
    format : ('json'), 'xml', 'csv'
        Format of incoming data. By default, to_import will be json-encoded
    return_format : ('json'), 'csv', 'xml'
        Response format. By default, response will be json-decoded.
    return_content : ('count'), 'ids', 'nothing'
        By default, the response contains a 'count' key with the number of
        records just imported. By specifying 'ids', a list of ids
        imported will be returned. 'nothing' will only return
        the HTTP status code and no message.
    date_format : ('YMD'), 'DMY', 'MDY'
        Describes the formatting of dates. By default, date strings
        are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
        strings are formatted as 'MM/DD/YYYY' set this parameter as
        'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
        other formattings are allowed.
    force_auto_number : ('False') Enables automatic assignment of record IDs
        of imported records by REDCap. If this is set to true, and auto-numbering
        for records is enabled for the project, auto-numbering of imported records
        will be enabled.

    Returns
    -------
    response : dict, str
        response from REDCap API, json-decoded if ``return_format`` == ``'json'``

    Raises
    ------
    RedcapError
        if the API response contains an ``'error'`` key
    """
    pl = self.__basepl('record')
    # Duck-type the DataFrame case on to_csv rather than importing pandas.
    if hasattr(to_import, 'to_csv'):
        # We'll assume it's a df
        buf = StringIO()
        if self.is_longitudinal():
            # Longitudinal frames are indexed by (record id, event name).
            csv_kwargs = {'index_label': [self.def_field,
                                          'redcap_event_name']}
        else:
            csv_kwargs = {'index_label': self.def_field}
        to_import.to_csv(buf, **csv_kwargs)
        pl['data'] = buf.getvalue()
        buf.close()
        # The payload is now CSV regardless of the caller's format arg.
        format = 'csv'
    elif format == 'json':
        pl['data'] = json.dumps(to_import, separators=(',', ':'))
    else:
        # don't do anything to csv/xml
        pl['data'] = to_import
    pl['overwriteBehavior'] = overwrite
    pl['format'] = format
    pl['returnFormat'] = return_format
    pl['returnContent'] = return_content
    pl['dateFormat'] = date_format
    pl['forceAutoNumber'] = force_auto_number
    response = self._call_api(pl, 'imp_record')[0]
    # REDCap signals failure in-band; surface it as an exception.
    if 'error' in response:
        raise RedcapError(str(response))
    return response
def export_file(self, record, field, event=None, return_format='json'):
    """
    Export the contents of a file stored for a particular record

    Notes
    -----
    Unlike other export methods, this works on a single record.

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name containing the file to be exported.
    event: str
        for longitudinal projects, specify the unique event here
    return_format: ('json'), 'csv', 'xml'
        format of error message

    Returns
    -------
    content : bytes
        content of the file
    content_map : dict
        content-type dictionary
    """
    # Fails fast with ValueError if field isn't a 'file' field.
    self._check_file_field(field)
    # load up payload
    pl = self.__basepl(content='file', format=return_format)
    # there's no format field in this call
    del pl['format']
    pl['returnFormat'] = return_format
    pl['action'] = 'export'
    pl['field'] = field
    pl['record'] = record
    if event:
        pl['event'] = event
    content, headers = self._call_api(pl, 'exp_file')
    #REDCap adds some useful things in content-type
    if 'content-type' in headers:
        # e.g. 'text/plain; name="file.txt"; charset=UTF-8': split on ';',
        # keep only the key=value parts (dropping the bare mime type) and
        # strip surrounding quotes from the values.
        splat = [kv.strip() for kv in headers['content-type'].split(';')]
        kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
              in splat if '=' in kv]
        content_map = dict(kv)
    else:
        content_map = {}
    return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
                return_format='json'):
    """
    Import the contents of a file represented by fobj to a
    particular records field

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name where the file will go
    fname : str
        file name visible in REDCap UI
    fobj : file object
        file object as returned by `open`
    event : str
        for longitudinal projects, specify the unique event here
    return_format : ('json'), 'csv', 'xml'
        format of error message

    Returns
    -------
    response :
        response from server as specified by ``return_format``
    """
    # Fails fast with ValueError if field isn't a 'file' field.
    self._check_file_field(field)
    # load up payload
    pl = self.__basepl(content='file', format=return_format)
    # no format in this call
    del pl['format']
    pl['returnFormat'] = return_format
    pl['action'] = 'import'
    pl['field'] = field
    pl['record'] = record
    if event:
        pl['event'] = event
    # Upload as a multipart file part named 'file'.
    file_kwargs = {'files': {'file': (fname, fobj)}}
    return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
    """
    Delete a file from REDCap

    Notes
    -----
    There is no undo button to this.

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name
    return_format : (``'json'``), ``'csv'``, ``'xml'``
        return format for error message
    event : str
        If longitudinal project, event to delete file from

    Returns
    -------
    response : dict, str
        response from REDCap after deleting file
    """
    # Raises ValueError unless field exists and is a 'file' field.
    self._check_file_field(field)
    payload = self.__basepl(content='file', format=return_format)
    # The file API takes no 'format' key.
    payload.pop('format')
    payload.update({'returnFormat': return_format,
                    'action': 'delete',
                    'record': record,
                    'field': field})
    if event:
        payload['event'] = event
    return self._call_api(payload, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
    """
    Export the users of the Project

    Notes
    -----
    Each user will have the following keys:

        * ``'firstname'`` : User's first name
        * ``'lastname'`` : User's last name
        * ``'email'`` : Email address
        * ``'username'`` : User's username
        * ``'expiration'`` : Project access expiration date
        * ``'data_access_group'`` : data access group ID
        * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
        * ``'forms'`` : a list of dicts with a single key as the form name and
            value is an integer describing that user's form rights,
            where: 0=no access, 1=view records/responses and edit
            records (survey responses are read-only), 2=read only, and
            3=edit survey responses,

    Parameters
    ----------
    format : (``'json'``), ``'csv'``, ``'xml'``
        response return format

    Returns
    -------
    users: list, str
        list of users dicts when ``'format'='json'``,
        otherwise a string
    """
    # User export takes only the base payload (token/content/format).
    pl = self.__basepl(content='user', format=format)
    return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
    """
    Export the Survey Participant List

    Notes
    -----
    The passed instrument must be set up as a survey instrument.

    Parameters
    ----------
    instrument: str
        Name of instrument as seen in second column of Data Dictionary.
    event: str
        Unique event name, only used in longitudinal projects
    format: (json, xml, csv), json by default
        Format of returned data
    """
    payload = self.__basepl(content='participantList', format=format)
    payload['instrument'] = instrument
    if event:
        # Longitudinal projects scope the list to a single event.
        payload['event'] = event
    return self._call_api(payload, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.export_records | python | def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df | Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L238-L335 | [
"def __basepl(self, content, rec_type='flat', format='json'):\n \"\"\"Return a dictionary which can be used as is or added to for\n payloads\"\"\"\n d = {'token': self.token, 'content': content, 'format': format}\n if content not in ['metadata', 'file']:\n d['type'] = rec_type\n return d\n",
"def is_longitudinal(self):\n \"\"\"\n Returns\n -------\n boolean :\n longitudinal status of this project\n \"\"\"\n return len(self.events) > 0 and \\\n len(self.arm_nums) > 0 and \\\n len(self.arm_names) > 0\n",
"def _call_api(self, payload, typpe, **kwargs):\n request_kwargs = self._kwargs()\n request_kwargs.update(kwargs)\n rcr = RCRequest(self.url, payload, typpe)\n return rcr.execute(**request_kwargs)\n",
"def backfill_fields(self, fields, forms):\n \"\"\"\n Properly backfill fields to explicitly request specific\n keys. The issue is that >6.X servers *only* return requested fields\n so to improve backwards compatiblity for PyCap clients, add specific fields\n when required.\n\n Parameters\n ----------\n fields: list\n requested fields\n forms: list\n requested forms\n\n Returns\n -------\n new fields, forms\n \"\"\"\n if forms and not fields:\n new_fields = [self.def_field]\n elif fields and self.def_field not in fields:\n new_fields = list(fields)\n if self.def_field not in fields:\n new_fields.append(self.def_field)\n elif not fields:\n new_fields = self.field_names\n else:\n new_fields = list(fields)\n return new_fields\n"
] | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata structure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatibility for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.__meta_metadata | python | def __meta_metadata(self, field, key):
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf | Return the value for key for the field in the metadata | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L342-L352 | null | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
    def __basepl(self, content, rec_type='flat', format='json'):
        """Return a dictionary which can be used as is or added to for
        payloads

        Parameters
        ----------
        content : str
            Value for the API ``content`` parameter (e.g. ``'record'``)
        rec_type : (``'flat'``), ``'eav'``
            Record type; omitted for metadata/file calls
        format : (``'json'``), ``'csv'``, ``'xml'``
            Wire format requested from the API
        """
        d = {'token': self.token, 'content': content, 'format': format}
        # The metadata and file endpoints do not take a 'type' parameter.
        if content not in ['metadata', 'file']:
            d['type'] = rec_type
        return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
    def export_metadata(self, fields=None, forms=None, format='json',
                        df_kwargs=None):
        """
        Export the project's metadata

        Parameters
        ----------
        fields : list
            Limit exported metadata to these fields
        forms : list
            Limit exported metadata to these forms
        format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
            Return the metadata in native objects, csv or xml.
            ``'df'`` will return a ``pandas.DataFrame``.
        df_kwargs : dict
            Passed to ``pandas.read_csv`` to control construction of
            returned DataFrame.
            by default ``{'index_col': 'field_name'}``

        Returns
        -------
        metadata : list, str, ``pandas.DataFrame``
            metadata structure for the project.
        """
        ret_format = format
        if format == 'df':
            # pandas is imported lazily so it remains an optional dependency
            from pandas import read_csv
            # the API itself only speaks json/csv/xml; request csv and
            # build the DataFrame locally
            ret_format = 'csv'
        pl = self.__basepl('metadata', format=ret_format)
        to_add = [fields, forms]
        str_add = ['fields', 'forms']
        for key, data in zip(str_add, to_add):
            if data:
                pl[key] = ','.join(data)
        response, _ = self._call_api(pl, 'metadata')
        if format in ('json', 'csv', 'xml'):
            return response
        elif format == 'df':
            if not df_kwargs:
                df_kwargs = {'index_col': 'field_name'}
            return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
    def _check_file_field(self, field):
        """Check that field exists and is a file field

        Parameters
        ----------
        field : str
            field name to validate

        Returns
        -------
        True when the field exists and its metadata type is ``'file'``.

        Raises
        ------
        ValueError
            If the field is unknown or not a file-type field.
        """
        # Membership in the cached field list catches typos without an API
        # round trip; the metadata lookup confirms the REDCap field type.
        is_field = field in self.field_names
        is_file = self.__meta_metadata(field, 'field_type') == 'file'
        if not (is_field and is_file):
            msg = "'%s' is not a field or not a 'file' field" % field
            raise ValueError(msg)
        else:
            return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.backfill_fields | python | def backfill_fields(self, fields, forms):
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields | Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatibility for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L354-L382 | null | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata sttructure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
    """
    Export the contents of a file stored for a particular record

    Notes
    -----
    Unlike other export methods, this works on a single record.

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name containing the file to be exported.
    event: str
        for longitudinal projects, specify the unique event here
    return_format: ('json'), 'csv', 'xml'
        format of error message

    Returns
    -------
    content : bytes
        content of the file
    content_map : dict
        content-type dictionary
    """
    self._check_file_field(field)
    # load up payload
    pl = self.__basepl(content='file', format=return_format)
    # there's no format field in this call, only returnFormat
    del pl['format']
    pl['returnFormat'] = return_format
    pl['action'] = 'export'
    pl['field'] = field
    pl['record'] = record
    if event:
        pl['event'] = event
    content, headers = self._call_api(pl, 'exp_file')
    # REDCap adds useful parameters (e.g. name, charset) to the
    # content-type header; expose them as a plain dict.
    if 'content-type' in headers:
        splat = [kv.strip() for kv in headers['content-type'].split(';')]
        content_map = {}
        for param in splat:
            if '=' in param:
                # Split on the *first* '=' only so parameter values that
                # themselves contain '=' survive intact; the previous
                # split('=')[1] silently dropped everything after a
                # second '='.
                key, _, value = param.partition('=')
                content_map[key] = value.replace('"', '')
    else:
        content_map = {}
    return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
                return_format='json'):
    """
    Import the contents of a file represented by fobj to a
    particular records field

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name where the file will go
    fname : str
        file name visible in REDCap UI
    fobj : file object
        file object as returned by `open`
    event : str
        for longitudinal projects, specify the unique event here
    return_format : ('json'), 'csv', 'xml'
        format of error message

    Returns
    -------
    response :
        response from server as specified by ``return_format``
    """
    self._check_file_field(field)
    # Build the payload; file calls carry no 'format' key, only
    # 'returnFormat'.
    payload = self.__basepl(content='file', format=return_format)
    payload.pop('format')
    payload.update({'returnFormat': return_format, 'action': 'import',
                    'field': field, 'record': record})
    if event:
        payload['event'] = event
    file_kwargs = {'files': {'file': (fname, fobj)}}
    return self._call_api(payload, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
    """
    Delete a file from REDCap

    Notes
    -----
    There is no undo button to this.

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name
    return_format : (``'json'``), ``'csv'``, ``'xml'``
        return format for error message
    event : str
        If longitudinal project, event to delete file from

    Returns
    -------
    response : dict, str
        response from REDCap after deleting file
    """
    self._check_file_field(field)
    # File calls take 'returnFormat' instead of 'format'.
    payload = self.__basepl(content='file', format=return_format)
    payload.pop('format')
    payload.update({'returnFormat': return_format, 'action': 'delete',
                    'record': record, 'field': field})
    if event:
        payload['event'] = event
    return self._call_api(payload, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
    """
    Export the users of the Project

    Notes
    -----
    Each user will have the following keys:

    * ``'firstname'`` : User's first name
    * ``'lastname'`` : User's last name
    * ``'email'`` : Email address
    * ``'username'`` : User's username
    * ``'expiration'`` : Project access expiration date
    * ``'data_access_group'`` : data access group ID
    * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
    * ``'forms'`` : a list of dicts with a single key as the form name and
        value is an integer describing that user's form rights,
        where: 0=no access, 1=view records/responses and edit
        records (survey responses are read-only), 2=read only, and
        3=edit survey responses,

    Parameters
    ----------
    format : (``'json'``), ``'csv'``, ``'xml'``
        response return format

    Returns
    -------
    users: list, str
        list of users dicts when ``'format'='json'``,
        otherwise a string
    """
    payload = self.__basepl(content='user', format=format)
    # _call_api returns (response, headers); only the body is useful here.
    response, _ = self._call_api(payload, 'exp_user')
    return response
def export_survey_participant_list(self, instrument, event=None, format='json'):
    """
    Export the Survey Participant List

    Notes
    -----
    The passed instrument must be set up as a survey instrument.

    Parameters
    ----------
    instrument: str
        Name of instrument as seen in second column of Data Dictionary.
    event: str
        Unique event name, only used in longitudinal projects
    format: (json, xml, csv), json by default
        Format of returned data

    Returns
    -------
    The raw ``(response, headers)`` tuple from ``_call_api``.
    """
    pl = self.__basepl(content='participantList', format=format)
    pl['instrument'] = instrument
    if event:
        pl['event'] = event
    # NOTE(review): unlike the other export_* methods, this returns the
    # full (response, headers) tuple instead of indexing [0] -- confirm
    # whether callers rely on the tuple before normalizing.
    return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.filter | python | def filter(self, query, output_fields=None):
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return [] | Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database. | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L384-L420 | null | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
    """
    Parameters
    ----------
    url : str
        API URL to your REDCap server
    token : str
        API token to your project
    name : str, optional
        name for project
    verify_ssl : boolean, str
        Verify SSL, default True. Can pass path to CA_BUNDLE.
    lazy : boolean
        If True, skip the API round-trips at construction time; call
        ``configure()`` later to populate metadata-derived attributes.
    """
    self.token = token
    self.name = name
    self.url = url
    self.verify = verify_ssl
    # The attributes below stay None/False until configure() has run.
    self.metadata = None
    self.redcap_version = None
    self.field_names = None
    # We'll use the first field as the default id for each row
    self.def_field = None
    self.field_labels = None
    self.forms = None
    self.events = None
    self.arm_nums = None
    self.arm_names = None
    self.configured = False
    if not lazy:
        self.configure()
def configure(self):
    """Populate metadata-derived attributes by querying the API.

    Exports the project metadata, REDCap version, events and arms, and
    derives ``field_names``, ``def_field``, ``field_labels`` and
    ``forms`` from the metadata. Sets ``configured`` to True on success.

    Raises
    ------
    RedcapError
        If the metadata export or version determination fails.
    """
    try:
        self.metadata = self.__md()
    except RequestException:
        raise RedcapError("Exporting metadata failed. Check your URL and token.")
    try:
        self.redcap_version = self.__rcv()
    # NOTE(review): bare except swallows everything, including
    # KeyboardInterrupt -- consider narrowing.
    except:
        raise RedcapError("Determination of REDCap version failed")
    self.field_names = self.filter_metadata('field_name')
    # we'll use the first field as the default id for each row
    self.def_field = self.field_names[0]
    self.field_labels = self.filter_metadata('field_label')
    self.forms = tuple(set(c['form_name'] for c in self.metadata))
    # determine whether longitudinal
    ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
    arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
    # Non-longitudinal projects answer with an error dict; normalize
    # that to empty tuples so is_longitudinal() works uniformly.
    if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
        events = tuple([])
    else:
        events = ev_data
    if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
        arm_nums = tuple([])
        arm_names = tuple([])
    else:
        arm_nums = tuple([a['arm_num'] for a in arm_data])
        arm_names = tuple([a['name'] for a in arm_data])
    self.events = events
    self.arm_nums = arm_nums
    self.arm_names = arm_names
    self.configured = True
def __md(self):
    """Return the project's metadata structure from the API.

    Returns
    -------
    The json-decoded metadata export (first element of the
    ``_call_api`` response tuple).
    """
    # __basepl already sets content='metadata'; the previous version
    # redundantly assigned it a second time.
    p_l = self.__basepl('metadata')
    return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
    """Fetch and parse the REDCap server version.

    Returns a ``semantic_version.Version`` when the server answers with
    a valid semver string, the raw string otherwise, or '' (with a
    warning) when the instance does not expose version information.
    """
    p_l = self.__basepl('version')
    # The version endpoint answers with raw bytes, not json.
    rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
    if 'error' in rcv:
        warnings.warn('Version information not available for this REDCap instance')
        return ''
    if semantic_version.validate(rcv):
        return semantic_version.Version(rcv)
    else:
        return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
    """Dispatch an API request through RCRequest.

    Merges per-call ``kwargs`` over the project-level HTTP defaults.
    'typpe' (sic) names the API action so RCRequest can validate the
    payload and response. Returns whatever ``RCRequest.execute``
    returns -- used throughout this class as a (response, headers)
    tuple.
    """
    request_kwargs = self._kwargs()
    request_kwargs.update(kwargs)
    rcr = RCRequest(self.url, payload, typpe)
    return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
    """
    Export the project's form to event mapping

    Parameters
    ----------
    arms : list
        Limit exported form event mappings to these arm numbers
    format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
        Return the form event mappings in native objects,
        csv or xml, ``'df'`` will return a ``pandas.DataFrame``
    df_kwargs : dict
        Passed to pandas.read_csv to control construction of
        returned DataFrame

    Returns
    -------
    fem : list, str, ``pandas.DataFrame``
        form-event mapping for the project
    """
    ret_format = format
    if format == 'df':
        # pandas import deferred so it is only required for 'df';
        # the API itself is asked for csv which read_csv then parses.
        from pandas import read_csv
        ret_format = 'csv'
    pl = self.__basepl('formEventMapping', format=ret_format)
    # 'arms' is the only optional filter for this call; the previous
    # version zipped two single-element lists for no benefit.
    if arms:
        pl['arms'] = ','.join(arms)
    response, _ = self._call_api(pl, 'exp_fem')
    if format in ('json', 'csv', 'xml'):
        return response
    elif format == 'df':
        if not df_kwargs:
            return read_csv(StringIO(response))
        return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
                    df_kwargs=None):
    """
    Export the project's metadata

    Parameters
    ----------
    fields : list
        Limit exported metadata to these fields
    forms : list
        Limit exported metadata to these forms
    format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
        Return the metadata in native objects, csv or xml.
        ``'df'`` will return a ``pandas.DataFrame``.
    df_kwargs : dict
        Passed to ``pandas.read_csv`` to control construction of
        returned DataFrame.
        by default ``{'index_col': 'field_name'}``

    Returns
    -------
    metadata : list, str, ``pandas.DataFrame``
        metadata structure for the project.
    """
    ret_format = format
    if format == 'df':
        # pandas import deferred so it is only needed for 'df';
        # the server is asked for csv, which read_csv then parses.
        from pandas import read_csv
        ret_format = 'csv'
    pl = self.__basepl('metadata', format=ret_format)
    # Only attach the optional filters that were actually supplied.
    to_add = [fields, forms]
    str_add = ['fields', 'forms']
    for key, data in zip(str_add, to_add):
        if data:
            pl[key] = ','.join(data)
    response, _ = self._call_api(pl, 'metadata')
    if format in ('json', 'csv', 'xml'):
        return response
    elif format == 'df':
        if not df_kwargs:
            df_kwargs = {'index_col': 'field_name'}
        return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
                   events=None, raw_or_label='raw', event_name='label',
                   format='json', export_survey_fields=False,
                   export_data_access_groups=False, df_kwargs=None,
                   export_checkbox_labels=False, filter_logic=None):
    """
    Export data from the REDCap project.

    Parameters
    ----------
    records : list
        array of record names specifying specific records to export.
        by default, all records are exported
    fields : list
        array of field names specifying specific fields to pull
        by default, all fields are exported
    forms : list
        array of form names to export. If in the web UI, the form
        name has a space in it, replace the space with an underscore
        by default, all forms are exported
    events : list
        an array of unique event names from which to export records

        :note: this only applies to longitudinal projects
    raw_or_label : (``'raw'``), ``'label'``, ``'both'``
        export the raw coded values or labels for the options of
        multiple choice fields, or both
    event_name : (``'label'``), ``'unique'``
        export the unique event name or the event label
    format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
        Format of returned data. ``'json'`` returns json-decoded
        objects while ``'csv'`` and ``'xml'`` return other formats.
        ``'df'`` will attempt to return a ``pandas.DataFrame``.
    export_survey_fields : (``False``), True
        specifies whether or not to export the survey identifier
        field (e.g., "redcap_survey_identifier") or survey timestamp
        fields (e.g., form_name+"_timestamp") when surveys are
        utilized in the project.
    export_data_access_groups : (``False``), ``True``
        specifies whether or not to export the
        ``"redcap_data_access_group"`` field when data access groups
        are utilized in the project.

        :note: This flag is only viable if the user whose token is
            being used to make the API request is *not* in a data
            access group. If the user is in a group, then this flag
            will revert to its default value.
    df_kwargs : dict
        Passed to ``pandas.read_csv`` to control construction of
        returned DataFrame.
        by default, ``{'index_col': self.def_field}``
    export_checkbox_labels : (``False``), ``True``
        specify whether to export checkbox values as their label on
        export.
    filter_logic : string
        specify the filterLogic to be sent to the API.

    Returns
    -------
    data : list, str, ``pandas.DataFrame``
        exported data
    """
    ret_format = format
    if format == 'df':
        # pandas import deferred: only required when a DataFrame is
        # requested; the server is asked for csv which is then parsed.
        from pandas import read_csv
        ret_format = 'csv'
    pl = self.__basepl('record', format=ret_format)
    # Guarantee the record-id field is always requested (>=6.x servers
    # only return what is explicitly asked for).
    fields = self.backfill_fields(fields, forms)
    keys_to_add = (records, fields, forms, events,
                   raw_or_label, event_name, export_survey_fields,
                   export_data_access_groups, export_checkbox_labels)
    str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
                'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
                'exportCheckboxLabel')
    # Falsy values (None, False, empty list) are simply omitted from
    # the payload, leaving the server defaults in effect.
    for key, data in zip(str_keys, keys_to_add):
        if data:
            # Make a url-ok string
            if key in ('fields', 'records', 'forms', 'events'):
                pl[key] = ','.join(data)
            else:
                pl[key] = data
    if filter_logic:
        pl["filterLogic"] = filter_logic
    response, _ = self._call_api(pl, 'exp_record')
    if format in ('json', 'csv', 'xml'):
        return response
    elif format == 'df':
        if not df_kwargs:
            # Longitudinal exports index on (record id, event name).
            if self.is_longitudinal():
                df_kwargs = {'index_col': [self.def_field,
                                           'redcap_event_name']}
            else:
                df_kwargs = {'index_col': self.def_field}
        buf = StringIO(response)
        df = read_csv(buf, **df_kwargs)
        buf.close()
        return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
                   return_format='json', return_content='count',
                   date_format='YMD', force_auto_number=False):
    """
    Import data into the RedCap Project

    Parameters
    ----------
    to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
        :note:
            If you pass a csv or xml string, you should use the
            ``format`` parameter appropriately.
        :note:
            Keys of the dictionaries should be subset of project's,
            fields, but this isn't a requirement. If you provide keys
            that aren't defined fields, the returned response will
            contain an ``'error'`` key.
    overwrite : ('normal'), 'overwrite'
        ``'overwrite'`` will erase values previously stored in the
        database if not specified in the to_import dictionaries.
    format : ('json'), 'xml', 'csv'
        Format of incoming data. By default, to_import will be json-encoded
    return_format : ('json'), 'csv', 'xml'
        Response format. By default, response will be json-decoded.
    return_content : ('count'), 'ids', 'nothing'
        By default, the response contains a 'count' key with the number of
        records just imported. By specifying 'ids', a list of ids
        imported will be returned. 'nothing' will only return
        the HTTP status code and no message.
    date_format : ('YMD'), 'DMY', 'MDY'
        Describes the formatting of dates. By default, date strings
        are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
        strings are formatted as 'MM/DD/YYYY' set this parameter as
        'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
        other formattings are allowed.
    force_auto_number : ('False') Enables automatic assignment of record IDs
        of imported records by REDCap. If this is set to true, and auto-numbering
        for records is enabled for the project, auto-numbering of imported records
        will be enabled.

    Returns
    -------
    response : dict, str
        response from REDCap API, json-decoded if ``return_format`` == ``'json'``

    Raises
    ------
    RedcapError
        If the API response contains an 'error' key.
    """
    pl = self.__basepl('record')
    # Duck-type DataFrames by the presence of to_csv so pandas is not
    # a hard dependency of this method.
    if hasattr(to_import, 'to_csv'):
        # We'll assume it's a df
        buf = StringIO()
        if self.is_longitudinal():
            csv_kwargs = {'index_label': [self.def_field,
                                          'redcap_event_name']}
        else:
            csv_kwargs = {'index_label': self.def_field}
        to_import.to_csv(buf, **csv_kwargs)
        pl['data'] = buf.getvalue()
        buf.close()
        # The DataFrame was serialized to csv; tell the API so.
        format = 'csv'
    elif format == 'json':
        pl['data'] = json.dumps(to_import, separators=(',', ':'))
    else:
        # don't do anything to csv/xml
        pl['data'] = to_import
    pl['overwriteBehavior'] = overwrite
    pl['format'] = format
    pl['returnFormat'] = return_format
    pl['returnContent'] = return_content
    pl['dateFormat'] = date_format
    pl['forceAutoNumber'] = force_auto_number
    response = self._call_api(pl, 'imp_record')[0]
    if 'error' in response:
        raise RedcapError(str(response))
    return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.names_labels | python | def names_labels(self, do_print=False):
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels | Simple helper function to get all field names and labels | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L422-L427 | null | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata sttructure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.import_records | python | def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response | Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'`` | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L429-L501 | [
"def __basepl(self, content, rec_type='flat', format='json'):\n \"\"\"Return a dictionary which can be used as is or added to for\n payloads\"\"\"\n d = {'token': self.token, 'content': content, 'format': format}\n if content not in ['metadata', 'file']:\n d['type'] = rec_type\n return d\n",
"def is_longitudinal(self):\n \"\"\"\n Returns\n -------\n boolean :\n longitudinal status of this project\n \"\"\"\n return len(self.events) > 0 and \\\n len(self.arm_nums) > 0 and \\\n len(self.arm_names) > 0\n",
"def _call_api(self, payload, typpe, **kwargs):\n request_kwargs = self._kwargs()\n request_kwargs.update(kwargs)\n rcr = RCRequest(self.url, payload, typpe)\n return rcr.execute(**request_kwargs)\n"
] | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata sttructure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
    """
    Export the participant list for a survey instrument.

    Notes
    -----
    The instrument must be enabled as a survey within the project.

    Parameters
    ----------
    instrument : str
        instrument name as shown in the second column of the Data Dictionary
    event : str
        unique event name; only meaningful for longitudinal projects
    format : (json, xml, csv), json by default
        format of the returned data
    """
    payload = self.__basepl(content='participantList', format=format)
    payload['instrument'] = instrument
    if event:
        payload['event'] = event
    # NOTE: unlike most export helpers, this hands back the raw
    # (response, headers) pair from the API call.
    return self._call_api(payload, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.export_file | python | def export_file(self, record, field, event=None, return_format='json'):
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map | Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L503-L549 | [
"def __basepl(self, content, rec_type='flat', format='json'):\n \"\"\"Return a dictionary which can be used as is or added to for\n payloads\"\"\"\n d = {'token': self.token, 'content': content, 'format': format}\n if content not in ['metadata', 'file']:\n d['type'] = rec_type\n return d\n",
"def _call_api(self, payload, typpe, **kwargs):\n request_kwargs = self._kwargs()\n request_kwargs.update(kwargs)\n rcr = RCRequest(self.url, payload, typpe)\n return rcr.execute(**request_kwargs)\n",
"def _check_file_field(self, field):\n \"\"\"Check that field exists and is a file field\"\"\"\n is_field = field in self.field_names\n is_file = self.__meta_metadata(field, 'field_type') == 'file'\n if not (is_field and is_file):\n msg = \"'%s' is not a field or not a 'file' field\" % field\n raise ValueError(msg)\n else:\n return True\n"
] | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata structure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formats are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.import_file | python | def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0] | Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format`` | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L551-L589 | [
"def __basepl(self, content, rec_type='flat', format='json'):\n \"\"\"Return a dictionary which can be used as is or added to for\n payloads\"\"\"\n d = {'token': self.token, 'content': content, 'format': format}\n if content not in ['metadata', 'file']:\n d['type'] = rec_type\n return d\n",
"def _call_api(self, payload, typpe, **kwargs):\n request_kwargs = self._kwargs()\n request_kwargs.update(kwargs)\n rcr = RCRequest(self.url, payload, typpe)\n return rcr.execute(**request_kwargs)\n",
"def _check_file_field(self, field):\n \"\"\"Check that field exists and is a file field\"\"\"\n is_field = field in self.field_names\n is_file = self.__meta_metadata(field, 'field_type') == 'file'\n if not (is_field and is_file):\n msg = \"'%s' is not a field or not a 'file' field\" % field\n raise ValueError(msg)\n else:\n return True\n"
] | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata structure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.delete_file | python | def delete_file(self, record, field, return_format='json', event=None):
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0] | Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L591-L625 | [
"def __basepl(self, content, rec_type='flat', format='json'):\n \"\"\"Return a dictionary which can be used as is or added to for\n payloads\"\"\"\n d = {'token': self.token, 'content': content, 'format': format}\n if content not in ['metadata', 'file']:\n d['type'] = rec_type\n return d\n",
"def _call_api(self, payload, typpe, **kwargs):\n request_kwargs = self._kwargs()\n request_kwargs.update(kwargs)\n rcr = RCRequest(self.url, payload, typpe)\n return rcr.execute(**request_kwargs)\n",
"def _check_file_field(self, field):\n \"\"\"Check that field exists and is a file field\"\"\"\n is_field = field in self.field_names\n is_file = self.__meta_metadata(field, 'field_type') == 'file'\n if not (is_field and is_file):\n msg = \"'%s' is not a field or not a 'file' field\" % field\n raise ValueError(msg)\n else:\n return True\n"
] | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata sttructure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project._check_file_field | python | def _check_file_field(self, field):
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True | Check that field exists and is a file field | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L627-L635 | [
"def __meta_metadata(self, field, key):\n \"\"\"Return the value for key for the field in the metadata\"\"\"\n mf = ''\n try:\n mf = str([f[key] for f in self.metadata\n if f['field_name'] == field][0])\n except IndexError:\n print(\"%s not in metadata field:%s\" % (key, field))\n return mf\n else:\n return mf\n"
] | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
    """Return the server's REDCap version.

    Returns a ``semantic_version.Version`` when the reported string
    parses, the raw string when it does not, and ``''`` (after
    warning) when the server reports an error instead of a version.
    """
    raw = self._call_api(self.__basepl('version'), 'version')[0].decode('utf-8')
    if 'error' in raw:
        warnings.warn('Version information not available for this REDCap instance')
        return ''
    if not semantic_version.validate(raw):
        return raw
    return semantic_version.Version(raw)
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
    """Send ``payload`` to the API as request type ``typpe``.

    ``typpe`` (sic) is kept misspelled because it is part of this
    method's long-standing signature. Returns whatever
    ``RCRequest.execute`` returns -- callers in this class unpack it
    as a ``(response, headers)`` pair.
    """
    request_kwargs = self._kwargs()
    request_kwargs.update(kwargs)
    rcr = RCRequest(self.url, payload, typpe)
    return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
    """
    Export the project's form to event mapping

    Parameters
    ----------
    arms : list
        Limit exported form event mappings to these arm numbers
    format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
        Return the form event mappings in native objects,
        csv or xml, ``'df'`` will return a ``pandas.DataFrame``
    df_kwargs : dict
        Passed to pandas.read_csv to control construction of
        returned DataFrame

    Returns
    -------
    fem : list, str, ``pandas.DataFrame``
        form-event mapping for the project

    NOTE(review): an unrecognized ``format`` falls through both
    branches and implicitly returns ``None``.
    """
    ret_format = format
    if format == 'df':
        from pandas import read_csv
        # The API has no native df format; request csv and parse it.
        ret_format = 'csv'
    pl = self.__basepl('formEventMapping', format=ret_format)
    to_add = [arms]
    str_add = ['arms']
    for key, data in zip(str_add, to_add):
        if data:
            pl[key] = ','.join(data)
    response, _ = self._call_api(pl, 'exp_fem')
    if format in ('json', 'csv', 'xml'):
        return response
    elif format == 'df':
        if not df_kwargs:
            return read_csv(StringIO(response))
        else:
            return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
                    df_kwargs=None):
    """
    Export the project's metadata

    Parameters
    ----------
    fields : list
        Limit exported metadata to these fields
    forms : list
        Limit exported metadata to these forms
    format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
        Return the metadata in native objects, csv or xml.
        ``'df'`` will return a ``pandas.DataFrame``.
    df_kwargs : dict
        Passed to ``pandas.read_csv`` to control construction of
        returned DataFrame.
        by default ``{'index_col': 'field_name'}``

    Returns
    -------
    metadata : list, str, ``pandas.DataFrame``
        metadata structure for the project.
    """
    ret_format = format
    if format == 'df':
        from pandas import read_csv
        # The API has no native df format; request csv and parse it.
        ret_format = 'csv'
    pl = self.__basepl('metadata', format=ret_format)
    to_add = [fields, forms]
    str_add = ['fields', 'forms']
    for key, data in zip(str_add, to_add):
        if data:
            pl[key] = ','.join(data)
    response, _ = self._call_api(pl, 'metadata')
    if format in ('json', 'csv', 'xml'):
        return response
    elif format == 'df':
        if not df_kwargs:
            df_kwargs = {'index_col': 'field_name'}
        return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
                   events=None, raw_or_label='raw', event_name='label',
                   format='json', export_survey_fields=False,
                   export_data_access_groups=False, df_kwargs=None,
                   export_checkbox_labels=False, filter_logic=None):
    """
    Export data from the REDCap project.

    Parameters
    ----------
    records : list
        array of record names specifying specific records to export.
        by default, all records are exported
    fields : list
        array of field names specifying specific fields to pull
        by default, all fields are exported
    forms : list
        array of form names to export. If in the web UI, the form
        name has a space in it, replace the space with an underscore
        by default, all forms are exported
    events : list
        an array of unique event names from which to export records

        :note: this only applies to longitudinal projects
    raw_or_label : (``'raw'``), ``'label'``, ``'both'``
        export the raw coded values or labels for the options of
        multiple choice fields, or both
    event_name : (``'label'``), ``'unique'``
        export the unique event name or the event label
    format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
        Format of returned data. ``'json'`` returns json-decoded
        objects while ``'csv'`` and ``'xml'`` return other formats.
        ``'df'`` will attempt to return a ``pandas.DataFrame``.
    export_survey_fields : (``False``), True
        specifies whether or not to export the survey identifier
        field (e.g., "redcap_survey_identifier") or survey timestamp
        fields (e.g., form_name+"_timestamp") when surveys are
        utilized in the project.
    export_data_access_groups : (``False``), ``True``
        specifies whether or not to export the
        ``"redcap_data_access_group"`` field when data access groups
        are utilized in the project.

        :note: This flag is only viable if the user whose token is
            being used to make the API request is *not* in a data
            access group. If the user is in a group, then this flag
            will revert to its default value.
    df_kwargs : dict
        Passed to ``pandas.read_csv`` to control construction of
        returned DataFrame.
        by default, ``{'index_col': self.def_field}``
    export_checkbox_labels : (``False``), ``True``
        specify whether to export checkbox values as their label on
        export.
    filter_logic : string
        specify the filterLogic to be sent to the API.

    Returns
    -------
    data : list, str, ``pandas.DataFrame``
        exported data
    """
    ret_format = format
    if format == 'df':
        from pandas import read_csv
        # The API has no native df format; request csv and parse it.
        ret_format = 'csv'
    pl = self.__basepl('record', format=ret_format)
    # Guarantee the record-id field is requested (>6.x servers only
    # return exactly the requested fields).
    fields = self.backfill_fields(fields, forms)
    keys_to_add = (records, fields, forms, events,
                   raw_or_label, event_name, export_survey_fields,
                   export_data_access_groups, export_checkbox_labels)
    str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
                'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
                'exportCheckboxLabel')
    for key, data in zip(str_keys, keys_to_add):
        if data:
            # Make a url-ok string
            if key in ('fields', 'records', 'forms', 'events'):
                pl[key] = ','.join(data)
            else:
                pl[key] = data
    if filter_logic:
        pl["filterLogic"] = filter_logic
    response, _ = self._call_api(pl, 'exp_record')
    if format in ('json', 'csv', 'xml'):
        return response
    elif format == 'df':
        if not df_kwargs:
            # Longitudinal exports carry one row per (record, event).
            if self.is_longitudinal():
                df_kwargs = {'index_col': [self.def_field,
                                           'redcap_event_name']}
            else:
                df_kwargs = {'index_col': self.def_field}
        buf = StringIO(response)
        df = read_csv(buf, **df_kwargs)
        buf.close()
        return df
def metadata_type(self, field_name):
    """If the given field_name is validated by REDCap, return its type"""
    return self.__meta_metadata(field_name,
                                'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
    """Query the database and return subject information for those
    who match the query logic

    Parameters
    ----------
    query: Query or QueryGroup
        Query(Group) object to process
    output_fields: list
        The fields desired for matching subjects

    Returns
    -------
    A list of dictionaries whose keys contains at least the default field
    and at most each key passed in with output_fields, each dictionary
    representing a surviving row in the database.
    """
    query_keys = query.fields()
    if not set(query_keys).issubset(set(self.field_names)):
        raise ValueError("One or more query keys not in project keys")
    query_keys.append(self.def_field)
    data = self.export_records(fields=query_keys)
    matches = query.filter(data, self.def_field)
    if matches:
        # if output_fields is empty, we'll download all fields, which is
        # not desired, so we limit download to def_field
        if not output_fields:
            output_fields = [self.def_field]
        # But if caller passed a string and not list, we need to listify.
        # Fixed: ``basestring`` does not exist on Python 3 and raised
        # NameError here; ``str`` covers text on Python 3.
        if isinstance(output_fields, str):
            output_fields = [output_fields]
        return self.export_records(records=matches, fields=output_fields)
    else:
        # If there are no matches, then sending an empty list to
        # export_records will actually return all rows, which is not
        # what we want
        return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
                   return_format='json', return_content='count',
                   date_format='YMD', force_auto_number=False):
    """
    Import data into the RedCap Project

    Parameters
    ----------
    to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
        :note:
            If you pass a csv or xml string, you should use the
            ``format`` parameter appropriately.
        :note:
            Keys of the dictionaries should be subset of project's,
            fields, but this isn't a requirement. If you provide keys
            that aren't defined fields, the returned response will
            contain an ``'error'`` key.
    overwrite : ('normal'), 'overwrite'
        ``'overwrite'`` will erase values previously stored in the
        database if not specified in the to_import dictionaries.
    format : ('json'), 'xml', 'csv'
        Format of incoming data. By default, to_import will be json-encoded
    return_format : ('json'), 'csv', 'xml'
        Response format. By default, response will be json-decoded.
    return_content : ('count'), 'ids', 'nothing'
        By default, the response contains a 'count' key with the number of
        records just imported. By specifying 'ids', a list of ids
        imported will be returned. 'nothing' will only return
        the HTTP status code and no message.
    date_format : ('YMD'), 'DMY', 'MDY'
        Describes the formatting of dates. By default, date strings
        are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
        strings are formatted as 'MM/DD/YYYY' set this parameter as
        'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
        other formattings are allowed.
    force_auto_number : ('False') Enables automatic assignment of record IDs
        of imported records by REDCap. If this is set to true, and auto-numbering
        for records is enabled for the project, auto-numbering of imported records
        will be enabled.

    Returns
    -------
    response : dict, str
        response from REDCap API, json-decoded if ``return_format`` == ``'json'``

    Raises
    ------
    RedcapError
        If the API response contains ``'error'``.
    """
    pl = self.__basepl('record')
    if hasattr(to_import, 'to_csv'):
        # We'll assume it's a df
        buf = StringIO()
        if self.is_longitudinal():
            csv_kwargs = {'index_label': [self.def_field,
                                          'redcap_event_name']}
        else:
            csv_kwargs = {'index_label': self.def_field}
        to_import.to_csv(buf, **csv_kwargs)
        pl['data'] = buf.getvalue()
        buf.close()
        # Override caller's format: DataFrames are always sent as csv.
        format = 'csv'
    elif format == 'json':
        pl['data'] = json.dumps(to_import, separators=(',', ':'))
    else:
        # don't do anything to csv/xml
        pl['data'] = to_import
    pl['overwriteBehavior'] = overwrite
    pl['format'] = format
    pl['returnFormat'] = return_format
    pl['returnContent'] = return_content
    pl['dateFormat'] = date_format
    pl['forceAutoNumber'] = force_auto_number
    response = self._call_api(pl, 'imp_record')[0]
    if 'error' in response:
        raise RedcapError(str(response))
    return response
def export_file(self, record, field, event=None, return_format='json'):
    """
    Export the contents of a file stored for a particular record

    Notes
    -----
    Unlike other export methods, this works on a single record.

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name containing the file to be exported.
    event: str
        for longitudinal projects, specify the unique event here
    return_format: ('json'), 'csv', 'xml'
        format of error message

    Returns
    -------
    content : bytes
        content of the file
    content_map : dict
        content-type dictionary
    """
    self._check_file_field(field)
    # load up payload
    pl = self.__basepl(content='file', format=return_format)
    # there's no format field in this call
    del pl['format']
    pl['returnFormat'] = return_format
    pl['action'] = 'export'
    pl['field'] = field
    pl['record'] = record
    if event:
        pl['event'] = event
    content, headers = self._call_api(pl, 'exp_file')
    # REDCap adds some useful things in content-type, e.g.
    # 'text/plain; name="f.txt"; charset=UTF-8' -- parse the
    # semicolon-separated key=value parameters into a dict.
    if 'content-type' in headers:
        splat = [kv.strip() for kv in headers['content-type'].split(';')]
        # NOTE(review): ``kv`` is deliberately reused as both the loop
        # variable and the resulting pair list in the original code.
        kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
              in splat if '=' in kv]
        content_map = dict(kv)
    else:
        content_map = {}
    return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
                return_format='json'):
    """
    Import the contents of a file represented by fobj to a
    particular records field

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name where the file will go
    fname : str
        file name visible in REDCap UI
    fobj : file object
        file object as returned by `open`
    event : str
        for longitudinal projects, specify the unique event here
    return_format : ('json'), 'csv', 'xml'
        format of error message

    Returns
    -------
    response :
        response from server as specified by ``return_format``
    """
    self._check_file_field(field)
    # load up payload
    pl = self.__basepl(content='file', format=return_format)
    # no format in this call
    del pl['format']
    pl['returnFormat'] = return_format
    pl['action'] = 'import'
    pl['field'] = field
    pl['record'] = record
    if event:
        pl['event'] = event
    # The file goes in the multipart body, not the payload dict.
    file_kwargs = {'files': {'file': (fname, fobj)}}
    return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
    """
    Delete a file from REDCap

    Notes
    -----
    There is no undo button to this.

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name
    return_format : (``'json'``), ``'csv'``, ``'xml'``
        return format for error message
    event : str
        If longitudinal project, event to delete file from

    Returns
    -------
    response : dict, str
        response from REDCap after deleting file
    """
    self._check_file_field(field)
    # Load up payload
    pl = self.__basepl(content='file', format=return_format)
    # there's no format field in this call
    del pl['format']
    pl['returnFormat'] = return_format
    pl['action'] = 'delete'
    pl['record'] = record
    pl['field'] = field
    if event:
        pl['event'] = event
    return self._call_api(pl, 'del_file')[0]
def export_users(self, format='json'):
    """
    Export the users of the Project

    Notes
    -----
    Each user will have the following keys:

        * ``'firstname'`` : User's first name
        * ``'lastname'`` : User's last name
        * ``'email'`` : Email address
        * ``'username'`` : User's username
        * ``'expiration'`` : Project access expiration date
        * ``'data_access_group'`` : data access group ID
        * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
        * ``'forms'`` : a list of dicts with a single key as the form name and
            value is an integer describing that user's form rights,
            where: 0=no access, 1=view records/responses and edit
            records (survey responses are read-only), 2=read only, and
            3=edit survey responses

    Parameters
    ----------
    format : (``'json'``), ``'csv'``, ``'xml'``
        response return format

    Returns
    -------
    users: list, str
        list of user dicts when ``format='json'``,
        otherwise a string
    """
    pl = self.__basepl(content='user', format=format)
    # Only the response body is returned; headers are dropped.
    return self._call_api(pl, 'exp_user')[0]
def export_survey_participant_list(self, instrument, event=None, format='json'):
    """
    Export the Survey Participant List

    Notes
    -----
    The passed instrument must be set up as a survey instrument.

    Parameters
    ----------
    instrument: str
        Name of instrument as seen in second column of Data Dictionary.
    event: str
        Unique event name, only used in longitudinal projects
    format: (json, xml, csv), json by default
        Format of returned data

    Returns
    -------
    NOTE(review): unlike the other export methods, this returns the
    full ``_call_api`` result (a ``(response, headers)`` tuple) rather
    than just the response body -- kept as-is for callers that unpack it.
    """
    pl = self.__basepl(content='participantList', format=format)
    pl['instrument'] = instrument
    if event:
        pl['event'] = event
    return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.export_users | python | def export_users(self, format='json'):
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0] | Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L637-L671 | [
"def __basepl(self, content, rec_type='flat', format='json'):\n \"\"\"Return a dictionary which can be used as is or added to for\n payloads\"\"\"\n d = {'token': self.token, 'content': content, 'format': format}\n if content not in ['metadata', 'file']:\n d['type'] = rec_type\n return d\n",
"def _call_api(self, payload, typpe, **kwargs):\n request_kwargs = self._kwargs()\n request_kwargs.update(kwargs)\n rcr = RCRequest(self.url, payload, typpe)\n return rcr.execute(**request_kwargs)\n"
] | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata sttructure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_survey_participant_list(self, instrument, event=None, format='json'):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list')
|
redcap-tools/PyCap | redcap/project.py | Project.export_survey_participant_list | python | def export_survey_participant_list(self, instrument, event=None, format='json'):
pl = self.__basepl(content='participantList', format=format)
pl['instrument'] = instrument
if event:
pl['event'] = event
return self._call_api(pl, 'exp_survey_participant_list') | Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L673-L694 | [
"def __basepl(self, content, rec_type='flat', format='json'):\n \"\"\"Return a dictionary which can be used as is or added to for\n payloads\"\"\"\n d = {'token': self.token, 'content': content, 'format': format}\n if content not in ['metadata', 'file']:\n d['type'] = rec_type\n return d\n",
"def _call_api(self, payload, typpe, **kwargs):\n request_kwargs = self._kwargs()\n request_kwargs.update(kwargs)\n rcr = RCRequest(self.url, payload, typpe)\n return rcr.execute(**request_kwargs)\n"
] | class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
try:
self.redcap_version = self.__rcv()
except:
raise RedcapError("Determination of REDCap version failed")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
if isinstance(ev_data, dict) and ('error' in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ('error' in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def __rcv(self):
p_l = self.__basepl('version')
rcv = self._call_api(p_l, 'version')[0].decode('utf-8')
if 'error' in rcv:
warnings.warn('Version information not available for this REDCap instance')
return ''
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
else:
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata sttructure for the project.
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None,
export_checkbox_labels=False, filter_logic=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups, export_checkbox_labels)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
'exportCheckboxLabel')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
if filter_logic:
pl["filterLogic"] = filter_logic
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD', force_auto_number=False):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
pl['forceAutoNumber'] = force_auto_number
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
|
pri22296/beautifultable | beautifultable/ansi.py | ANSIMultiByteString.wrap | python | def wrap(self, width):
res = []
prev_state = set()
part = []
cwidth = 0
for char, _width, state in zip(self._string, self._width, self._state):
if cwidth + _width > width:
if prev_state:
part.append(self.ANSI_RESET)
res.append("".join(part))
prev_state = set()
part = []
cwidth = 0
cwidth += _width
if prev_state == state:
pass
elif prev_state <= state:
part.extend(state - prev_state)
else:
part.append(self.ANSI_RESET)
part.extend(state)
prev_state = state
part.append(char)
if prev_state:
part.append(self.ANSI_RESET)
if part:
res.append("".join(part))
return res | Returns a partition of the string based on `width` | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/ansi.py#L87-L115 | null | class ANSIMultiByteString(object):
ANSI_REGEX = re.compile(r'(\x1B\[[0-?]*[ -/]*[@-~])')
ANSI_RESET = '\x1b[0m'
def __init__(self, string):
self._string = []
self._state = []
self._width = []
self._termwidth = 0
state = set()
for token in re.split(self.ANSI_REGEX, to_unicode(string)):
if token:
if re.match(self.ANSI_REGEX, token):
if token == self.ANSI_RESET:
state.clear()
else:
state.add(token)
else:
s_copy = set(state)
for char in token:
w = wcwidth(char)
if w == -1:
raise ValueError(("Unsupported Literal {} in "
"string {}").format(repr(char),
repr(token)))
self._termwidth += w
self._string.append(char)
self._width.append(w)
self._state.append(s_copy)
def __len__(self):
return len(self._string)
def __getitem__(self, key):
if isinstance(key, int):
if self._state[key]:
return ("".join(self._state[key])
+ self._string[key]
+ self.ANSI_RESET)
else:
return self._string[key]
elif isinstance(key, slice):
return self._slice(key)
else:
raise TypeError(("table indices must be integers or slices, "
"not {}").format(type(key).__name__))
def _slice(self, key):
res = []
prev_state = set()
for char, state in zip(self._string[key], self._state[key]):
if prev_state == state:
pass
elif prev_state <= state:
res.extend(state - prev_state)
else:
res.append(self.ANSI_RESET)
res.extend(state)
prev_state = state
res.append(char)
if prev_state:
res.append(self.ANSI_RESET)
return "".join(res)
def termwidth(self):
"""Returns the width of string as when printed to a terminal"""
return self._termwidth
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.max_table_width | python | def max_table_width(self):
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width | get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L401-L414 | [
"def termwidth(item):\n \"\"\"Returns the visible width of the string as shown on the terminal\"\"\"\n obj = ANSIMultiByteString(to_unicode(item))\n return obj.termwidth()\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
default_alignment=enums.ALIGN_CENTER,
default_padding=1):
self.set_style(enums.STYLE_DEFAULT)
self.numeric_precision = 3
self.serialno = False
self.serialno_header = "SN"
self.detect_numerics = True
self._column_count = 0
self._sign_mode = enums.SM_MINUS
self._width_exceed_policy = enums.WEP_WRAP
self._column_pad = " "
self.default_alignment = default_alignment
self.default_padding = default_padding
self.max_table_width = max_width
self._initialize_table(0)
self._table = []
def __setattr__(self, name, value):
attrs = ('left_border_char', 'right_border_char', 'top_border_char',
'bottom_border_char', 'header_separator_char',
'column_separator_char', 'row_separator_char',
'intersect_top_left', 'intersect_top_mid',
'intersect_top_right', 'intersect_header_left',
'intersect_header_mid', 'intersect_header_right',
'intersect_row_left', 'intersect_row_mid',
'intersect_row_right', 'intersect_bottom_left',
'intersect_bottom_mid', 'intersect_bottom_right')
if to_unicode(name) in attrs and not isinstance(value, basestring):
value_type = type(value).__name__
raise TypeError(("Expected {attr} to be of type 'str', "
"got '{attr_type}'").format(attr=name,
attr_type=value_type))
super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
"""Get the number of columns in the table(read only)"""
return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
    """Attribute to control how signs are displayed for numerical data.

    It can be one of the following:

    ======================== =============================================
    Option                   Meaning
    ======================== =============================================
    beautifultable.SM_PLUS   A sign should be used for both +ve and -ve
                             numbers.
    beautifultable.SM_MINUS  A sign should only be used for -ve numbers.
    beautifultable.SM_SPACE  A leading space should be used for +ve
                             numbers and a minus sign for -ve numbers.
    ======================== =============================================
    """
    return self._sign_mode

@sign_mode.setter
def sign_mode(self, value):
    # Reject anything that is not a member of the SignMode enum, with
    # an error message listing the accepted values.
    if not isinstance(value, enums.SignMode):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.SignMode)
        error_msg = ("allowed values for sign_mode are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    self._sign_mode = value

@property
def width_exceed_policy(self):
    """Attribute to control how exceeding column width should be handled.

    It can be one of the following:

    ============================ =========================================
    Option                       Meaning
    ============================ =========================================
    beautifultable.WEP_WRAP      An item is wrapped so every line fits
                                 within it's column width.
    beautifultable.WEP_STRIP     An item is stripped to fit in it's
                                 column.
    beautifultable.WEP_ELLIPSIS  An item is stripped to fit in it's
                                 column and appended with ...(Ellipsis).
    ============================ =========================================
    """
    return self._width_exceed_policy

@width_exceed_policy.setter
def width_exceed_policy(self, value):
    # Same validation pattern as `sign_mode`, against the
    # WidthExceedPolicy enum.
    if not isinstance(value, enums.WidthExceedPolicy):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.WidthExceedPolicy)
        error_msg = ("allowed values for width_exceed_policy are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    self._width_exceed_policy = value
@property
def default_alignment(self):
    """Attribute to control the alignment of newly created columns.

    It can be one of the following:

    ============================ =========================================
    Option                       Meaning
    ============================ =========================================
    beautifultable.ALIGN_LEFT    New columns are left aligned.
    beautifultable.ALIGN_CENTER  New columns are center aligned.
    beautifultable.ALIGN_RIGHT   New columns are right aligned.
    ============================ =========================================
    """
    return self._default_alignment

@default_alignment.setter
def default_alignment(self, value):
    # Only members of the Alignment enum are accepted.
    if not isinstance(value, enums.Alignment):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Alignment)
        error_msg = ("allowed values for default_alignment are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    self._default_alignment = value

@property
def default_padding(self):
    """Initial value for Left and Right padding widths for new columns."""
    return self._default_padding

@default_padding.setter
def default_padding(self, value):
    # Padding must be a strictly positive integer.
    if not isinstance(value, int):
        raise TypeError("padding must be an integer")
    elif value <= 0:
        raise ValueError("padding must be more than 0")
    else:
        self._default_padding = value
@property
def column_widths(self):
    """get/set width for the columns of the table.

    Width of the column specifies the max number of characters
    a column can contain. Larger characters are handled according to
    the value of `width_exceed_policy`.
    """
    return self._column_widths

@column_widths.setter
def column_widths(self, value):
    # _validate_row enforces iterable-of-correct-length semantics.
    width = self._validate_row(value)
    self._column_widths = PositiveIntegerMetaData(self, width)

@property
def column_headers(self):
    """get/set titles for the columns of the table.

    It can be any iterable having all members an instance of `str`.
    """
    return self._column_headers

@column_headers.setter
def column_headers(self, value):
    header = self._validate_row(value)
    # Each header must individually be a string.
    for i in header:
        if not isinstance(i, basestring):
            raise TypeError(("Headers should be of type 'str', "
                             "not {}").format(type(i)))
    self._column_headers = HeaderData(self, header)

@property
def column_alignments(self):
    """get/set alignment of the columns of the table.

    It can be any iterable containing only the following:

    * beautifultable.ALIGN_LEFT
    * beautifultable.ALIGN_CENTER
    * beautifultable.ALIGN_RIGHT
    """
    return self._column_alignments

@column_alignments.setter
def column_alignments(self, value):
    alignment = self._validate_row(value)
    self._column_alignments = AlignmentMetaData(self, alignment)

@property
def left_padding_widths(self):
    """get/set width for left padding of the columns of the table.

    Left Width of the padding specifies the number of characters
    on the left of a column reserved for padding. By Default It is 1.
    """
    return self._left_padding_widths

@left_padding_widths.setter
def left_padding_widths(self, value):
    pad_width = self._validate_row(value)
    self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)

@property
def right_padding_widths(self):
    """get/set width for right padding of the columns of the table.

    Right Width of the padding specifies the number of characters
    on the right of a column reserved for padding. By default It is 1.
    """
    return self._right_padding_widths

@right_padding_widths.setter
def right_padding_widths(self, value):
    pad_width = self._validate_row(value)
    self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
# BUG FIX: the original had a dangling `@property` directly above
# `@max_table_width.setter` with no getter in between, so the name
# `max_table_width` was undefined when the setter decorator evaluated
# (NameError at class creation). The getter is restored here.
@property
def max_table_width(self):
    """get/set the maximum width of the table in characters.

    Automatically computed column widths are capped so the rendered
    table tries not to exceed this value; it may still be exceeded if
    the column count and padding leave no room for content.
    """
    return self._max_table_width

@max_table_width.setter
def max_table_width(self, value):
    self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
    """Set the column count of the table and reset per-column metadata.

    This method is called to set the number of columns for the first
    time, and by `clear(clear_metadata=True)` to reset the table.

    Parameters
    ----------
    column_count : int
        number of columns in the table
    """
    # Fresh, empty metadata for every column: blank headers, the
    # default alignment/padding, and zero width (widths are computed
    # later when the table is rendered).
    header = [''] * column_count
    alignment = [self.default_alignment] * column_count
    width = [0] * column_count
    padding = [self.default_padding] * column_count
    self._column_count = column_count
    self._column_headers = HeaderData(self, header)
    self._column_alignments = AlignmentMetaData(self, alignment)
    self._column_widths = PositiveIntegerMetaData(self, width)
    self._left_padding_widths = PositiveIntegerMetaData(self, padding)
    self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
    # TODO: Rename this method
    """Coerce `value` to a list and check it matches the column count.

    str is also an iterable but it is not a valid row, so an extra
    check is required for str.

    Parameters
    ----------
    value : iterable
        Candidate row (any non-str iterable).
    init_table_if_required : bool
        If True and the table has no columns yet, the row's length
        defines the column count.

    Raises
    ------
    TypeError
        If `value` is a str or not an iterable.
    ValueError
        If the row length does not match the column count.
    """
    if not isinstance(value, Iterable) or isinstance(value, basestring):
        raise TypeError("parameter must be an iterable")
    row = list(value)
    if init_table_if_required and self._column_count == 0:
        self._initialize_table(len(row))
    if len(row) != self._column_count:
        raise ValueError(("'Expected iterable of length {}, "
                          "got {}").format(self._column_count, len(row)))
    return row
def __getitem__(self, key):
    """Get a row, a column, or a new table by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, returns a row.
        If key is an `str`, returns iterator to a column with header
        `key`.
        If key is a slice object, returns a new table sliced according
        to rows.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, slice):
        new_table = copy.copy(self)
        # Every child of BaseRow class needs to be reassigned so that
        # they contain a reference of the new table rather than the old
        # (this was once the cause of a nasty bug).
        new_table.column_headers = self.column_headers
        new_table.column_alignments = self.column_alignments
        new_table.column_widths = self.column_widths
        new_table.left_padding_widths = self.left_padding_widths
        # BUG FIX: right padding was previously copied from
        # `self.left_padding_widths`, which silently dropped any
        # asymmetric padding configuration on sliced copies.
        new_table.right_padding_widths = self.right_padding_widths
        new_table._table = []
        for row in self._table[key]:
            new_table.append_row(row)
        return new_table
    elif isinstance(key, int):
        return self._table[key]
    elif isinstance(key, basestring):
        return self.get_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
    """Delete a row (int), multiple rows (slice), or a column (str).

    Parameters
    ----------
    key : int, slice, str
        An `int` deletes one row, a slice deletes several rows, and a
        `str` deletes the first column whose heading is `key`.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, (int, slice)):
        # Row deletion delegates directly to the underlying list.
        del self._table[key]
    elif isinstance(key, basestring):
        return self.pop_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
    """Update a row, a column, or multiple rows by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, updates a row.
        If key is an `str`, updates the column with header `key`.
        If key is a slice object, updates multiple rows according to
        slice rules.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    """
    if isinstance(key, (int, slice)):
        self.update_row(key, value)
    elif isinstance(key, basestring):
        self.update_column(key, value)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __len__(self):
    # Number of rows currently in the table.
    return len(self._table)

def __contains__(self, key):
    """`str` keys test header membership; other iterables test rows."""
    if isinstance(key, basestring):
        return key in self._column_headers
    elif isinstance(key, Iterable):
        return key in self._table
    else:
        raise TypeError(("'key' must be str or Iterable, "
                         "not {}").format(type(key).__name__))

def __iter__(self):
    # Iterating over the table yields its rows.
    return iter(self._table)

def __next__(self):
    # NOTE(review): `self._table` is a plain list, and calling next()
    # on a list raises TypeError (lists are iterable, not iterators).
    # This method looks broken/unreachable -- confirm intent upstream.
    return next(self._table)

def __repr__(self):
    # Delegate to the underlying row list.
    return repr(self._table)

def __str__(self):
    # Rendering goes through the full string formatter.
    return self.get_string()
def set_style(self, style):
    """Apply one of the predefined styles to the table.

    Parameters
    ----------
    style: Style
        It can be one of the following:

        * beautifulTable.STYLE_DEFAULT
        * beautifultable.STYLE_NONE
        * beautifulTable.STYLE_DOTTED
        * beautifulTable.STYLE_MYSQL
        * beautifulTable.STYLE_SEPARATED
        * beautifulTable.STYLE_COMPACT
        * beautifulTable.STYLE_MARKDOWN
        * beautifulTable.STYLE_RESTRUCTURED_TEXT
        * beautifultable.STYLE_BOX
        * beautifultable.STYLE_BOX_DOUBLED
        * beautifultable.STYLE_BOX_ROUNDED
        * beautifultable.STYLE_GRID

    Raises
    ------
    ValueError
        If `style` is not a member of the Style enum.
    """
    if not isinstance(style, enums.Style):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Style)
        raise ValueError("allowed values for style are: "
                         + ', '.join(allowed))
    template = style.value
    # Copy every drawing character from the style template onto the
    # table; assignment still goes through __setattr__ validation.
    for attr in ('left_border_char', 'right_border_char',
                 'top_border_char', 'bottom_border_char',
                 'header_separator_char', 'column_separator_char',
                 'row_separator_char',
                 'intersect_top_left', 'intersect_top_mid',
                 'intersect_top_right',
                 'intersect_header_left', 'intersect_header_mid',
                 'intersect_header_right',
                 'intersect_row_left', 'intersect_row_mid',
                 'intersect_row_right',
                 'intersect_bottom_left', 'intersect_bottom_mid',
                 'intersect_bottom_right'):
        setattr(self, attr, getattr(template, attr))
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data."""
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # `offset` is the fixed rendering overhead: borders, separators and
    # padding -- everything that is not cell content.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    # Guarantee room for at least one content character per column.
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)
    # Widest formatted line of any cell (and the header) per column.
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length
    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset
    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1
    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    shrinked_columns = {}
    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                # NOTE(review): the shrunk *width* is the dict key, so
                # two columns shrunk to the same width overwrite each
                # other here -- confirm whether that is intended.
                shrinked_columns[new_width] = i
    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # NOTE(review): `i` enumerates the sorted widths while
                # `index` is the actual column position; adding to
                # `column_widths[i]` (not `[index]`) looks suspicious
                # -- verify against upstream before changing.
                self.column_widths[i] += extra_width
                if i == (len(shrinked_columns) - 1):
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra
    # Re-add the padding that was excluded from the content widths.
    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self):  # pragma : no cover
    """Deprecated public wrapper around `_calculate_column_widths`."""
    deprecation("'auto_calculate_width()' is deprecated")
    self._calculate_column_widths()

def set_padding_widths(self, pad_width):
    """Set width for left and right padding of the columns of the table.

    Parameters
    ----------
    pad_width : array_like
        pad widths for the columns.
    """
    # Both sides receive the same widths; assignments go through the
    # respective property setters, which validate the value.
    self.left_padding_widths = pad_width
    self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
    """Stable sort of the table *IN-PLACE* with respect to a column.

    Parameters
    ----------
    key: int, str
        index or header of the column. Normal list rules apply.
    reverse : bool
        If `True` then table is sorted as if each comparison was
        reversed.
    """
    if isinstance(key, int):
        column_index = key
    elif isinstance(key, basestring):
        column_index = self.get_column_index(key)
    else:
        raise TypeError("'key' must either be 'int' or 'str'")
    # Stable sort; slice assignment keeps the same list object.
    self._table[:] = sorted(self._table,
                            key=operator.itemgetter(column_index),
                            reverse=reverse)
def copy(self):
    """Return a shallow copy of the table.

    Returns
    -------
    BeautifulTable:
        shallow copy of the BeautifulTable instance.
    """
    # Slicing funnels through __getitem__, which rebuilds the
    # per-column metadata against the new instance.
    return self[:]

def get_column_header(self, index):
    """Get header of a column from it's index.

    Parameters
    ----------
    index: int
        Normal list rules apply.
    """
    return self._column_headers[index]
def get_column_index(self, header):
    """Return the index of the first column titled `header`.

    Parameters
    ----------
    header: str
        header of the column.

    Raises
    ------
    KeyError:
        If no column could be found corresponding to `header`.
    """
    try:
        # list.index raises ValueError on a miss; translate that into
        # the documented KeyError.
        return self._column_headers.index(header)
    except ValueError:
        raise_suppressed(KeyError(("'{}' is not a header for any "
                                   "column").format(header)))
def get_column(self, key):
"""Return an iterator to a column.
Parameters
----------
key : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If key is not of type `int`, or `str`.
Returns
-------
iter:
Iterator to the specified column.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError(("key must be an int or str, "
"not {}").format(type(key).__name__))
return iter(map(operator.itemgetter(index), self._table))
def reverse(self):
    """Reverse the order of the table rows *IN PLACE*."""
    self._table.reverse()

def pop_row(self, index=-1):
    """Remove and return the row at `index` (default: the last row).

    Parameters
    ----------
    index : int
        index of the row. Normal list rules apply.
    """
    return self._table.pop(index)
def pop_column(self, index=-1):
    """Remove and return the column at `index` (default last).

    Parameters
    ----------
    index : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If index is not an instance of `int`, or `str`.
    IndexError:
        If Table is empty.
    """
    if isinstance(index, int):
        pass
    elif isinstance(index, basestring):
        # Translate a header into its positional index.
        index = self.get_column_index(index)
    else:
        raise TypeError(("column index must be an integer or a string, "
                         "not {}").format(type(index).__name__))
    if self._column_count == 0:
        raise IndexError("pop from empty table")
    if self._column_count == 1:
        # This is the last column. So we should clear the table to avoid
        # empty rows
        self.clear(clear_metadata=True)
    else:
        # Not the last column. safe to pop from row
        self._column_count -= 1
        self._column_alignments._pop(index)
        self._column_widths._pop(index)
        self._left_padding_widths._pop(index)
        self._right_padding_widths._pop(index)
        self._column_headers._pop(index)
        for row in self._table:
            row._pop(index)
def insert_row(self, index, row):
    """Insert a row before index in the table.

    Parameters
    ----------
    index : int
        List index rules apply
    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.
    ValueError:
        If size of `row` is inconsistent with the current number
        of columns.
    """
    # _validate_row also initializes the column count when this is the
    # very first row of an empty table.
    row = self._validate_row(row)
    row_obj = RowData(self, row)
    self._table.insert(index, row_obj)

def append_row(self, row):
    """Append a row to end of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    self.insert_row(len(self._table), row)
def update_row(self, key, value):
    """Update one row (int key) or several rows (slice key).

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.
    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If index specified is out of range.
    TypeError:
        If `value` is of incorrect type.
    ValueError:
        If length of row does not matches number of columns.
    """
    if isinstance(key, int):
        row = self._validate_row(value, init_table_if_required=False)
        row_obj = RowData(self, row)
        self._table[key] = row_obj
    elif isinstance(key, slice):
        row_obj_list = []
        # NOTE(review): the slice branch validates with
        # init_table_if_required=True, unlike the single-row branch --
        # presumably so an empty table can be initialized; confirm.
        for row in value:
            row_ = self._validate_row(row, init_table_if_required=True)
            row_obj_list.append(RowData(self, row_))
        self._table[key] = row_obj_list
    else:
        raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
    """Update a column named `header` in the table.

    If length of column is smaller than number of rows, lets say
    `k`, only the first `k` values in the column is updated.

    Parameters
    ----------
    header : str
        Header of the column
    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not a string.
    KeyError:
        If no column exists with title `header`.
    """
    # BUG FIX: validate the argument *before* using it. Previously
    # `get_column_index(header)` ran first, so a non-str header could
    # never reach the documented TypeError -- it failed inside the
    # header lookup instead.
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.
    header : str
        Title of the column.
    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.
    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # Empty table: this column defines the table's shape.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        column_length = 0
        # Insert cell-by-cell; `column_length` records the last row
        # actually written so a too-short column can be rolled back.
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        if column_length == len(self._table) - 1:
            # Every row received a value; commit the column metadata.
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))
def append_column(self, header, column):
    """Append a column to end of the table.

    Parameters
    ----------
    header : str
        Title of the column
    column : iterable
        Any iterable of appropriate length.
    """
    self.insert_column(self._column_count, header, column)
def clear(self, clear_metadata=False):
    """Remove all rows, optionally resetting column metadata too.

    Parameters
    ----------
    clear_metadata : bool, optional
        If it is true(default False), all metadata of columns such as
        their alignment, padding, width, etc. are also cleared and
        number of columns is set to 0.
    """
    # Slice deletion instead of list.clear() keeps Python 2.7 support.
    del self._table[:]
    if clear_metadata:
        self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the
    table. Column width should be set prior to calling this method.
    This method detects intersection and handles it according to the
    values of `intersect_*_*` attributes.

    Parameters
    ----------
    char : str
        Character used to draw the line.
    intersect_left : str
        Character drawn where the line meets the left border.
    intersect_mid : str
        Character drawn where the line crosses a column separator.
    intersect_right : str
        Character drawn where the line meets the right border.

    Returns
    -------
    str
        String which will be printed as a horizontal line of the table.
    """
    width = self.get_table_width()
    # Tile `char` to cover the full width (char may render wider than
    # one cell); a zero-width char degenerates to spaces.
    try:
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        line = [' '] * width
    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk across the columns, stamping the intersection
                # character at each separator position.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)
    return ''.join(line)
def _get_top_border(self):
    # Top border: the top border char joined with the top intersections.
    return self._get_horizontal_line(self.top_border_char,
                                     self.intersect_top_left,
                                     self.intersect_top_mid,
                                     self.intersect_top_right)

def get_top_border(self):  # pragma : no cover
    """Get the Top border of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    deprecation("'get_top_border()' is deprecated")
    return self._get_top_border()

def _get_header_separator(self):
    # Line drawn between the header row and the data rows.
    return self._get_horizontal_line(self.header_separator_char,
                                     self.intersect_header_left,
                                     self.intersect_header_mid,
                                     self.intersect_header_right)

def get_header_separator(self):  # pragma : no cover
    """Get the Header separator of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    deprecation("'get_header_separator()' is deprecated")
    return self._get_header_separator()

def _get_row_separator(self):
    # Line drawn between two consecutive data rows.
    return self._get_horizontal_line(self.row_separator_char,
                                     self.intersect_row_left,
                                     self.intersect_row_mid,
                                     self.intersect_row_right)

def get_row_separator(self):  # pragma : no cover
    """Get the Row separator of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    deprecation("'get_row_separator()' is deprecated")
    return self._get_row_separator()

def _get_bottom_border(self):
    # Bottom border, mirroring _get_top_border.
    return self._get_horizontal_line(self.bottom_border_char,
                                     self.intersect_bottom_left,
                                     self.intersect_bottom_mid,
                                     self.intersect_bottom_right)

def get_bottom_border(self):  # pragma : no cover
    """Get the Bottom border of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    deprecation("'get_bottom_border()' is deprecated")
    return self._get_bottom_border()
def get_table_width(self):
    """Get the width of the table as number of characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    # Content widths plus the width of the inter-column separators and
    # the two outer borders.
    separators_width = ((self._column_count - 1)
                        * termwidth(self.column_separator_char))
    return (sum(self._column_widths)
            + separators_width
            + termwidth(self.left_border_char)
            + termwidth(self.right_border_char))
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """
    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''
    # Temporarily prepend an auto-numbered serial column; it is popped
    # again at the bottom of this method so the table is left unchanged.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))
    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()
    string_ = []
    # Drawing the top border
    if self.top_border_char:
        string_.append(
            self._get_top_border())
    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)
        if self.header_separator_char:
            string_.append(
                self._get_header_separator())
    # Printing rows
    first_row_encountered = False
    for row in self._table:
        if first_row_encountered and self.row_separator_char:
            string_.append(
                self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)
    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(
            self._get_bottom_border())
    # Remove the temporary serial-number column added above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)
    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable._initialize_table | python | def _initialize_table(self, column_count):
header = [''] * column_count
alignment = [self.default_alignment] * column_count
width = [0] * column_count
padding = [self.default_padding] * column_count
self._column_count = column_count
self._column_headers = HeaderData(self, header)
self._column_alignments = AlignmentMetaData(self, alignment)
self._column_widths = PositiveIntegerMetaData(self, width)
self._left_padding_widths = PositiveIntegerMetaData(self, padding)
self._right_padding_widths = PositiveIntegerMetaData(self, padding) | Sets the column count of the table.
This method is called to set the number of columns for the first time.
Parameters
----------
column_count : int
number of columns in the table | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L422-L442 | null | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line separating Header from data.
row_separator_char : str
Character used to draw the line separating two rows.
column_separator_char : str
Character used to draw the line separating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
    def __init__(self, max_width=80,
                 default_alignment=enums.ALIGN_CENTER,
                 default_padding=1):
        """Initialize the table with sensible defaults.

        Parameters
        ----------
        max_width : int, optional
            Maximum width of the rendered table in characters (default 80).
        default_alignment : Alignment, optional
            Alignment applied to newly created columns
            (default ALIGN_CENTER).
        default_padding : int, optional
            Left/right padding width for newly created columns (default 1).
        """
        # Load the default drawing characters first; the char attributes are
        # type-checked by the __setattr__ override below.
        self.set_style(enums.STYLE_DEFAULT)
        self.numeric_precision = 3
        self.serialno = False
        self.serialno_header = "SN"
        self.detect_numerics = True
        self._column_count = 0
        self._sign_mode = enums.SM_MINUS
        self._width_exceed_policy = enums.WEP_WRAP
        self._column_pad = " "
        # These three assignments go through property setters which validate
        # their values and may raise.
        self.default_alignment = default_alignment
        self.default_padding = default_padding
        self.max_table_width = max_width
        self._initialize_table(0)
        self._table = []
def __setattr__(self, name, value):
attrs = ('left_border_char', 'right_border_char', 'top_border_char',
'bottom_border_char', 'header_separator_char',
'column_separator_char', 'row_separator_char',
'intersect_top_left', 'intersect_top_mid',
'intersect_top_right', 'intersect_header_left',
'intersect_header_mid', 'intersect_header_right',
'intersect_row_left', 'intersect_row_mid',
'intersect_row_right', 'intersect_bottom_left',
'intersect_bottom_mid', 'intersect_bottom_right')
if to_unicode(name) in attrs and not isinstance(value, basestring):
value_type = type(value).__name__
raise TypeError(("Expected {attr} to be of type 'str', "
"got '{attr_type}'").format(attr=name,
attr_type=value_type))
super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
    @property
    def column_count(self):
        """Number of columns in the table (read only)."""
        return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifulbable.WEP_WRAP An item is wrapped so every line fits
within it's column width.
beautifultable.WEP_STRIP An item is stripped to fit in it's
column.
beautifultable.WEP_ELLIPSIS An item is stripped to fit in it's
column and appended with ...(Ellipsis).
============================ =========================================
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable having all memebers an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.
Right Width of the padding specifies the number of characters
on the rigth of a column reserved for padding. By default It is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _validate_row(self, value, init_table_if_required=True):
# TODO: Rename this method
# str is also an iterable but it is not a valid row, so
# an extra check is required for str
if not isinstance(value, Iterable) or isinstance(value, basestring):
raise TypeError("parameter must be an iterable")
row = list(value)
if init_table_if_required and self._column_count == 0:
self._initialize_table(len(row))
if len(row) != self._column_count:
raise ValueError(("'Expected iterable of length {}, "
"got {}").format(self._column_count, len(row)))
return row
def __getitem__(self, key):
"""Get a row, or a column, or a new table by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, returns a row.
If key is an `str`, returns iterator to a column with header `key`.
If key is a slice object, returns a new table sliced according to
rows.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, slice):
new_table = copy.copy(self)
# Every child of BaseRow class needs to be reassigned so that
# They contain reference of the new table rather than the old
# This was a cause of a nasty bug once.
new_table.column_headers = self.column_headers
new_table.column_alignments = self.column_alignments
new_table.column_widths = self.column_widths
new_table.left_padding_widths = self.left_padding_widths
new_table.right_padding_widths = self.left_padding_widths
new_table._table = []
for row in self._table[key]:
new_table.append_row(row)
return new_table
elif isinstance(key, int):
return self._table[key]
elif isinstance(key, basestring):
return self.get_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, updates a row.
If key is an `str`, appends `column` to the list with header as
`key`.
If key is a slice object, updates multiple rows according to slice
rules.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
    def __len__(self):
        """Return the number of rows in the table."""
        return len(self._table)
def __contains__(self, key):
if isinstance(key, basestring):
return key in self._column_headers
elif isinstance(key, Iterable):
return key in self._table
else:
raise TypeError(("'key' must be str or Iterable, "
"not {}").format(type(key).__name__))
    def __iter__(self):
        """Return an iterator over the rows of the table."""
        return iter(self._table)
    def __next__(self):
        # NOTE(review): `self._table` is a list and `next()` on a list
        # raises TypeError, so this looks broken/unreachable — `__iter__`
        # already returns a proper list iterator. Confirm before relying
        # on this method.
        return next(self._table)
    def __repr__(self):
        """Return the repr of the underlying list of rows."""
        return repr(self._table)
    def __str__(self):
        """Render the table as a string (delegates to `get_string`)."""
        return self.get_string()
def set_style(self, style):
"""Set the style of the table from a predefined set of styles.
Parameters
----------
style: Style
It can be one of the following:
* beautifulTable.STYLE_DEFAULT
* beautifultable.STYLE_NONE
* beautifulTable.STYLE_DOTTED
* beautifulTable.STYLE_MYSQL
* beautifulTable.STYLE_SEPARATED
* beautifulTable.STYLE_COMPACT
* beautifulTable.STYLE_MARKDOWN
* beautifulTable.STYLE_RESTRUCTURED_TEXT
* beautifultable.STYLE_BOX
* beautifultable.STYLE_BOX_DOUBLED
* beautifultable.STYLE_BOX_ROUNDED
* beautifultable.STYLE_GRID
"""
if not isinstance(style, enums.Style):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Style)
error_msg = ("allowed values for style are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
style_template = style.value
self.left_border_char = style_template.left_border_char
self.right_border_char = style_template.right_border_char
self.top_border_char = style_template.top_border_char
self.bottom_border_char = style_template.bottom_border_char
self.header_separator_char = style_template.header_separator_char
self.column_separator_char = style_template.column_separator_char
self.row_separator_char = style_template.row_separator_char
self.intersect_top_left = style_template.intersect_top_left
self.intersect_top_mid = style_template.intersect_top_mid
self.intersect_top_right = style_template.intersect_top_right
self.intersect_header_left = style_template.intersect_header_left
self.intersect_header_mid = style_template.intersect_header_mid
self.intersect_header_right = style_template.intersect_header_right
self.intersect_row_left = style_template.intersect_row_left
self.intersect_row_mid = style_template.intersect_row_mid
self.intersect_row_right = style_template.intersect_row_right
self.intersect_bottom_left = style_template.intersect_bottom_left
self.intersect_bottom_mid = style_template.intersect_bottom_mid
self.intersect_bottom_right = style_template.intersect_bottom_right
def _calculate_column_widths(self):
"""Calculate width of column automatically based on data."""
table_width = self.get_table_width()
lpw, rpw = self._left_padding_widths, self._right_padding_widths
pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
max_widths = [0 for index in range(self._column_count)]
offset = table_width - sum(self._column_widths) + sum(pad_widths)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
for index, column in enumerate(zip(*self._table)):
max_length = 0
for i in column:
for j in to_unicode(i).split('\n'):
output_str = get_output_str(j, self.detect_numerics,
self.numeric_precision,
self.sign_mode.value)
max_length = max(max_length, termwidth(output_str))
for i in to_unicode(self._column_headers[index]).split('\n'):
output_str = get_output_str(i, self.detect_numerics,
self.numeric_precision,
self.sign_mode.value)
max_length = max(max_length, termwidth(output_str))
max_widths[index] += max_length
sum_ = sum(max_widths)
desired_sum = self._max_table_width - offset
# Set flag for columns who are within their fair share
temp_sum = 0
flag = [0] * len(max_widths)
for i, width in enumerate(max_widths):
if width <= int(desired_sum / self._column_count):
temp_sum += width
flag[i] = 1
else:
# Allocate atleast 1 character width to the column
temp_sum += 1
avail_space = desired_sum - temp_sum
actual_space = sum_ - temp_sum
shrinked_columns = {}
# Columns which exceed their fair share should be shrinked based on
# how much space is left for the table
for i, width in enumerate(max_widths):
self.column_widths[i] = width
if not flag[i]:
new_width = 1 + int((width-1) * avail_space / actual_space)
if new_width < width:
self.column_widths[i] = new_width
shrinked_columns[new_width] = i
# Divide any remaining space among shrinked columns
if shrinked_columns:
extra = (self._max_table_width
- offset
- sum(self.column_widths))
actual_space = sum(shrinked_columns)
if extra > 0:
for i, width in enumerate(sorted(shrinked_columns)):
index = shrinked_columns[width]
extra_width = int(width * extra / actual_space)
self.column_widths[i] += extra_width
if i == (len(shrinked_columns) - 1):
extra = (self._max_table_width
- offset
- sum(self.column_widths))
self.column_widths[index] += extra
for i in range(self.column_count):
self.column_widths[i] += pad_widths[i]
    def auto_calculate_width(self):  # pragma : no cover
        """Deprecated public alias of `_calculate_column_widths`."""
        deprecation("'auto_calculate_width()' is deprecated")
        self._calculate_column_widths()
    def set_padding_widths(self, pad_width):
        """Set width for left and right padding of the columns of the table.

        Convenience method; assigns the same widths to both
        `left_padding_widths` and `right_padding_widths`.

        Parameters
        ----------
        pad_width : array_like
            pad widths for the columns.
        """
        self.left_padding_widths = pad_width
        self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
"""Stable sort of the table *IN-PLACE* with respect to a column.
Parameters
----------
key: int, str
index or header of the column. Normal list rules apply.
reverse : bool
If `True` then table is sorted as if each comparison was reversed.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError("'key' must either be 'int' or 'str'")
self._table.sort(key=operator.itemgetter(index), reverse=reverse)
    def copy(self):
        """Return a shallow copy of the table.

        Implemented as a full-range slice, so row objects are rebuilt to
        reference the new table.

        Returns
        -------
        BeautifulTable:
            shallow copy of the BeautifulTable instance.
        """
        return self[:]
    def get_column_header(self, index):
        """Get header of a column from it's index.

        Parameters
        ----------
        index: int
            Normal list rules apply.

        Returns
        -------
        str
            Header of the column at `index`.
        """
        return self._column_headers[index]
def get_column_index(self, header):
"""Get index of a column from it's header.
Parameters
----------
header: str
header of the column.
Raises
------
ValueError:
If no column could be found corresponding to `header`.
"""
try:
index = self._column_headers.index(header)
return index
except ValueError:
raise_suppressed(KeyError(("'{}' is not a header for any "
"column").format(header)))
def get_column(self, key):
"""Return an iterator to a column.
Parameters
----------
key : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If key is not of type `int`, or `str`.
Returns
-------
iter:
Iterator to the specified column.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError(("key must be an int or str, "
"not {}").format(type(key).__name__))
return iter(map(operator.itemgetter(index), self._table))
def reverse(self):
"""Reverse the table row-wise *IN PLACE*."""
self._table.reverse()
def pop_row(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int
index of the row. Normal list rules apply.
"""
row = self._table.pop(index)
return row
    def pop_column(self, index=-1):
        """Remove the column at `index` (default last).

        Note: unlike `pop_row`, nothing is returned.

        Parameters
        ----------
        index : int, str
            index of the column, or the header of the column.
            If index is specified, then normal list rules apply.

        Raises
        ------
        TypeError:
            If index is not an instance of `int`, or `str`.
        IndexError:
            If Table is empty.
        """
        if isinstance(index, int):
            pass
        elif isinstance(index, basestring):
            index = self.get_column_index(index)
        else:
            raise TypeError(("column index must be an integer or a string, "
                             "not {}").format(type(index).__name__))
        if self._column_count == 0:
            raise IndexError("pop from empty table")
        if self._column_count == 1:
            # This is the last column. So we should clear the table to avoid
            # empty rows
            self.clear(clear_metadata=True)
        else:
            # Not the last column. safe to pop from row
            # Keep every per-column structure in sync with the row data.
            self._column_count -= 1
            self._column_alignments._pop(index)
            self._column_widths._pop(index)
            self._left_padding_widths._pop(index)
            self._right_padding_widths._pop(index)
            self._column_headers._pop(index)
            for row in self._table:
                row._pop(index)
def insert_row(self, index, row):
"""Insert a row before index in the table.
Parameters
----------
index : int
List index rules apply
row : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `row` is not an iterable.
ValueError:
If size of `row` is inconsistent with the current number
of columns.
"""
row = self._validate_row(row)
row_obj = RowData(self, row)
self._table.insert(index, row_obj)
    def append_row(self, row):
        """Append a row to end of the table.

        Parameters
        ----------
        row : iterable
            Any iterable of appropriate length.
        """
        self.insert_row(len(self._table), row)
def update_row(self, key, value):
"""Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
key : int or slice
index of the row, or a slice object.
value : iterable
If an index is specified, `value` should be an iterable
of appropriate length. Instead if a slice object is
passed as key, value should be an iterable of rows.
Raises
------
IndexError:
If index specified is out of range.
TypeError:
If `value` is of incorrect type.
ValueError:
If length of row does not matches number of columns.
"""
if isinstance(key, int):
row = self._validate_row(value, init_table_if_required=False)
row_obj = RowData(self, row)
self._table[key] = row_obj
elif isinstance(key, slice):
row_obj_list = []
for row in value:
row_ = self._validate_row(row, init_table_if_required=True)
row_obj_list.append(RowData(self, row_))
self._table[key] = row_obj_list
else:
raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
"""Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
header : str
Header of the column
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If length of `column` is shorter than number of rows.
ValueError:
If no column exists with title `header`.
"""
index = self.get_column_index(header)
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
for row, new_item in zip(self._table, column):
row[index] = new_item
    def insert_column(self, index, header, column):
        """Insert a column before `index` in the table.

        If length of column is bigger than number of rows, lets say
        `k`, only the first `k` values of `column` is considered.
        If column is shorter than 'k', ValueError is raised.

        Note that Table remains in consistent state even if column
        is too short. Any changes made by this method is rolled back
        before raising the exception.

        Parameters
        ----------
        index : int
            List index rules apply. (Ignored when the table currently has
            no columns — presumably because there is only one possible
            position; confirm against callers.)
        header : str
            Title of the column.
        column : iterable
            Any iterable of appropriate length.

        Raises
        ------
        TypeError:
            If `header` is not of type `str`.
        ValueError:
            If length of `column` is shorter than number of rows.
        """
        if self._column_count == 0:
            # First column of an empty table: the column_headers setter
            # initializes the per-column metadata.
            self.column_headers = HeaderData(self, [header])
            self._table = [RowData(self, [i]) for i in column]
        else:
            if not isinstance(header, basestring):
                raise TypeError("header must be of type str")
            column_length = 0
            # Insert cell-by-cell; zip stops at the shorter of rows/column.
            for i, (row, new_item) in enumerate(zip(self._table, column)):
                row._insert(index, new_item)
                column_length = i
            if column_length == len(self._table) - 1:
                # Every row received a value: commit the column metadata.
                self._column_count += 1
                self._column_headers._insert(index, header)
                self._column_alignments._insert(index, self.default_alignment)
                self._column_widths._insert(index, 0)
                self._left_padding_widths._insert(index, self.default_padding)
                self._right_padding_widths._insert(index, self.default_padding)
            else:
                # Roll back changes so that table remains in consistent state
                for j in range(column_length, -1, -1):
                    self._table[j]._pop(index)
                raise ValueError(("length of 'column' should be atleast {}, "
                                  "got {}").format(len(self._table),
                                                   column_length + 1))
    def append_column(self, header, column):
        """Append a column to end of the table.

        Parameters
        ----------
        header : str
            Title of the column.
        column : iterable
            Any iterable of appropriate length.
        """
        self.insert_column(self._column_count, header, column)
    def clear(self, clear_metadata=False):
        """Clear the contents of the table.

        Clear all rows of the table, and if specified clears all column
        specific data.

        Parameters
        ----------
        clear_metadata : bool, optional
            If it is true(default False), all metadata of columns such as
            their alignment, padding, width, etc. are also cleared and
            number of columns is set to 0.
        """
        # Cannot use clear method to support Python 2.7
        del self._table[:]
        if clear_metadata:
            self._initialize_table(0)
    def _get_horizontal_line(self, char, intersect_left,
                             intersect_mid, intersect_right):
        """Get a horizontal line for the table.

        Internal method used to actually get all horizontal lines in the
        table. Column width should be set prior to calling this method.
        This method detects intersection and handles it according to the
        values of the `intersect_*_*` attributes.

        Parameters
        ----------
        char : str
            Character used to draw the line.
        intersect_left : str
            Character drawn where the line meets the left border.
        intersect_mid : str
            Character drawn where the line crosses a column separator.
        intersect_right : str
            Character drawn where the line meets the right border.

        Returns
        -------
        str
            The full-width horizontal line.
        """
        width = self.get_table_width()
        # Tile `char` to at least `width` display cells, then trim.
        try:
            line = list(char * (int(width/termwidth(char)) + 1))[:width]
        except ZeroDivisionError:
            # Zero-width char: fall back to spaces.
            line = [' '] * width
        if len(line) == 0:
            return ''
        # Only if Special Intersection is enabled and horizontal line is
        # visible
        if not char.isspace():
            # If left border is enabled and it is visible
            visible_junc = not intersect_left.isspace()
            if termwidth(self.left_border_char) > 0:
                if not (self.left_border_char.isspace() and visible_junc):
                    length = min(termwidth(self.left_border_char),
                                 termwidth(intersect_left))
                    for i in range(length):
                        line[i] = intersect_left[i]
            visible_junc = not intersect_right.isspace()
            # If right border is enabled and it is visible
            if termwidth(self.right_border_char) > 0:
                if not (self.right_border_char.isspace() and visible_junc):
                    length = min(termwidth(self.right_border_char),
                                 termwidth(intersect_right))
                    for i in range(length):
                        line[-i-1] = intersect_right[-i-1]
            visible_junc = not intersect_mid.isspace()
            # If column separator is enabled and it is visible
            if termwidth(self.column_separator_char):
                if not (self.column_separator_char.isspace() and visible_junc):
                    # Walk across the columns, overwriting the separator
                    # positions with the mid-intersection character.
                    index = termwidth(self.left_border_char)
                    for i in range(self._column_count-1):
                        index += (self._column_widths[i])
                        length = min(termwidth(self.column_separator_char),
                                     termwidth(intersect_mid))
                        for i in range(length):
                            line[index+i] = intersect_mid[i]
                        index += termwidth(self.column_separator_char)
        return ''.join(line)
    def _get_top_border(self):
        """Build the top border line using the top intersection chars."""
        return self._get_horizontal_line(self.top_border_char,
                                         self.intersect_top_left,
                                         self.intersect_top_mid,
                                         self.intersect_top_right)
    def get_top_border(self):  # pragma : no cover
        """Get the Top border of table (deprecated public wrapper).

        Column width should be set prior to calling this method.

        Returns
        -------
        str
            String which will be printed as the Top border of the table.
        """
        deprecation("'get_top_border()' is deprecated")
        return self._get_top_border()
    def _get_header_separator(self):
        """Build the header/data separator line."""
        return self._get_horizontal_line(self.header_separator_char,
                                         self.intersect_header_left,
                                         self.intersect_header_mid,
                                         self.intersect_header_right)
    def get_header_separator(self):  # pragma : no cover
        """Get the Header separator of table (deprecated public wrapper).

        Column width should be set prior to calling this method.

        Returns
        -------
        str
            String which will be printed as Header separator of the table.
        """
        deprecation("'get_header_separator()' is deprecated")
        return self._get_header_separator()
    def _get_row_separator(self):
        """Build the line drawn between two data rows."""
        return self._get_horizontal_line(self.row_separator_char,
                                         self.intersect_row_left,
                                         self.intersect_row_mid,
                                         self.intersect_row_right)
    def get_row_separator(self):  # pragma : no cover
        """Get the Row separator of table (deprecated public wrapper).

        Column width should be set prior to calling this method.

        Returns
        -------
        str
            String which will be printed as Row separator of the table.
        """
        deprecation("'get_row_separator()' is deprecated")
        return self._get_row_separator()
    def _get_bottom_border(self):
        """Build the bottom border line using the bottom intersection chars."""
        return self._get_horizontal_line(self.bottom_border_char,
                                         self.intersect_bottom_left,
                                         self.intersect_bottom_mid,
                                         self.intersect_bottom_right)
    def get_bottom_border(self):  # pragma : no cover
        """Get the Bottom border of table (deprecated public wrapper).

        Column width should be set prior to calling this method.

        Returns
        -------
        str
            String which will be printed as Bottom border of the table.
        """
        deprecation("'get_bottom_border()' is deprecated")
        return self._get_bottom_border()
def get_table_width(self):
"""Get the width of the table as number of characters.
Column width should be set prior to calling this method.
Returns
-------
int
Width of the table as number of characters.
"""
if self.column_count == 0:
return 0
width = sum(self._column_widths)
width += ((self._column_count - 1)
* termwidth(self.column_separator_char))
width += termwidth(self.left_border_char)
width += termwidth(self.right_border_char)
return width
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """
    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''
    # Temporarily prepend a serial-number column; it is popped again at
    # the end so rendering leaves the table unchanged.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))
    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()
    string_ = []
    # Drawing the top border
    if self.top_border_char:
        string_.append(
            self._get_top_border())
    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)
        if self.header_separator_char:
            string_.append(
                self._get_header_separator())
    # Printing rows
    first_row_encountered = False
    for row in self._table:
        # A row separator goes between rows, not before the first one.
        if first_row_encountered and self.row_separator_char:
            string_.append(
                self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)
    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(
            self._get_bottom_border())
    # Undo the temporary serial-number column added above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)
    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.set_style | python | def set_style(self, style):
if not isinstance(style, enums.Style):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Style)
error_msg = ("allowed values for style are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
style_template = style.value
self.left_border_char = style_template.left_border_char
self.right_border_char = style_template.right_border_char
self.top_border_char = style_template.top_border_char
self.bottom_border_char = style_template.bottom_border_char
self.header_separator_char = style_template.header_separator_char
self.column_separator_char = style_template.column_separator_char
self.row_separator_char = style_template.row_separator_char
self.intersect_top_left = style_template.intersect_top_left
self.intersect_top_mid = style_template.intersect_top_mid
self.intersect_top_right = style_template.intersect_top_right
self.intersect_header_left = style_template.intersect_header_left
self.intersect_header_mid = style_template.intersect_header_mid
self.intersect_header_right = style_template.intersect_header_right
self.intersect_row_left = style_template.intersect_row_left
self.intersect_row_mid = style_template.intersect_row_mid
self.intersect_row_right = style_template.intersect_row_right
self.intersect_bottom_left = style_template.intersect_bottom_left
self.intersect_bottom_mid = style_template.intersect_bottom_mid
self.intersect_bottom_right = style_template.intersect_bottom_right | Set the style of the table from a predefined set of styles.
Parameters
----------
style: Style
It can be one of the following:
* beautifultable.STYLE_DEFAULT
* beautifultable.STYLE_NONE
* beautifultable.STYLE_DOTTED
* beautifultable.STYLE_MYSQL
* beautifultable.STYLE_SEPARATED
* beautifultable.STYLE_COMPACT
* beautifultable.STYLE_MARKDOWN
* beautifultable.STYLE_RESTRUCTURED_TEXT
* beautifultable.STYLE_BOX
* beautifultable.STYLE_BOX_DOUBLED
* beautifultable.STYLE_BOX_ROUNDED
* beautifultable.STYLE_GRID | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L580-L627 | null | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line separating the Header from data.
row_separator_char : str
Character used to draw the line separating two rows.
column_separator_char : str
Character used to draw the line separating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
             default_alignment=enums.ALIGN_CENTER,
             default_padding=1):
    # Start from the default style so every border/junction char is set
    # before any other attribute is touched.
    self.set_style(enums.STYLE_DEFAULT)
    self.numeric_precision = 3
    self.serialno = False
    self.serialno_header = "SN"
    self.detect_numerics = True
    self._column_count = 0
    self._sign_mode = enums.SM_MINUS
    self._width_exceed_policy = enums.WEP_WRAP
    self._column_pad = " "
    # These assignments go through property setters, which validate.
    self.default_alignment = default_alignment
    self.default_padding = default_padding
    self.max_table_width = max_width
    # Create empty per-column metadata, then the (empty) row storage.
    self._initialize_table(0)
    self._table = []
def __setattr__(self, name, value):
    """Intercept attribute writes to validate the drawing characters.

    Every border/separator/junction attribute must be a string; any
    other attribute is stored unchanged.
    """
    char_attrs = frozenset((
        'left_border_char', 'right_border_char', 'top_border_char',
        'bottom_border_char', 'header_separator_char',
        'column_separator_char', 'row_separator_char',
        'intersect_top_left', 'intersect_top_mid',
        'intersect_top_right', 'intersect_header_left',
        'intersect_header_mid', 'intersect_header_right',
        'intersect_row_left', 'intersect_row_mid',
        'intersect_row_right', 'intersect_bottom_left',
        'intersect_bottom_mid', 'intersect_bottom_right'))
    if to_unicode(name) in char_attrs and not isinstance(value, basestring):
        value_type = type(value).__name__
        raise TypeError(("Expected {attr} to be of type 'str', "
                         "got '{attr_type}'").format(attr=name,
                                                     attr_type=value_type))
    super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
    """Get the number of columns in the table(read only)"""
    return self._column_count
@property
def intersection_char(self):  # pragma : no cover
    """Character used to draw intersection of perpendicular lines.

    Disabling it just draws the horizontal line char in it's place.

    This attribute is deprecated. Use specific intersect_*_* attribute.
    """
    deprecation("'intersection_char' is deprecated, Use specific "
                "`intersect_*_*` attribute instead")
    # Representative value; all intersect chars were set together below.
    return self.intersect_top_left

@intersection_char.setter
def intersection_char(self, value):  # pragma : no cover
    deprecation("'intersection_char' is deprecated, Use specific "
                "`intersect_*_*` attributes instead")
    # Legacy behaviour: one value fans out to every junction character.
    self.intersect_top_left = value
    self.intersect_top_mid = value
    self.intersect_top_right = value
    self.intersect_header_left = value
    self.intersect_header_mid = value
    self.intersect_header_right = value
    self.intersect_row_left = value
    self.intersect_row_mid = value
    self.intersect_row_right = value
    self.intersect_bottom_left = value
    self.intersect_bottom_mid = value
    self.intersect_bottom_right = value
@property
def sign_mode(self):
    """Attribute to control how signs are displayed for numerical data.

    It can be one of the following:

    ======================== =============================================
    Option                   Meaning
    ======================== =============================================
    beautifultable.SM_PLUS   A sign should be used for both +ve and -ve
                             numbers.
    beautifultable.SM_MINUS  A sign should only be used for -ve numbers.
    beautifultable.SM_SPACE  A leading space should be used for +ve
                             numbers and a minus sign for -ve numbers.
    ======================== =============================================
    """
    return self._sign_mode

@sign_mode.setter
def sign_mode(self, value):
    # Only members of the SignMode enum are accepted.
    if not isinstance(value, enums.SignMode):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.SignMode)
        error_msg = ("allowed values for sign_mode are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    self._sign_mode = value
@property
def width_exceed_policy(self):
    """Attribute to control how exceeding column width should be handled.

    It can be one of the following:

    ============================ =========================================
    Option                       Meaning
    ============================ =========================================
    beautifultable.WEP_WRAP      An item is wrapped so every line fits
                                 within it's column width.
    beautifultable.WEP_STRIP     An item is stripped to fit in it's
                                 column.
    beautifultable.WEP_ELLIPSIS  An item is stripped to fit in it's
                                 column and appended with ...(Ellipsis).
    ============================ =========================================
    """
    return self._width_exceed_policy

@width_exceed_policy.setter
def width_exceed_policy(self, value):
    # Only members of the WidthExceedPolicy enum are accepted.
    if not isinstance(value, enums.WidthExceedPolicy):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.WidthExceedPolicy)
        error_msg = ("allowed values for width_exceed_policy are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    self._width_exceed_policy = value
@property
def default_alignment(self):
    """Attribute to control the alignment of newly created columns.

    It can be one of the following:

    ============================ =========================================
    Option                       Meaning
    ============================ =========================================
    beautifultable.ALIGN_LEFT    New columns are left aligned.
    beautifultable.ALIGN_CENTER  New columns are center aligned.
    beautifultable.ALIGN_RIGHT   New columns are right aligned.
    ============================ =========================================
    """
    return self._default_alignment

@default_alignment.setter
def default_alignment(self, value):
    # Only members of the Alignment enum are accepted.
    if not isinstance(value, enums.Alignment):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Alignment)
        error_msg = ("allowed values for default_alignment are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    self._default_alignment = value
@property
def default_padding(self):
    """Initial value for Left and Right padding widths for new columns."""
    return self._default_padding

@default_padding.setter
def default_padding(self, value):
    # Guard clauses: reject non-int and non-positive values up front.
    if not isinstance(value, int):
        raise TypeError("padding must be an integer")
    if value <= 0:
        raise ValueError("padding must be more than 0")
    self._default_padding = value
@property
def column_widths(self):
    """get/set width for the columns of the table.

    Width of the column specifies the max number of characters
    a column can contain. Larger characters are handled according to
    the value of `width_exceed_policy`.
    """
    return self._column_widths

@column_widths.setter
def column_widths(self, value):
    # _validate_row checks length against the current column count.
    width = self._validate_row(value)
    self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
    """get/set titles for the columns of the table.

    It can be any iterable whose members are all instances of `str`.
    """
    return self._column_headers

@column_headers.setter
def column_headers(self, value):
    header = self._validate_row(value)
    # Each header must individually be a string.
    for i in header:
        if not isinstance(i, basestring):
            raise TypeError(("Headers should be of type 'str', "
                             "not {}").format(type(i)))
    self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
    """get/set alignment of the columns of the table.

    It can be any iterable containing only the following:

    * beautifultable.ALIGN_LEFT
    * beautifultable.ALIGN_CENTER
    * beautifultable.ALIGN_RIGHT
    """
    return self._column_alignments

@column_alignments.setter
def column_alignments(self, value):
    alignment = self._validate_row(value)
    self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
    """get/set width for left padding of the columns of the table.

    Left padding width specifies the number of characters
    on the left of a column reserved for padding. By default it is 1.
    """
    return self._left_padding_widths

@left_padding_widths.setter
def left_padding_widths(self, value):
    pad_width = self._validate_row(value)
    self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
    """get/set width for right padding of the columns of the table.

    Right padding width specifies the number of characters
    on the right of a column reserved for padding. By default it is 1.
    """
    return self._right_padding_widths

@right_padding_widths.setter
def right_padding_widths(self, value):
    pad_width = self._validate_row(value)
    self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
    """get/set the maximum width of the table.

    The width of the table is guaranteed to not exceed this value. If it
    is not possible to print a given table with the width provided, this
    value will automatically adjust.
    """
    # NOTE(review): this getter mutates `_max_table_width` (ratchets it
    # up to the minimum feasible width) -- a side-effecting property
    # read; confirm this is intentional before relying on it.
    offset = ((self._column_count - 1)
              * termwidth(self.column_separator_char))
    offset += termwidth(self.left_border_char)
    offset += termwidth(self.right_border_char)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)
    return self._max_table_width

@max_table_width.setter
def max_table_width(self, value):
    self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
    """Sets the column count of the table.

    This method is called to set the number of columns for the first time.

    Parameters
    ----------
    column_count : int
        number of columns in the table
    """
    header = [''] * column_count
    alignment = [self.default_alignment] * column_count
    width = [0] * column_count
    padding = [self.default_padding] * column_count
    self._column_count = column_count
    self._column_headers = HeaderData(self, header)
    self._column_alignments = AlignmentMetaData(self, alignment)
    self._column_widths = PositiveIntegerMetaData(self, width)
    # NOTE(review): the same `padding` list object is passed to both the
    # left and the right metadata wrappers; this assumes
    # PositiveIntegerMetaData copies its input -- confirm.
    self._left_padding_widths = PositiveIntegerMetaData(self, padding)
    self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
    """Check that *value* is a usable row and return it as a list.

    Raises TypeError for non-iterables (and strings), ValueError when
    the length does not match the current column count.
    """
    # TODO: Rename this method
    # str is also an iterable but it is not a valid row, so
    # an extra check is required for str
    if not isinstance(value, Iterable) or isinstance(value, basestring):
        raise TypeError("parameter must be an iterable")
    row = list(value)
    # Lazily adopt the first row's length as the table's column count.
    if init_table_if_required and self._column_count == 0:
        self._initialize_table(len(row))
    if len(row) != self._column_count:
        raise ValueError(("'Expected iterable of length {}, "
                          "got {}").format(self._column_count, len(row)))
    return row
def __getitem__(self, key):
    """Get a row, or a column, or a new table by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, returns a row.
        If key is an `str`, returns iterator to a column with header `key`.
        If key is a slice object, returns a new table sliced according to
        rows.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, slice):
        new_table = copy.copy(self)
        # Every child of BaseRow class needs to be reassigned so that
        # they contain a reference of the new table rather than the old.
        # This was a cause of a nasty bug once.
        new_table.column_headers = self.column_headers
        new_table.column_alignments = self.column_alignments
        new_table.column_widths = self.column_widths
        new_table.left_padding_widths = self.left_padding_widths
        # BUGFIX: was `self.left_padding_widths`, which silently dropped
        # any custom right padding from the sliced copy.
        new_table.right_padding_widths = self.right_padding_widths
        new_table._table = []
        for row in self._table[key]:
            new_table.append_row(row)
        return new_table
    elif isinstance(key, int):
        return self._table[key]
    elif isinstance(key, basestring):
        return self.get_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, updates a row.
If key is an `str`, appends `column` to the list with header as
`key`.
If key is a slice object, updates multiple rows according to slice
rules.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __len__(self):
    # Number of rows in the table.
    return len(self._table)
def __contains__(self, key):
    # A string is matched against the headers; any other iterable is
    # matched against the rows.
    if isinstance(key, basestring):
        return key in self._column_headers
    elif isinstance(key, Iterable):
        return key in self._table
    else:
        raise TypeError(("'key' must be str or Iterable, "
                         "not {}").format(type(key).__name__))
def __iter__(self):
    # Iterating a table yields its rows.
    return iter(self._table)
def __next__(self):
    # NOTE(review): `self._table` is a list, and next() on a list raises
    # TypeError unconditionally -- this method appears broken/unreachable
    # (for-loops use __iter__ instead). Confirm intent before fixing.
    return next(self._table)
def __repr__(self):
    # Delegate to the row list's repr.
    return repr(self._table)
def __str__(self):
    # Render the full table with default width recalculation.
    return self.get_string()
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data."""
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # Non-content width: separators + borders + padding.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)
    # Widest rendered line per column, considering both cells and header.
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length
    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset
    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1
    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    shrinked_columns = {}
    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                shrinked_columns[new_width] = i
    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # NOTE(review): this writes to position `i` (enumeration
                # counter) while `index` holds the shrunk column's real
                # position -- looks like it should be
                # `self.column_widths[index]`; confirm upstream.
                self.column_widths[i] += extra_width
                if i == (len(shrinked_columns) - 1):
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra
    # Re-add padding so stored widths are the full rendered widths.
    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self):  # pragma : no cover
    # Deprecated public alias for _calculate_column_widths().
    deprecation("'auto_calculate_width()' is deprecated")
    self._calculate_column_widths()
def set_padding_widths(self, pad_width):
    """Set width for left and right padding of the columns of the table.

    Parameters
    ----------
    pad_width : array_like
        pad widths for the columns.
    """
    # Convenience wrapper: apply the same widths to both sides.
    self.left_padding_widths = pad_width
    self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
    """Stable, in-place sort of the table with respect to one column.

    Parameters
    ----------
    key: int, str
        index or header of the column. Normal list rules apply.
    reverse : bool
        If `True` the table is sorted as if each comparison was reversed.
    """
    if isinstance(key, int):
        column = key
    elif isinstance(key, basestring):
        column = self.get_column_index(key)
    else:
        raise TypeError("'key' must either be 'int' or 'str'")
    # list.sort is stable, so equal keys keep their relative order.
    self._table.sort(key=operator.itemgetter(column), reverse=reverse)
def copy(self):
    """Return a shallow copy of the table.

    Returns
    -------
    BeautifulTable:
        shallow copy of the BeautifulTable instance.
    """
    # Delegates to __getitem__ with a full slice, which rebuilds rows so
    # they reference the new table.
    return self[:]
def get_column_header(self, index):
    """Return the header of the column at *index*.

    Parameters
    ----------
    index: int
        Normal list rules apply.
    """
    return self._column_headers[index]
def get_column_index(self, header):
    """Get index of a column from it's header.

    Parameters
    ----------
    header: str
        header of the column.

    Raises
    ------
    KeyError:
        If no column could be found corresponding to `header`.
    """
    # list.index raises ValueError; it is re-raised as a KeyError with
    # the original exception context suppressed.
    try:
        index = self._column_headers.index(header)
        return index
    except ValueError:
        raise_suppressed(KeyError(("'{}' is not a header for any "
                                   "column").format(header)))
def get_column(self, key):
    """Return an iterator over the values of one column.

    Parameters
    ----------
    key : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If key is not of type `int`, or `str`.

    Returns
    -------
    iter:
        Iterator to the specified column.
    """
    if isinstance(key, int):
        index = key
    elif isinstance(key, basestring):
        index = self.get_column_index(key)
    else:
        raise TypeError(("key must be an int or str, "
                         "not {}").format(type(key).__name__))
    # Lazy generator; values are fetched from rows as it is consumed.
    return (row[index] for row in self._table)
def reverse(self):
    """Reverse the table row-wise *IN PLACE*."""
    # Delegates to list.reverse; column metadata is untouched.
    self._table.reverse()
def pop_row(self, index=-1):
    """Remove and return the row at *index* (default: last row).

    Parameters
    ----------
    index : int
        index of the row. Normal list rules apply.
    """
    return self._table.pop(index)
def pop_column(self, index=-1):
    """Remove the column at *index* (default last).

    NOTE(review): despite the original "Remove and return" wording this
    method returns None; confirm before making it return the column.

    Parameters
    ----------
    index : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If index is not an instance of `int`, or `str`.
    IndexError:
        If Table is empty.
    """
    if isinstance(index, int):
        pass
    elif isinstance(index, basestring):
        index = self.get_column_index(index)
    else:
        raise TypeError(("column index must be an integer or a string, "
                         "not {}").format(type(index).__name__))
    if self._column_count == 0:
        raise IndexError("pop from empty table")
    if self._column_count == 1:
        # This is the last column. So we should clear the table to avoid
        # empty rows
        self.clear(clear_metadata=True)
    else:
        # Not the last column. safe to pop from row
        self._column_count -= 1
        self._column_alignments._pop(index)
        self._column_widths._pop(index)
        self._left_padding_widths._pop(index)
        self._right_padding_widths._pop(index)
        self._column_headers._pop(index)
        for row in self._table:
            row._pop(index)
def insert_row(self, index, row):
    """Insert a row before index in the table.

    Parameters
    ----------
    index : int
        List index rules apply
    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.
    ValueError:
        If size of `row` is inconsistent with the current number
        of columns.
    """
    # Validation may also initialize the column count from this row.
    row = self._validate_row(row)
    row_obj = RowData(self, row)
    self._table.insert(index, row_obj)
def append_row(self, row):
    """Append a row to end of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    # Inserting at len(table) is equivalent to appending.
    self.insert_row(len(self._table), row)
def update_row(self, key, value):
    """Update one row (int key) or several rows (slice key) in place.

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.
    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If index specified is out of range.
    TypeError:
        If `value` is of incorrect type.
    ValueError:
        If length of row does not matches number of columns.
    """
    if isinstance(key, int):
        row = self._validate_row(value, init_table_if_required=False)
        row_obj = RowData(self, row)
        self._table[key] = row_obj
    elif isinstance(key, slice):
        row_obj_list = []
        # NOTE(review): unlike the int branch, this branch lets the first
        # row initialize an empty table (init_table_if_required=True).
        for row in value:
            row_ = self._validate_row(row, init_table_if_required=True)
            row_obj_list.append(RowData(self, row_))
        self._table[key] = row_obj_list
    else:
        raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
    """Update the column titled *header* in place.

    If *column* has fewer items than the table has rows, only the
    first ``len(column)`` rows are updated.

    Parameters
    ----------
    header : str
        Header of the column to update.
    column : iterable
        New values for the column.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.
    KeyError:
        If no column exists with title `header`.
    """
    # BUGFIX: validate the type of `header` BEFORE using it for the
    # index lookup; previously the lookup ran first, so the type check
    # was effectively unreachable for most bad inputs.
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    # zip stops at the shorter of (rows, column), giving the documented
    # partial-update behaviour.
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.
    header : str
        Title of the column.
    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.
    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # First column ever: it defines the rows of the table.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        column_length = 0
        # Insert cell-by-cell; `column_length` tracks how far we got so a
        # short column can be rolled back below.
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        if column_length == len(self._table) - 1:
            # Every row received a value; commit the column metadata.
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))
def append_column(self, header, column):
    """Append a column to end of the table.

    Parameters
    ----------
    header : str
        Title of the column
    column : iterable
        Any iterable of appropriate length.
    """
    # Inserting at column_count is equivalent to appending.
    self.insert_column(self._column_count, header, column)
def clear(self, clear_metadata=False):
    """Remove all rows of the table; optionally reset column metadata.

    Parameters
    ----------
    clear_metadata : bool, optional
        When True (default False), alignment, padding, width and headers
        are also reset and the column count drops to 0.
    """
    # Slice assignment empties in place and works on Python 2.7, which
    # lacks list.clear().
    self._table[:] = []
    if clear_metadata:
        self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the table.
    Column width should be set prior to calling this method. This method
    detects intersection and handles it according to the values of
    `intersect_*_*` attributes.

    Parameters
    ----------
    char : str
        Character used to draw the line.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    width = self.get_table_width()
    # Tile `char` to cover the width; a zero-width char (empty string)
    # falls back to a blank line.
    try:
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        line = [' '] * width
    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk the column boundaries, splicing the mid junction
                # character over each separator position.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)
    return ''.join(line)
def _get_top_border(self):
    # Top border line of the table.
    return self._get_horizontal_line(self.top_border_char,
                                     self.intersect_top_left,
                                     self.intersect_top_mid,
                                     self.intersect_top_right)
def get_top_border(self): # pragma : no cover
"""Get the Top border of table.
Column width should be set prior to calling this method.
Returns
-------
str
String which will be printed as the Top border of the table.
"""
deprecation("'get_top_border()' is deprecated")
return self._get_top_border()
def _get_header_separator(self):
    # Build the line drawn between the header row and the data rows.
    chars = (self.header_separator_char,
             self.intersect_header_left,
             self.intersect_header_mid,
             self.intersect_header_right)
    return self._get_horizontal_line(*chars)

def get_header_separator(self):  # pragma : no cover
    """Return the header separator of the table (deprecated).

    Column widths must already be set.

    Returns
    -------
    str
        String which will be printed as the header separator.
    """
    deprecation("'get_header_separator()' is deprecated")
    return self._get_header_separator()
def _get_row_separator(self):
    # Build the line drawn between two consecutive data rows.
    chars = (self.row_separator_char,
             self.intersect_row_left,
             self.intersect_row_mid,
             self.intersect_row_right)
    return self._get_horizontal_line(*chars)

def get_row_separator(self):  # pragma : no cover
    """Return the row separator of the table (deprecated).

    Column widths must already be set.

    Returns
    -------
    str
        String which will be printed as the row separator.
    """
    deprecation("'get_row_separator()' is deprecated")
    return self._get_row_separator()
def _get_bottom_border(self):
    # Build the bottom edge from the characters configured for it.
    chars = (self.bottom_border_char,
             self.intersect_bottom_left,
             self.intersect_bottom_mid,
             self.intersect_bottom_right)
    return self._get_horizontal_line(*chars)

def get_bottom_border(self):  # pragma : no cover
    """Return the bottom border of the table (deprecated).

    Column widths must already be set.

    Returns
    -------
    str
        String which will be printed as the bottom border of the table.
    """
    deprecation("'get_bottom_border()' is deprecated")
    return self._get_bottom_border()
def get_table_width(self):
    """Return the printed width of the table in characters.

    Column widths must be set before calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    # Total = cell widths + one separator between each adjacent pair
    # of columns + the two outer borders.
    separators = ((self._column_count - 1)
                  * termwidth(self.column_separator_char))
    borders = (termwidth(self.left_border_char)
               + termwidth(self.right_border_char))
    return sum(self._column_widths) + separators + borders
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time ,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """
    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''
    # Temporarily prepend the auto-generated serial-number column; it is
    # popped again at the bottom so the table is left unmodified.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))
    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()
    string_ = []
    # Drawing the top border
    if self.top_border_char:
        string_.append(
            self._get_top_border())
    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)
        # Header separator is drawn only when headers are printed.
        if self.header_separator_char:
            string_.append(
                self._get_header_separator())
    # Printing rows
    first_row_encountered = False
    for row in self._table:
        # A row separator goes between rows, never before the first one.
        if first_row_encountered and self.row_separator_char:
            string_.append(
                self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)
    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(
            self._get_bottom_border())
    # Undo the temporary serial-number column inserted above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)
    return '\n'.join(string_)
|
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data.

    Widths are chosen so the full table fits within `max_table_width`
    when possible: columns within their fair share keep their natural
    width; wider columns are shrunk proportionally, and any leftover
    space is handed back to the shrunk columns.
    """
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # `offset` is the non-content width: borders, separators and padding.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)
    # Natural width of each column = widest line among its cells/header.
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length
    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset
    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1
    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    shrinked_columns = {}
    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table.
    # NOTE(review): `shrinked_columns` is keyed by the new width, so two
    # columns shrunk to the same width collide -- confirm upstream intent.
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                shrinked_columns[new_width] = i
    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # BUGFIX: the extra width must go to the shrunk column
                # itself (`index`), not to column `i` of the enumeration,
                # matching the remainder hand-out below.
                self.column_widths[index] += extra_width
                if i == (len(shrinked_columns) - 1):
                    # Last shrunk column absorbs any rounding remainder.
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra
    # Finally add back the padding on both sides of every column.
    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
"def termwidth(item):\n \"\"\"Returns the visible width of the string as shown on the terminal\"\"\"\n obj = ANSIMultiByteString(to_unicode(item))\n return obj.termwidth()\n",
"def get_output_str(item, detect_numerics, precision, sign_value):\n \"\"\"Returns the final string which should be displayed\"\"\"\n if detect_numerics:\n item = _convert_to_numeric(item)\n if isinstance(item, float):\n item = round(item, precision)\n try:\n item = '{:{sign}}'.format(item, sign=sign_value)\n except (ValueError, TypeError):\n pass\n return to_unicode(item)\n",
"def get_table_width(self):\n \"\"\"Get the width of the table as number of characters.\n\n Column width should be set prior to calling this method.\n\n Returns\n -------\n int\n Width of the table as number of characters.\n \"\"\"\n if self.column_count == 0:\n return 0\n width = sum(self._column_widths)\n width += ((self._column_count - 1)\n * termwidth(self.column_separator_char))\n width += termwidth(self.left_border_char)\n width += termwidth(self.right_border_char)\n return width\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line separating Header from data.
row_separator_char : str
Character used to draw the line separating two rows.
column_separator_char : str
Character used to draw the line separating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
             default_alignment=enums.ALIGN_CENTER,
             default_padding=1):
    # Style must be applied first: it seeds the border/separator
    # characters that later attribute accesses rely on.
    self.set_style(enums.STYLE_DEFAULT)

    self.numeric_precision = 3
    self.serialno = False
    self.serialno_header = "SN"
    self.detect_numerics = True

    self._column_count = 0
    self._sign_mode = enums.SM_MINUS
    self._width_exceed_policy = enums.WEP_WRAP
    self._column_pad = " "
    self.default_alignment = default_alignment
    self.default_padding = default_padding
    # max_table_width setter just stores the value; the getter later
    # clamps it to the minimum printable width.
    self.max_table_width = max_width
    self._initialize_table(0)
    self._table = []
def __setattr__(self, name, value):
    """Validate that border/separator characters are always strings."""
    char_attrs = frozenset((
        'left_border_char', 'right_border_char', 'top_border_char',
        'bottom_border_char', 'header_separator_char',
        'column_separator_char', 'row_separator_char',
        'intersect_top_left', 'intersect_top_mid',
        'intersect_top_right', 'intersect_header_left',
        'intersect_header_mid', 'intersect_header_right',
        'intersect_row_left', 'intersect_row_mid',
        'intersect_row_right', 'intersect_bottom_left',
        'intersect_bottom_mid', 'intersect_bottom_right'))
    if to_unicode(name) in char_attrs and not isinstance(value, basestring):
        raise TypeError(("Expected {attr} to be of type 'str', "
                         "got '{attr_type}'").format(
                             attr=name,
                             attr_type=type(value).__name__))
    super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
    """Number of columns in the table (read only)."""
    return self._column_count
@property
def intersection_char(self):  # pragma : no cover
    """Character used to draw intersection of perpendicular lines.

    Disabling it just draws the horizontal line char in it's place.

    This attribute is deprecated. Use specific intersect_*_* attribute.
    """
    deprecation("'intersection_char' is deprecated, Use specific "
                "`intersect_*_*` attribute instead")
    return self.intersect_top_left

@intersection_char.setter
def intersection_char(self, value):  # pragma : no cover
    deprecation("'intersection_char' is deprecated, Use specific "
                "`intersect_*_*` attributes instead")
    # Fan the single legacy value out to all twelve junction characters.
    for corner in ('top_left', 'top_mid', 'top_right',
                   'header_left', 'header_mid', 'header_right',
                   'row_left', 'row_mid', 'row_right',
                   'bottom_left', 'bottom_mid', 'bottom_right'):
        setattr(self, 'intersect_' + corner, value)
@property
def sign_mode(self):
    """How signs are rendered for numerical data.

    One of:

    ======================== =============================================
    Option                   Meaning
    ======================== =============================================
    beautifultable.SM_PLUS   A sign should be used for both +ve and -ve
                             numbers.
    beautifultable.SM_MINUS  A sign should only be used for -ve numbers.
    beautifultable.SM_SPACE  A leading space should be used for +ve
                             numbers and a minus sign for -ve numbers.
    ======================== =============================================
    """
    return self._sign_mode

@sign_mode.setter
def sign_mode(self, value):
    if not isinstance(value, enums.SignMode):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.SignMode)
        raise ValueError("allowed values for sign_mode are: "
                         + ', '.join(allowed))
    self._sign_mode = value
@property
def width_exceed_policy(self):
    """How items wider than their column are handled.

    One of:

    ============================ =========================================
    Option                       Meaning
    ============================ =========================================
    beautifulbable.WEP_WRAP      An item is wrapped so every line fits
                                 within it's column width.
    beautifultable.WEP_STRIP     An item is stripped to fit in it's
                                 column.
    beautifultable.WEP_ELLIPSIS  An item is stripped to fit in it's
                                 column and appended with ...(Ellipsis).
    ============================ =========================================
    """
    return self._width_exceed_policy

@width_exceed_policy.setter
def width_exceed_policy(self, value):
    if not isinstance(value, enums.WidthExceedPolicy):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.WidthExceedPolicy)
        raise ValueError("allowed values for width_exceed_policy are: "
                         + ', '.join(allowed))
    self._width_exceed_policy = value
@property
def default_alignment(self):
    """Alignment applied to newly created columns.

    One of:

    ============================ =========================================
    Option                       Meaning
    ============================ =========================================
    beautifultable.ALIGN_LEFT    New columns are left aligned.
    beautifultable.ALIGN_CENTER  New columns are center aligned.
    beautifultable.ALIGN_RIGHT   New columns are right aligned.
    ============================ =========================================
    """
    return self._default_alignment

@default_alignment.setter
def default_alignment(self, value):
    if not isinstance(value, enums.Alignment):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Alignment)
        raise ValueError("allowed values for default_alignment are: "
                         + ', '.join(allowed))
    self._default_alignment = value
@property
def default_padding(self):
    """Initial left/right padding width used for new columns."""
    return self._default_padding

@default_padding.setter
def default_padding(self, value):
    # Guard clauses instead of if/elif/else chain.
    if not isinstance(value, int):
        raise TypeError("padding must be an integer")
    if value <= 0:
        raise ValueError("padding must be more than 0")
    self._default_padding = value
@property
def column_widths(self):
    """get/set width for the columns of the table.

    Width of the column specifies the max number of characters
    a column can contain. Larger characters are handled according to
    the value of `width_exceed_policy`.
    """
    return self._column_widths

@column_widths.setter
def column_widths(self, value):
    validated = self._validate_row(value)
    self._column_widths = PositiveIntegerMetaData(self, validated)
@property
def column_headers(self):
    """get/set titles for the columns of the table.

    It can be any iterable having all members an instance of `str`.
    """
    return self._column_headers

@column_headers.setter
def column_headers(self, value):
    header = self._validate_row(value)
    for item in header:
        if not isinstance(item, basestring):
            raise TypeError(("Headers should be of type 'str', "
                             "not {}").format(type(item)))
    self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
    """get/set alignment of the columns of the table.

    It can be any iterable containing only the following:

    * beautifultable.ALIGN_LEFT
    * beautifultable.ALIGN_CENTER
    * beautifultable.ALIGN_RIGHT
    """
    return self._column_alignments

@column_alignments.setter
def column_alignments(self, value):
    validated = self._validate_row(value)
    self._column_alignments = AlignmentMetaData(self, validated)
@property
def left_padding_widths(self):
    """get/set width for left padding of the columns of the table.

    Number of characters reserved on the left of each column for
    padding. By default it is 1.
    """
    return self._left_padding_widths

@left_padding_widths.setter
def left_padding_widths(self, value):
    validated = self._validate_row(value)
    self._left_padding_widths = PositiveIntegerMetaData(self, validated)
@property
def right_padding_widths(self):
    """get/set width for right padding of the columns of the table.

    Number of characters reserved on the right of each column for
    padding. By default it is 1.
    """
    return self._right_padding_widths

@right_padding_widths.setter
def right_padding_widths(self, value):
    validated = self._validate_row(value)
    self._right_padding_widths = PositiveIntegerMetaData(self, validated)
@property
def max_table_width(self):
    """get/set the maximum width of the table.

    The width of the table is guaranteed to not exceed this value. If it
    is not possible to print a given table with the width provided, this
    value will automatically adjust.
    """
    # NOTE(review): this getter mutates self._max_table_width as a side
    # effect, clamping it to the minimum printable width (borders +
    # separators + at least one character per column).
    offset = ((self._column_count - 1)
              * termwidth(self.column_separator_char))
    offset += termwidth(self.left_border_char)
    offset += termwidth(self.right_border_char)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)
    return self._max_table_width

@max_table_width.setter
def max_table_width(self, value):
    self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
    """Reset per-column metadata for `column_count` fresh columns.

    Called when the number of columns is set for the first time (or
    cleared).

    Parameters
    ----------
    column_count : int
        number of columns in the table
    """
    self._column_count = column_count
    self._column_headers = HeaderData(self, [''] * column_count)
    self._column_alignments = AlignmentMetaData(
        self, [self.default_alignment] * column_count)
    self._column_widths = PositiveIntegerMetaData(
        self, [0] * column_count)
    padding = [self.default_padding] * column_count
    self._left_padding_widths = PositiveIntegerMetaData(self, padding)
    self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
    """Coerce `value` to a list and check it fits the column count.

    If the table has no columns yet and `init_table_if_required` is
    True, the table is initialized with len(value) columns.
    """
    # TODO: Rename this method
    # A str is iterable but never a valid row, so reject it explicitly.
    if isinstance(value, basestring) or not isinstance(value, Iterable):
        raise TypeError("parameter must be an iterable")
    row = list(value)
    if init_table_if_required and self._column_count == 0:
        self._initialize_table(len(row))
    if len(row) != self._column_count:
        raise ValueError(("'Expected iterable of length {}, "
                          "got {}").format(self._column_count, len(row)))
    return row
def __getitem__(self, key):
    """Get a row, or a column, or a new table by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, returns a row.
        If key is an `str`, returns iterator to a column with header `key`.
        If key is a slice object, returns a new table sliced according to
        rows.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, slice):
        new_table = copy.copy(self)

        # Every child of BaseRow class needs to be reassigned so that
        # they contain a reference to the new table rather than the old
        # one. This was the cause of a nasty bug once.
        new_table.column_headers = self.column_headers
        new_table.column_alignments = self.column_alignments
        new_table.column_widths = self.column_widths
        new_table.left_padding_widths = self.left_padding_widths
        # BUGFIX: previously this copied left_padding_widths, so sliced
        # tables silently lost their right padding configuration.
        new_table.right_padding_widths = self.right_padding_widths
        new_table._table = []
        for row in self._table[key]:
            new_table.append_row(row)
        return new_table
    elif isinstance(key, int):
        return self._table[key]
    elif isinstance(key, basestring):
        return self.get_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
    """Delete a row (int), several rows (slice) or a column (str).

    Parameters
    ----------
    key : int, slice, str
        An `int` deletes one row, a slice deletes multiple rows, and a
        `str` deletes the first column with heading `key`.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, (int, slice)):
        del self._table[key]
    elif isinstance(key, basestring):
        return self.pop_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
    """Update a row (int), several rows (slice) or a column (str).

    Parameters
    ----------
    key : int, slice, str
        An `int` or slice updates the matching row(s); a `str` updates
        the column whose header is `key`.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    """
    if isinstance(key, (int, slice)):
        self.update_row(key, value)
    elif isinstance(key, basestring):
        self.update_column(key, value)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __len__(self):
    """Number of rows in the table."""
    return len(self._table)

def __contains__(self, key):
    """Test membership: a `str` checks headers, an iterable checks rows."""
    if isinstance(key, basestring):
        return key in self._column_headers
    if isinstance(key, Iterable):
        return key in self._table
    raise TypeError(("'key' must be str or Iterable, "
                     "not {}").format(type(key).__name__))
def __iter__(self):
    # Iterate over the rows of the table.
    return iter(self._table)

def __next__(self):
    # NOTE(review): self._table is a plain list, and calling next() on a
    # list raises TypeError ('list' object is not an iterator); this
    # method looks broken/dead -- confirm before relying on it.
    return next(self._table)

def __repr__(self):
    # Debug representation delegates to the underlying row list.
    return repr(self._table)

def __str__(self):
    # Fully rendered table, same as get_string().
    return self.get_string()
def set_style(self, style):
    """Set the style of the table from a predefined set of styles.

    Parameters
    ----------
    style: Style

        It can be one of the following:

        * beautifulTable.STYLE_DEFAULT
        * beautifultable.STYLE_NONE
        * beautifulTable.STYLE_DOTTED
        * beautifulTable.STYLE_MYSQL
        * beautifulTable.STYLE_SEPARATED
        * beautifulTable.STYLE_COMPACT
        * beautifulTable.STYLE_MARKDOWN
        * beautifulTable.STYLE_RESTRUCTURED_TEXT
        * beautifultable.STYLE_BOX
        * beautifultable.STYLE_BOX_DOUBLED
        * beautifultable.STYLE_BOX_ROUNDED
        * beautifultable.STYLE_GRID
    """
    if not isinstance(style, enums.Style):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Style)
        raise ValueError("allowed values for style are: "
                         + ', '.join(allowed))
    style_template = style.value
    # Copy every border/separator/junction character from the template.
    for attr in ('left_border_char', 'right_border_char',
                 'top_border_char', 'bottom_border_char',
                 'header_separator_char', 'column_separator_char',
                 'row_separator_char',
                 'intersect_top_left', 'intersect_top_mid',
                 'intersect_top_right',
                 'intersect_header_left', 'intersect_header_mid',
                 'intersect_header_right',
                 'intersect_row_left', 'intersect_row_mid',
                 'intersect_row_right',
                 'intersect_bottom_left', 'intersect_bottom_mid',
                 'intersect_bottom_right'):
        setattr(self, attr, getattr(style_template, attr))
def auto_calculate_width(self):  # pragma : no cover
    """Deprecated public alias for the internal width calculation."""
    deprecation("'auto_calculate_width()' is deprecated")
    self._calculate_column_widths()
def set_padding_widths(self, pad_width):
    """Set both the left and right padding widths of all columns.

    Parameters
    ----------
    pad_width : array_like
        pad widths for the columns.
    """
    # Assigning through the properties validates the iterable twice,
    # once per side.
    self.left_padding_widths = pad_width
    self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
    """Stable sort of the table *IN-PLACE* with respect to a column.

    Parameters
    ----------
    key: int, str
        index or header of the column. Normal list rules apply.
    reverse : bool
        If `True` then table is sorted as if each comparison was reversed.
    """
    if isinstance(key, basestring):
        index = self.get_column_index(key)
    elif isinstance(key, int):
        index = key
    else:
        raise TypeError("'key' must either be 'int' or 'str'")
    self._table.sort(key=operator.itemgetter(index), reverse=reverse)
def copy(self):
    """Return a shallow copy of the table.

    Returns
    -------
    BeautifulTable:
        shallow copy of the BeautifulTable instance.
    """
    # A full slice goes through __getitem__, which rebinds per-row
    # metadata to the new instance.
    return self[:]

def get_column_header(self, index):
    """Return the header of the column at `index`.

    Parameters
    ----------
    index: int
        Normal list rules apply.
    """
    return self._column_headers[index]
def get_column_index(self, header):
    """Get index of a column from it's header.

    Parameters
    ----------
    header: str
        header of the column.

    Returns
    -------
    int
        Index of the first column titled `header`.

    Raises
    ------
    KeyError:
        If no column could be found corresponding to `header`.
    """
    # DOCFIX: the original docstring promised ValueError, but the
    # ValueError from list.index is converted into a KeyError below
    # (with the original exception context suppressed).
    try:
        index = self._column_headers.index(header)
        return index
    except ValueError:
        raise_suppressed(KeyError(("'{}' is not a header for any "
                                   "column").format(header)))
def get_column(self, key):
    """Return an iterator over one column of the table.

    Parameters
    ----------
    key : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If key is not of type `int`, or `str`.

    Returns
    -------
    iter:
        Iterator to the specified column.
    """
    if isinstance(key, basestring):
        index = self.get_column_index(key)
    elif isinstance(key, int):
        index = key
    else:
        raise TypeError(("key must be an int or str, "
                         "not {}").format(type(key).__name__))
    # Lazily pull the cell at `index` out of every row.
    return iter(map(operator.itemgetter(index), self._table))
def reverse(self):
    """Reverse the order of the rows *IN PLACE*."""
    self._table.reverse()

def pop_row(self, index=-1):
    """Remove and return the row at `index` (default last).

    Parameters
    ----------
    index : int
        index of the row. Normal list rules apply.
    """
    return self._table.pop(index)
def pop_column(self, index=-1):
    """Remove the column at `index` (default last).

    DOCFIX: the original docstring described this as "remove and return
    row"; it removes a *column*, and despite the name the removed data
    is not returned.

    Parameters
    ----------
    index : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If index is not an instance of `int`, or `str`.
    IndexError:
        If the table has no columns.
    """
    if isinstance(index, int):
        pass
    elif isinstance(index, basestring):
        index = self.get_column_index(index)
    else:
        raise TypeError(("column index must be an integer or a string, "
                         "not {}").format(type(index).__name__))
    if self._column_count == 0:
        raise IndexError("pop from empty table")
    if self._column_count == 1:
        # This is the last column. So we should clear the table to avoid
        # empty rows
        self.clear(clear_metadata=True)
    else:
        # Not the last column. safe to pop from row
        self._column_count -= 1
        self._column_alignments._pop(index)
        self._column_widths._pop(index)
        self._left_padding_widths._pop(index)
        self._right_padding_widths._pop(index)
        self._column_headers._pop(index)
        for row in self._table:
            row._pop(index)
def insert_row(self, index, row):
    """Insert a row before `index` in the table.

    Parameters
    ----------
    index : int
        List index rules apply
    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.
    ValueError:
        If size of `row` is inconsistent with the current number
        of columns.
    """
    validated = self._validate_row(row)
    self._table.insert(index, RowData(self, validated))

def append_row(self, row):
    """Append a row to the end of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    self.insert_row(len(self._table), row)
def update_row(self, key, value):
    """Replace one row (int key) or several rows (slice key).

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.
    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If index specified is out of range.
    TypeError:
        If `value` is of incorrect type.
    ValueError:
        If length of row does not matches number of columns.
    """
    if isinstance(key, int):
        # Single row: must match the existing column count exactly.
        new_row = self._validate_row(value, init_table_if_required=False)
        self._table[key] = RowData(self, new_row)
    elif isinstance(key, slice):
        # Multiple rows: the first row may initialize an empty table.
        replacement = [
            RowData(self, self._validate_row(r, init_table_if_required=True))
            for r in value
        ]
        self._table[key] = replacement
    else:
        raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
    """Update the column titled `header` in place.

    If `column` has fewer items than the table has rows, only the
    first len(column) rows are updated.

    Parameters
    ----------
    header : str
        Header of the column
    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.
    KeyError:
        If no column exists with title `header`.
    """
    # BUGFIX: validate the type *before* the lookup so a non-str header
    # raises TypeError instead of a confusing KeyError from
    # get_column_index (the original checked the type after using it).
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.
    header : str
        Title of the column.
    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.
    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # First column ever: assigning headers through the property also
        # initializes per-column metadata for a 1-column table.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        column_length = 0
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        # NOTE(review): for a table with columns but zero rows the zip
        # above never runs, so column_length stays 0 and the check below
        # compares 0 == -1 -- inserting into a 0-row table appears to
        # always raise ValueError. Confirm whether that is intended.
        if column_length == len(self._table) - 1:
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))
def append_column(self, header, column):
    """Add *column* titled *header* as the last column of the table.

    Parameters
    ----------
    header : str
        Title of the column.
    column : iterable
        Any iterable of appropriate length.
    """
    # Appending is just inserting at the current column count.
    last_position = self._column_count
    self.insert_column(last_position, header, column)
def clear(self, clear_metadata=False):
    """Remove every row from the table.

    Parameters
    ----------
    clear_metadata : bool, optional
        When True (default False), per-column metadata such as
        alignment, padding and width is also reset and the column
        count becomes 0.
    """
    # Slice deletion instead of list.clear() keeps Python 2.7 support.
    del self._table[:]
    if not clear_metadata:
        return
    self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the
    table. Column width should be set prior to calling this method.
    This method detects intersection and handles it according to the
    values of the `intersect_*` parameters.

    Parameters
    ----------
    char : str
        Character used to draw the line.
    intersect_left : str
        Character drawn where the line meets the left border.
    intersect_mid : str
        Character drawn where the line crosses a column separator.
    intersect_right : str
        Character drawn where the line meets the right border.

    Returns
    -------
    str
        The rendered horizontal line.
    """
    width = self.get_table_width()
    try:
        # Repeat `char` enough times to cover `width` display columns,
        # then trim; termwidth accounts for wide (e.g. CJK) characters.
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        # `char` has zero display width (e.g. ''): fall back to spaces.
        line = [' '] * width
    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                # Overwrite the leftmost cells with the junction char.
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    # Write from the right edge inward.
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk across the columns, dropping a mid-junction at
                # each separator position.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    # NOTE: the inner loop reuses the name `i`; the
                    # outer `for` rebinds it each iteration so this is
                    # harmless, if confusing.
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)
    return ''.join(line)
def _get_top_border(self):
    # Render the top edge using the characters configured for it.
    return self._get_horizontal_line(
        self.top_border_char,
        self.intersect_top_left,
        self.intersect_top_mid,
        self.intersect_top_right,
    )
def get_top_border(self):  # pragma : no cover
    """Get the Top border of table (deprecated public wrapper).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    deprecation("'get_top_border()' is deprecated")
    border = self._get_top_border()
    return border
def _get_header_separator(self):
    # Render the line between the header row and the data rows.
    return self._get_horizontal_line(
        self.header_separator_char,
        self.intersect_header_left,
        self.intersect_header_mid,
        self.intersect_header_right,
    )
def get_header_separator(self):  # pragma : no cover
    """Get the Header separator of table (deprecated public wrapper).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    deprecation("'get_header_separator()' is deprecated")
    separator = self._get_header_separator()
    return separator
def _get_row_separator(self):
    # Render the line drawn between two consecutive data rows.
    return self._get_horizontal_line(
        self.row_separator_char,
        self.intersect_row_left,
        self.intersect_row_mid,
        self.intersect_row_right,
    )
def get_row_separator(self):  # pragma : no cover
    """Get the Row separator of table (deprecated public wrapper).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    deprecation("'get_row_separator()' is deprecated")
    separator = self._get_row_separator()
    return separator
def _get_bottom_border(self):
    # Render the bottom edge using the characters configured for it.
    return self._get_horizontal_line(
        self.bottom_border_char,
        self.intersect_bottom_left,
        self.intersect_bottom_mid,
        self.intersect_bottom_right,
    )
def get_bottom_border(self):  # pragma : no cover
    """Get the Bottom border of table (deprecated public wrapper).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    deprecation("'get_bottom_border()' is deprecated")
    border = self._get_bottom_border()
    return border
def get_table_width(self):
    """Return the rendered width of the table in characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    # Total = content widths + (n-1) separators + the two borders.
    separators = ((self._column_count - 1)
                  * termwidth(self.column_separator_char))
    borders = (termwidth(self.left_border_char)
               + termwidth(self.right_border_char))
    return sum(self._column_widths) + separators + borders
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time ,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """
    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''
    # The serial-number column is inserted temporarily and popped again
    # at the end of this method, so rendering has no lasting effect.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))
    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()
    string_ = []
    # Drawing the top border
    if self.top_border_char:
        string_.append(self._get_top_border())
    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)
        if self.header_separator_char:
            string_.append(self._get_header_separator())
    # Printing rows; a row separator goes *between* rows only, hence
    # the first-row flag.
    first_row_encountered = False
    for row in self._table:
        if first_row_encountered and self.row_separator_char:
            string_.append(self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)
    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(self._get_bottom_border())
    # Undo the temporary serial-number column inserted above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)
    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.sort | python | def sort(self, key, reverse=False):
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError("'key' must either be 'int' or 'str'")
self._table.sort(key=operator.itemgetter(index), reverse=reverse) | Stable sort of the table *IN-PLACE* with respect to a column.
Parameters
----------
key: int, str
index or header of the column. Normal list rules apply.
reverse : bool
If `True` then table is sorted as if each comparison was reversed. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L718-L734 | [
"def get_column_index(self, header):\n \"\"\"Get index of a column from it's header.\n\n Parameters\n ----------\n header: str\n header of the column.\n\n Raises\n ------\n ValueError:\n If no column could be found corresponding to `header`.\n \"\"\"\n try:\n index = self._column_headers.index(header)\n return index\n except ValueError:\n raise_suppressed(KeyError((\"'{}' is not a header for any \"\n \"column\").format(header)))\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line separating Header from data.
row_separator_char : str
Character used to draw the line separating two rows.
column_separator_char : str
Character used to draw the line separating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
default_alignment=enums.ALIGN_CENTER,
default_padding=1):
self.set_style(enums.STYLE_DEFAULT)
self.numeric_precision = 3
self.serialno = False
self.serialno_header = "SN"
self.detect_numerics = True
self._column_count = 0
self._sign_mode = enums.SM_MINUS
self._width_exceed_policy = enums.WEP_WRAP
self._column_pad = " "
self.default_alignment = default_alignment
self.default_padding = default_padding
self.max_table_width = max_width
self._initialize_table(0)
self._table = []
def __setattr__(self, name, value):
attrs = ('left_border_char', 'right_border_char', 'top_border_char',
'bottom_border_char', 'header_separator_char',
'column_separator_char', 'row_separator_char',
'intersect_top_left', 'intersect_top_mid',
'intersect_top_right', 'intersect_header_left',
'intersect_header_mid', 'intersect_header_right',
'intersect_row_left', 'intersect_row_mid',
'intersect_row_right', 'intersect_bottom_left',
'intersect_bottom_mid', 'intersect_bottom_right')
if to_unicode(name) in attrs and not isinstance(value, basestring):
value_type = type(value).__name__
raise TypeError(("Expected {attr} to be of type 'str', "
"got '{attr_type}'").format(attr=name,
attr_type=value_type))
super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
"""Get the number of columns in the table(read only)"""
return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifulbable.WEP_WRAP An item is wrapped so every line fits
within it's column width.
beautifultable.WEP_STRIP An item is stripped to fit in it's
column.
beautifultable.WEP_ELLIPSIS An item is stripped to fit in it's
column and appended with ...(Ellipsis).
============================ =========================================
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable having all members an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.
Right Width of the padding specifies the number of characters
on the right of a column reserved for padding. By default it is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
"""Sets the column count of the table.
This method is called to set the number of columns for the first time.
Parameters
----------
column_count : int
number of columns in the table
"""
header = [''] * column_count
alignment = [self.default_alignment] * column_count
width = [0] * column_count
padding = [self.default_padding] * column_count
self._column_count = column_count
self._column_headers = HeaderData(self, header)
self._column_alignments = AlignmentMetaData(self, alignment)
self._column_widths = PositiveIntegerMetaData(self, width)
self._left_padding_widths = PositiveIntegerMetaData(self, padding)
self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
# TODO: Rename this method
# str is also an iterable but it is not a valid row, so
# an extra check is required for str
if not isinstance(value, Iterable) or isinstance(value, basestring):
raise TypeError("parameter must be an iterable")
row = list(value)
if init_table_if_required and self._column_count == 0:
self._initialize_table(len(row))
if len(row) != self._column_count:
raise ValueError(("'Expected iterable of length {}, "
"got {}").format(self._column_count, len(row)))
return row
def __getitem__(self, key):
    """Get a row, or a column, or a new table by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, returns a row.
        If key is an `str`, returns iterator to a column with header `key`.
        If key is a slice object, returns a new table sliced according to
        rows.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, slice):
        new_table = copy.copy(self)
        # Every child of BaseRow class needs to be reassigned so that
        # they contain a reference to the new table rather than the old.
        # This was a cause of a nasty bug once.
        new_table.column_headers = self.column_headers
        new_table.column_alignments = self.column_alignments
        new_table.column_widths = self.column_widths
        new_table.left_padding_widths = self.left_padding_widths
        # BUGFIX: this previously copied `left_padding_widths`, so a
        # sliced table silently lost any asymmetric right padding.
        new_table.right_padding_widths = self.right_padding_widths
        new_table._table = []
        for row in self._table[key]:
            new_table.append_row(row)
        return new_table
    elif isinstance(key, int):
        return self._table[key]
    elif isinstance(key, basestring):
        return self.get_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, updates a row.
If key is an `str`, appends `column` to the list with header as
`key`.
If key is a slice object, updates multiple rows according to slice
rules.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __len__(self):
return len(self._table)
def __contains__(self, key):
if isinstance(key, basestring):
return key in self._column_headers
elif isinstance(key, Iterable):
return key in self._table
else:
raise TypeError(("'key' must be str or Iterable, "
"not {}").format(type(key).__name__))
def __iter__(self):
return iter(self._table)
def __next__(self):
return next(self._table)
def __repr__(self):
return repr(self._table)
def __str__(self):
return self.get_string()
def set_style(self, style):
"""Set the style of the table from a predefined set of styles.
Parameters
----------
style: Style
It can be one of the following:
* beautifulTable.STYLE_DEFAULT
* beautifultable.STYLE_NONE
* beautifulTable.STYLE_DOTTED
* beautifulTable.STYLE_MYSQL
* beautifulTable.STYLE_SEPARATED
* beautifulTable.STYLE_COMPACT
* beautifulTable.STYLE_MARKDOWN
* beautifulTable.STYLE_RESTRUCTURED_TEXT
* beautifultable.STYLE_BOX
* beautifultable.STYLE_BOX_DOUBLED
* beautifultable.STYLE_BOX_ROUNDED
* beautifultable.STYLE_GRID
"""
if not isinstance(style, enums.Style):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Style)
error_msg = ("allowed values for style are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
style_template = style.value
self.left_border_char = style_template.left_border_char
self.right_border_char = style_template.right_border_char
self.top_border_char = style_template.top_border_char
self.bottom_border_char = style_template.bottom_border_char
self.header_separator_char = style_template.header_separator_char
self.column_separator_char = style_template.column_separator_char
self.row_separator_char = style_template.row_separator_char
self.intersect_top_left = style_template.intersect_top_left
self.intersect_top_mid = style_template.intersect_top_mid
self.intersect_top_right = style_template.intersect_top_right
self.intersect_header_left = style_template.intersect_header_left
self.intersect_header_mid = style_template.intersect_header_mid
self.intersect_header_right = style_template.intersect_header_right
self.intersect_row_left = style_template.intersect_row_left
self.intersect_row_mid = style_template.intersect_row_mid
self.intersect_row_right = style_template.intersect_row_right
self.intersect_bottom_left = style_template.intersect_bottom_left
self.intersect_bottom_mid = style_template.intersect_bottom_mid
self.intersect_bottom_right = style_template.intersect_bottom_right
def _calculate_column_widths(self):
    """Calculate width of each column automatically based on data.

    Writes the result into `self.column_widths` (content width plus
    padding per column), trying to keep the total table width within
    `self._max_table_width`.
    """
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # `offset` is the fixed overhead: borders, separators and padding.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)
    # Natural width of each column: the widest line among its cells
    # and its header, measured after numeric/sign formatting.
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length
    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset
    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate at least 1 character width to the column
            temp_sum += 1
    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    # Maps shrunk width -> column index.
    # NOTE(review): two columns shrunk to the same width collide on
    # this key and only the last one is remembered — confirm intended.
    shrinked_columns = {}
    # Columns which exceed their fair share should be shrunk based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                shrinked_columns[new_width] = i
    # Divide any remaining space among shrunk columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # BUGFIX: this previously added to column_widths[i],
                # but `i` is the position in the sorted-widths walk,
                # not a column index; the extra space went to an
                # unrelated column.
                self.column_widths[index] += extra_width
                if i == (len(shrinked_columns) - 1):
                    # Last shrunk column absorbs the rounding residue.
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra
    # Finally fold the per-column padding into the stored widths.
    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self):  # pragma : no cover
    """Deprecated public alias for the internal width calculation."""
    deprecation("'auto_calculate_width()' is deprecated")
    self._calculate_column_widths()
def set_padding_widths(self, pad_width):
    """Set both the left and the right padding widths of the columns.

    Parameters
    ----------
    pad_width : array_like
        pad widths for the columns.
    """
    # Assign left first, then right, matching the documented order.
    for side in ("left_padding_widths", "right_padding_widths"):
        setattr(self, side, pad_width)
def copy(self):
    """Return a shallow copy of the table.

    Returns
    -------
    BeautifulTable:
        shallow copy of the BeautifulTable instance.
    """
    # Full-range slicing goes through __getitem__, which rebuilds the
    # per-column metadata for the new instance.
    return self.__getitem__(slice(None))
def get_column_header(self, index):
    """Return the title of the column at position *index*.

    Parameters
    ----------
    index: int
        Normal list rules apply.
    """
    header = self._column_headers[index]
    return header
def get_column_index(self, header):
    """Return the index of the column titled *header*.

    Parameters
    ----------
    header: str
        header of the column.

    Raises
    ------
    KeyError:
        If no column could be found corresponding to `header`.
    """
    try:
        return self._column_headers.index(header)
    except ValueError:
        # Translate the list-level ValueError into the mapping-style
        # KeyError that callers of this method expect.
        raise_suppressed(KeyError(("'{}' is not a header for any "
                                   "column").format(header)))
def get_column(self, key):
    """Return an iterator over the cells of a single column.

    Parameters
    ----------
    key : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If key is not of type `int`, or `str`.

    Returns
    -------
    iter:
        Iterator to the specified column.
    """
    if isinstance(key, basestring):
        index = self.get_column_index(key)
    elif isinstance(key, int):
        index = key
    else:
        raise TypeError(("key must be an int or str, "
                         "not {}").format(type(key).__name__))
    return iter(map(operator.itemgetter(index), self._table))
def reverse(self):
    """Reverse the order of the rows of the table *IN PLACE*."""
    # In-place slice assignment mutates the same underlying list,
    # exactly like list.reverse().
    self._table[:] = self._table[::-1]
def pop_row(self, index=-1):
    """Remove and return the row at *index* (default last).

    Parameters
    ----------
    index : int
        index of the row. Normal list rules apply.
    """
    return self._table.pop(index)
def pop_column(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If index is not an instance of `int`, or `str`.
IndexError:
If Table is empty.
"""
if isinstance(index, int):
pass
elif isinstance(index, basestring):
index = self.get_column_index(index)
else:
raise TypeError(("column index must be an integer or a string, "
"not {}").format(type(index).__name__))
if self._column_count == 0:
raise IndexError("pop from empty table")
if self._column_count == 1:
# This is the last column. So we should clear the table to avoid
# empty rows
self.clear(clear_metadata=True)
else:
# Not the last column. safe to pop from row
self._column_count -= 1
self._column_alignments._pop(index)
self._column_widths._pop(index)
self._left_padding_widths._pop(index)
self._right_padding_widths._pop(index)
self._column_headers._pop(index)
for row in self._table:
row._pop(index)
def insert_row(self, index, row):
"""Insert a row before index in the table.
Parameters
----------
index : int
List index rules apply
row : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `row` is not an iterable.
ValueError:
If size of `row` is inconsistent with the current number
of columns.
"""
row = self._validate_row(row)
row_obj = RowData(self, row)
self._table.insert(index, row_obj)
def append_row(self, row):
"""Append a row to end of the table.
Parameters
----------
row : iterable
Any iterable of appropriate length.
"""
self.insert_row(len(self._table), row)
def update_row(self, key, value):
"""Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
key : int or slice
index of the row, or a slice object.
value : iterable
If an index is specified, `value` should be an iterable
of appropriate length. Instead if a slice object is
passed as key, value should be an iterable of rows.
Raises
------
IndexError:
If index specified is out of range.
TypeError:
If `value` is of incorrect type.
ValueError:
If length of row does not matches number of columns.
"""
if isinstance(key, int):
row = self._validate_row(value, init_table_if_required=False)
row_obj = RowData(self, row)
self._table[key] = row_obj
elif isinstance(key, slice):
row_obj_list = []
for row in value:
row_ = self._validate_row(row, init_table_if_required=True)
row_obj_list.append(RowData(self, row_))
self._table[key] = row_obj_list
else:
raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
    """Update the column titled `header` in place.

    If `column` has fewer items than there are rows, say `k`, only the
    first `k` rows are updated; any extra items in `column` beyond the
    number of rows are silently ignored (``zip`` truncation).

    Parameters
    ----------
    header : str
        Header of the column to update.
    column : iterable
        New values for the column.

    Raises
    ------
    TypeError:
        If `header` is not of type str.
    KeyError:
        If no column exists with title `header`.
    """
    # Validate the header type *before* looking it up, so a non-str
    # header raises TypeError as documented instead of failing inside
    # get_column_index. (The original checked the type only after the
    # lookup, making the check unreachable for invalid headers.)
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.
    header : str
        Title of the column.
    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.
    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # Empty table: this column defines the table -- one row is
        # created per item of `column`.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        column_length = 0
        # Tentatively splice the new cell into each row, remembering
        # how far we got so a too-short column can be rolled back.
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        if column_length == len(self._table) - 1:
            # Every row received a value: commit the column metadata.
            # NOTE(review): when the table has columns but zero rows,
            # column_length stays 0 while len(self._table)-1 is -1, so
            # this branch is skipped and the rollback loop below would
            # index row 0 of an empty table -- verify this edge case.
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))
def append_column(self, header, column):
    """Add a column titled `header` at the right end of the table.

    Parameters
    ----------
    header : str
        Title of the new column.
    column : iterable
        Values for the new column, one per row.
    """
    # Appending is inserting at position == current column count.
    position = self._column_count
    self.insert_column(position, header, column)
def clear(self, clear_metadata=False):
    """Remove all rows from the table.

    Parameters
    ----------
    clear_metadata : bool, optional
        When True (default False), also reset all per-column metadata
        (alignment, padding, width, headers) and set the number of
        columns to 0.
    """
    # Slice assignment empties the list in place; list.clear() is not
    # available on Python 2.7, which this codebase still supports.
    self._table[:] = []
    if clear_metadata:
        self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Build one horizontal rule of the table.

    Internal method used to render every horizontal line (top border,
    header separator, row separator, bottom border). Column width
    should be set prior to calling this method. Where the line crosses
    a vertical border or column separator, the corresponding
    `intersect_*` character is drawn instead, provided both crossing
    lines are visible (non-whitespace).

    Parameters
    ----------
    char : str
        Character used to draw the line.
    intersect_left : str
        Character drawn where the line meets the left border.
    intersect_mid : str
        Character drawn where the line crosses a column separator.
    intersect_right : str
        Character drawn where the line meets the right border.

    Returns
    -------
    str
        The rendered horizontal line.
    """
    width = self.get_table_width()
    try:
        # Repeat `char` until the line covers `width` cells, then trim
        # to exactly `width` (termwidth accounts for wide characters).
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        # `char` has zero terminal width; fall back to spaces.
        line = [' '] * width
    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    # Overwrite from the right-hand end of the line.
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk column by column, overwriting each separator slot
                # with the mid-intersection character.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)
    return ''.join(line)
def _get_top_border(self):
    # Delegate to the generic rule builder with the top-border glyphs.
    return self._get_horizontal_line(
        self.top_border_char,
        self.intersect_top_left,
        self.intersect_top_mid,
        self.intersect_top_right,
    )
def get_top_border(self):  # pragma : no cover
    """Return the top border of the table (deprecated).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    deprecation("'get_top_border()' is deprecated")
    border = self._get_top_border()
    return border
def _get_header_separator(self):
    # Delegate to the generic rule builder with the header-separator
    # glyphs.
    return self._get_horizontal_line(
        self.header_separator_char,
        self.intersect_header_left,
        self.intersect_header_mid,
        self.intersect_header_right,
    )
def get_header_separator(self):  # pragma : no cover
    """Return the header separator of the table (deprecated).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    deprecation("'get_header_separator()' is deprecated")
    separator = self._get_header_separator()
    return separator
def _get_row_separator(self):
    # Delegate to the generic rule builder with the row-separator
    # glyphs.
    return self._get_horizontal_line(
        self.row_separator_char,
        self.intersect_row_left,
        self.intersect_row_mid,
        self.intersect_row_right,
    )
def get_row_separator(self):  # pragma : no cover
    """Return the row separator of the table (deprecated).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    deprecation("'get_row_separator()' is deprecated")
    separator = self._get_row_separator()
    return separator
def _get_bottom_border(self):
    # Delegate to the generic rule builder with the bottom-border
    # glyphs.
    return self._get_horizontal_line(
        self.bottom_border_char,
        self.intersect_bottom_left,
        self.intersect_bottom_mid,
        self.intersect_bottom_right,
    )
def get_bottom_border(self):  # pragma : no cover
    """Return the bottom border of the table (deprecated).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    deprecation("'get_bottom_border()' is deprecated")
    border = self._get_bottom_border()
    return border
def get_table_width(self):
    """Return the width of the rendered table in characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    # Total = sum of column widths + inner separators + outer borders.
    separators = ((self._column_count - 1)
                  * termwidth(self.column_separator_char))
    borders = (termwidth(self.left_border_char)
               + termwidth(self.right_border_char))
    return sum(self._column_widths) + separators + borders
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """
    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''
    # Temporarily prepend an auto-generated serial-number column; it is
    # popped again at the end of this method so the table itself is
    # left unchanged.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))
    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()
    string_ = []
    # Drawing the top border
    if self.top_border_char:
        string_.append(
            self._get_top_border())
    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)
        if self.header_separator_char:
            string_.append(
                self._get_header_separator())
    # Printing rows
    first_row_encountered = False
    for row in self._table:
        # Row separators go *between* rows, so none before the first.
        if first_row_encountered and self.row_separator_char:
            string_.append(
                self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)
    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(
            self._get_bottom_border())
    # Remove the temporary serial-number column inserted above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)
    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.get_column_index | python | def get_column_index(self, header):
try:
index = self._column_headers.index(header)
return index
except ValueError:
raise_suppressed(KeyError(("'{}' is not a header for any "
"column").format(header))) | Get index of a column from it's header.
Parameters
----------
header: str
header of the column.
Raises
------
ValueError:
If no column could be found corresponding to `header`. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L756-L774 | [
"def raise_suppressed(exp):\n exp.__cause__ = None\n raise exp\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
default_alignment=enums.ALIGN_CENTER,
default_padding=1):
self.set_style(enums.STYLE_DEFAULT)
self.numeric_precision = 3
self.serialno = False
self.serialno_header = "SN"
self.detect_numerics = True
self._column_count = 0
self._sign_mode = enums.SM_MINUS
self._width_exceed_policy = enums.WEP_WRAP
self._column_pad = " "
self.default_alignment = default_alignment
self.default_padding = default_padding
self.max_table_width = max_width
self._initialize_table(0)
self._table = []
def __setattr__(self, name, value):
attrs = ('left_border_char', 'right_border_char', 'top_border_char',
'bottom_border_char', 'header_separator_char',
'column_separator_char', 'row_separator_char',
'intersect_top_left', 'intersect_top_mid',
'intersect_top_right', 'intersect_header_left',
'intersect_header_mid', 'intersect_header_right',
'intersect_row_left', 'intersect_row_mid',
'intersect_row_right', 'intersect_bottom_left',
'intersect_bottom_mid', 'intersect_bottom_right')
if to_unicode(name) in attrs and not isinstance(value, basestring):
value_type = type(value).__name__
raise TypeError(("Expected {attr} to be of type 'str', "
"got '{attr_type}'").format(attr=name,
attr_type=value_type))
super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
"""Get the number of columns in the table(read only)"""
return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifulbable.WEP_WRAP An item is wrapped so every line fits
within it's column width.
beautifultable.WEP_STRIP An item is stripped to fit in it's
column.
beautifultable.WEP_ELLIPSIS An item is stripped to fit in it's
column and appended with ...(Ellipsis).
============================ =========================================
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable having all memebers an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.
Right Width of the padding specifies the number of characters
on the rigth of a column reserved for padding. By default It is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
"""Sets the column count of the table.
This method is called to set the number of columns for the first time.
Parameters
----------
column_count : int
number of columns in the table
"""
header = [''] * column_count
alignment = [self.default_alignment] * column_count
width = [0] * column_count
padding = [self.default_padding] * column_count
self._column_count = column_count
self._column_headers = HeaderData(self, header)
self._column_alignments = AlignmentMetaData(self, alignment)
self._column_widths = PositiveIntegerMetaData(self, width)
self._left_padding_widths = PositiveIntegerMetaData(self, padding)
self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
# TODO: Rename this method
# str is also an iterable but it is not a valid row, so
# an extra check is required for str
if not isinstance(value, Iterable) or isinstance(value, basestring):
raise TypeError("parameter must be an iterable")
row = list(value)
if init_table_if_required and self._column_count == 0:
self._initialize_table(len(row))
if len(row) != self._column_count:
raise ValueError(("'Expected iterable of length {}, "
"got {}").format(self._column_count, len(row)))
return row
def __getitem__(self, key):
"""Get a row, or a column, or a new table by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, returns a row.
If key is an `str`, returns iterator to a column with header `key`.
If key is a slice object, returns a new table sliced according to
rows.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, slice):
new_table = copy.copy(self)
# Every child of BaseRow class needs to be reassigned so that
# They contain reference of the new table rather than the old
# This was a cause of a nasty bug once.
new_table.column_headers = self.column_headers
new_table.column_alignments = self.column_alignments
new_table.column_widths = self.column_widths
new_table.left_padding_widths = self.left_padding_widths
new_table.right_padding_widths = self.left_padding_widths
new_table._table = []
for row in self._table[key]:
new_table.append_row(row)
return new_table
elif isinstance(key, int):
return self._table[key]
elif isinstance(key, basestring):
return self.get_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, updates a row.
If key is an `str`, appends `column` to the list with header as
`key`.
If key is a slice object, updates multiple rows according to slice
rules.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __len__(self):
return len(self._table)
def __contains__(self, key):
if isinstance(key, basestring):
return key in self._column_headers
elif isinstance(key, Iterable):
return key in self._table
else:
raise TypeError(("'key' must be str or Iterable, "
"not {}").format(type(key).__name__))
def __iter__(self):
return iter(self._table)
def __next__(self):
return next(self._table)
def __repr__(self):
return repr(self._table)
def __str__(self):
return self.get_string()
def set_style(self, style):
"""Set the style of the table from a predefined set of styles.
Parameters
----------
style: Style
It can be one of the following:
* beautifulTable.STYLE_DEFAULT
* beautifultable.STYLE_NONE
* beautifulTable.STYLE_DOTTED
* beautifulTable.STYLE_MYSQL
* beautifulTable.STYLE_SEPARATED
* beautifulTable.STYLE_COMPACT
* beautifulTable.STYLE_MARKDOWN
* beautifulTable.STYLE_RESTRUCTURED_TEXT
* beautifultable.STYLE_BOX
* beautifultable.STYLE_BOX_DOUBLED
* beautifultable.STYLE_BOX_ROUNDED
* beautifultable.STYLE_GRID
"""
if not isinstance(style, enums.Style):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Style)
error_msg = ("allowed values for style are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
style_template = style.value
self.left_border_char = style_template.left_border_char
self.right_border_char = style_template.right_border_char
self.top_border_char = style_template.top_border_char
self.bottom_border_char = style_template.bottom_border_char
self.header_separator_char = style_template.header_separator_char
self.column_separator_char = style_template.column_separator_char
self.row_separator_char = style_template.row_separator_char
self.intersect_top_left = style_template.intersect_top_left
self.intersect_top_mid = style_template.intersect_top_mid
self.intersect_top_right = style_template.intersect_top_right
self.intersect_header_left = style_template.intersect_header_left
self.intersect_header_mid = style_template.intersect_header_mid
self.intersect_header_right = style_template.intersect_header_right
self.intersect_row_left = style_template.intersect_row_left
self.intersect_row_mid = style_template.intersect_row_mid
self.intersect_row_right = style_template.intersect_row_right
self.intersect_bottom_left = style_template.intersect_bottom_left
self.intersect_bottom_mid = style_template.intersect_bottom_mid
self.intersect_bottom_right = style_template.intersect_bottom_right
def _calculate_column_widths(self):
"""Calculate width of column automatically based on data."""
table_width = self.get_table_width()
lpw, rpw = self._left_padding_widths, self._right_padding_widths
pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
max_widths = [0 for index in range(self._column_count)]
offset = table_width - sum(self._column_widths) + sum(pad_widths)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
for index, column in enumerate(zip(*self._table)):
max_length = 0
for i in column:
for j in to_unicode(i).split('\n'):
output_str = get_output_str(j, self.detect_numerics,
self.numeric_precision,
self.sign_mode.value)
max_length = max(max_length, termwidth(output_str))
for i in to_unicode(self._column_headers[index]).split('\n'):
output_str = get_output_str(i, self.detect_numerics,
self.numeric_precision,
self.sign_mode.value)
max_length = max(max_length, termwidth(output_str))
max_widths[index] += max_length
sum_ = sum(max_widths)
desired_sum = self._max_table_width - offset
# Set flag for columns who are within their fair share
temp_sum = 0
flag = [0] * len(max_widths)
for i, width in enumerate(max_widths):
if width <= int(desired_sum / self._column_count):
temp_sum += width
flag[i] = 1
else:
# Allocate atleast 1 character width to the column
temp_sum += 1
avail_space = desired_sum - temp_sum
actual_space = sum_ - temp_sum
shrinked_columns = {}
# Columns which exceed their fair share should be shrinked based on
# how much space is left for the table
for i, width in enumerate(max_widths):
self.column_widths[i] = width
if not flag[i]:
new_width = 1 + int((width-1) * avail_space / actual_space)
if new_width < width:
self.column_widths[i] = new_width
shrinked_columns[new_width] = i
# Divide any remaining space among shrinked columns
if shrinked_columns:
extra = (self._max_table_width
- offset
- sum(self.column_widths))
actual_space = sum(shrinked_columns)
if extra > 0:
for i, width in enumerate(sorted(shrinked_columns)):
index = shrinked_columns[width]
extra_width = int(width * extra / actual_space)
self.column_widths[i] += extra_width
if i == (len(shrinked_columns) - 1):
extra = (self._max_table_width
- offset
- sum(self.column_widths))
self.column_widths[index] += extra
for i in range(self.column_count):
self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self): # pragma : no cover
deprecation("'auto_calculate_width()' is deprecated")
self._calculate_column_widths()
def set_padding_widths(self, pad_width):
"""Set width for left and rigth padding of the columns of the table.
Parameters
----------
pad_width : array_like
pad widths for the columns.
"""
self.left_padding_widths = pad_width
self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
"""Stable sort of the table *IN-PLACE* with respect to a column.
Parameters
----------
key: int, str
index or header of the column. Normal list rules apply.
reverse : bool
If `True` then table is sorted as if each comparison was reversed.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError("'key' must either be 'int' or 'str'")
self._table.sort(key=operator.itemgetter(index), reverse=reverse)
def copy(self):
"""Return a shallow copy of the table.
Returns
-------
BeautifulTable:
shallow copy of the BeautifulTable instance.
"""
return self[:]
def get_column_header(self, index):
"""Get header of a column from it's index.
Parameters
----------
index: int
Normal list rules apply.
"""
return self._column_headers[index]
def get_column(self, key):
"""Return an iterator to a column.
Parameters
----------
key : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If key is not of type `int`, or `str`.
Returns
-------
iter:
Iterator to the specified column.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError(("key must be an int or str, "
"not {}").format(type(key).__name__))
return iter(map(operator.itemgetter(index), self._table))
def reverse(self):
"""Reverse the table row-wise *IN PLACE*."""
self._table.reverse()
def pop_row(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int
index of the row. Normal list rules apply.
"""
row = self._table.pop(index)
return row
def pop_column(self, index=-1):
    """Remove the column at `index` (default last).

    Note that unlike :meth:`pop_row`, nothing is returned.

    Parameters
    ----------
    index : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If index is not an instance of `int`, or `str`.

    IndexError:
        If Table is empty.
    """
    # Normalize *index* to an integer position; a string is treated as
    # a column header and resolved via get_column_index().
    if isinstance(index, int):
        pass
    elif isinstance(index, basestring):
        index = self.get_column_index(index)
    else:
        raise TypeError(("column index must be an integer or a string, "
                         "not {}").format(type(index).__name__))
    if self._column_count == 0:
        raise IndexError("pop from empty table")
    if self._column_count == 1:
        # This is the last column. So we should clear the table to avoid
        # empty rows
        self.clear(clear_metadata=True)
    else:
        # Not the last column. safe to pop from row
        # Keep every per-column metadata list in sync with the data rows.
        self._column_count -= 1
        self._column_alignments._pop(index)
        self._column_widths._pop(index)
        self._left_padding_widths._pop(index)
        self._right_padding_widths._pop(index)
        self._column_headers._pop(index)
        for row in self._table:
            row._pop(index)
def insert_row(self, index, row):
    """Insert a row before index in the table.

    Parameters
    ----------
    index : int
        List index rules apply

    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.

    ValueError:
        If size of `row` is inconsistent with the current number
        of columns.
    """
    # _validate_row also initializes the column metadata when this is
    # the very first row added to an empty table.
    row = self._validate_row(row)
    row_obj = RowData(self, row)
    self._table.insert(index, row_obj)
def append_row(self, row):
    """Append a row to end of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    # Appending is simply inserting just past the current last row.
    position = len(self._table)
    self.insert_row(position, row)
def update_row(self, key, value):
    """Update one or more rows of the table.

    If `key` is an int, the row at that index is replaced. If `key` is
    a slice, the selected rows are replaced by the rows in `value`.

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.

    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If index specified is out of range.

    TypeError:
        If `value` is of incorrect type.

    ValueError:
        If length of row does not matches number of columns.
    """
    if isinstance(key, int):
        # Single row: validate, wrap and replace in place. The table is
        # already initialized, so don't let validation (re)initialize it.
        row = self._validate_row(value, init_table_if_required=False)
        row_obj = RowData(self, row)
        self._table[key] = row_obj
    elif isinstance(key, slice):
        # Slice: validate and wrap every replacement row before the
        # assignment, so a bad row leaves the table unchanged.
        row_obj_list = []
        for row in value:
            row_ = self._validate_row(row, init_table_if_required=True)
            row_obj_list.append(RowData(self, row_))
        self._table[key] = row_obj_list
    else:
        raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
    """Update a column named `header` in the table.

    If length of column is smaller than number of rows, lets say
    `k`, only the first `k` values in the column is updated.

    Parameters
    ----------
    header : str
        Header of the column

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    KeyError:
        If no column exists with title `header`.
    """
    # Validate the type of *header* BEFORE using it for a lookup, so a
    # non-str header raises the documented TypeError instead of the
    # KeyError that get_column_index() would raise.
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    # zip() truncates to the shorter of (rows, column), which gives the
    # documented "only the first k values are updated" behaviour.
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.

    header : str
        Title of the column.

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # Empty table: this column defines the table's shape. The
        # column_headers property setter initializes all metadata.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        # Insert the new value into each row, remembering how far we got
        # so a too-short column can be rolled back.
        column_length = 0
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        if column_length == len(self._table) - 1:
            # Every row received a value: commit by updating metadata.
            # NOTE(review): a table with columns but zero rows takes the
            # rollback branch below (0 != -1) — confirm that edge case.
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                             "got {}").format(len(self._table),
                                              column_length + 1))
def append_column(self, header, column):
    """Append a column to end of the table.

    Parameters
    ----------
    header : str
        Title of the column

    column : iterable
        Any iterable of appropriate length.
    """
    # Appending is inserting just past the current last column.
    last = self._column_count
    self.insert_column(last, header, column)
def clear(self, clear_metadata=False):
    """Clear the contents of the table.

    Clear all rows of the table, and if specified clears all column
    specific data.

    Parameters
    ----------
    clear_metadata : bool, optional
        If it is true(default False), all metadata of columns such as their
        alignment, padding, width, etc. are also cleared and number of
        columns is set to 0.
    """
    # Slice-assignment empties the list in place (list.clear() is not
    # available on Python 2.7, which this file still supports).
    self._table[:] = []
    if clear_metadata:
        self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the table.
    Column width should be set prior to calling this method. This method
    detects intersection and handles it according to the values of
    `intersect_*_*` attributes.

    Parameters
    ----------
    char : str
        Character used to draw the line.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    width = self.get_table_width()
    try:
        # Tile *char* until it covers the full table width, then trim.
        # termwidth() accounts for characters wider than one cell.
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        # *char* has zero terminal width; fall back to blanks.
        line = [' '] * width
    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk every inner column boundary, overwriting the
                # separator position with the mid-intersection character.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    # NOTE: the inner loop reuses the name `i`, shadowing
                    # the boundary counter; harmless here since the outer
                    # `i` is not read again after this point.
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)
    return ''.join(line)
def _get_top_border(self):
    # Render the top edge with the characters configured for it.
    chars = (self.top_border_char,
             self.intersect_top_left,
             self.intersect_top_mid,
             self.intersect_top_right)
    return self._get_horizontal_line(*chars)

def get_top_border(self):  # pragma : no cover
    """Get the Top border of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    # Deprecated public alias kept for backward compatibility.
    deprecation("'get_top_border()' is deprecated")
    return self._get_top_border()
def _get_header_separator(self):
    # Render the line below the header row.
    chars = (self.header_separator_char,
             self.intersect_header_left,
             self.intersect_header_mid,
             self.intersect_header_right)
    return self._get_horizontal_line(*chars)

def get_header_separator(self):  # pragma : no cover
    """Get the Header separator of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    # Deprecated public alias kept for backward compatibility.
    deprecation("'get_header_separator()' is deprecated")
    return self._get_header_separator()
def _get_row_separator(self):
    # Render the line drawn between two consecutive data rows.
    chars = (self.row_separator_char,
             self.intersect_row_left,
             self.intersect_row_mid,
             self.intersect_row_right)
    return self._get_horizontal_line(*chars)

def get_row_separator(self):  # pragma : no cover
    """Get the Row separator of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    # Deprecated public alias kept for backward compatibility.
    deprecation("'get_row_separator()' is deprecated")
    return self._get_row_separator()
def _get_bottom_border(self):
    # Render the bottom edge with the characters configured for it.
    chars = (self.bottom_border_char,
             self.intersect_bottom_left,
             self.intersect_bottom_mid,
             self.intersect_bottom_right)
    return self._get_horizontal_line(*chars)

def get_bottom_border(self):  # pragma : no cover
    """Get the Bottom border of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    # Deprecated public alias kept for backward compatibility.
    deprecation("'get_bottom_border()' is deprecated")
    return self._get_bottom_border()
def get_table_width(self):
    """Get the width of the table as number of characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    # Total = content widths + inner separators + the two outer borders.
    separators = ((self._column_count - 1)
                  * termwidth(self.column_separator_char))
    borders = (termwidth(self.left_border_char)
               + termwidth(self.right_border_char))
    return sum(self._column_widths) + separators + borders
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """
    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''
    # Temporarily prepend the auto-generated serial-number column; it is
    # popped again just before returning so the table itself is left
    # unchanged.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))
    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()
    string_ = []
    # Drawing the top border
    if self.top_border_char:
        string_.append(
            self._get_top_border())
    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)
        if self.header_separator_char:
            string_.append(
                self._get_header_separator())
    # Printing rows
    first_row_encountered = False
    for row in self._table:
        # A row separator is drawn before every row except the first.
        if first_row_encountered and self.row_separator_char:
            string_.append(
                self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)
    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(
            self._get_bottom_border())
    # Undo the temporary serial-number column added above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)
    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.get_column | python | def get_column(self, key):
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError(("key must be an int or str, "
"not {}").format(type(key).__name__))
return iter(map(operator.itemgetter(index), self._table)) | Return an iterator to a column.
Parameters
----------
key : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If key is not of type `int`, or `str`.
Returns
-------
iter:
Iterator to the specified column. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L776-L802 | [
"def get_column_index(self, header):\n \"\"\"Get index of a column from it's header.\n\n Parameters\n ----------\n header: str\n header of the column.\n\n Raises\n ------\n ValueError:\n If no column could be found corresponding to `header`.\n \"\"\"\n try:\n index = self._column_headers.index(header)\n return index\n except ValueError:\n raise_suppressed(KeyError((\"'{}' is not a header for any \"\n \"column\").format(header)))\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
             default_alignment=enums.ALIGN_CENTER,
             default_padding=1):
    # Start from the default visual style; individual border/separator
    # characters can be overridden afterwards.
    self.set_style(enums.STYLE_DEFAULT)

    # Rendering options.
    self.numeric_precision = 3   # max digits after the decimal point
    self.serialno = False        # auto serial-number column disabled
    self.serialno_header = "SN"
    self.detect_numerics = True  # auto-detect numeric-looking strings

    self._column_count = 0
    self._sign_mode = enums.SM_MINUS
    self._width_exceed_policy = enums.WEP_WRAP
    self._column_pad = " "
    # These assignments go through property setters, which validate.
    self.default_alignment = default_alignment
    self.default_padding = default_padding
    self.max_table_width = max_width
    # Create empty per-column metadata, then the (empty) row store.
    self._initialize_table(0)
    self._table = []
def __setattr__(self, name, value):
attrs = ('left_border_char', 'right_border_char', 'top_border_char',
'bottom_border_char', 'header_separator_char',
'column_separator_char', 'row_separator_char',
'intersect_top_left', 'intersect_top_mid',
'intersect_top_right', 'intersect_header_left',
'intersect_header_mid', 'intersect_header_right',
'intersect_row_left', 'intersect_row_mid',
'intersect_row_right', 'intersect_bottom_left',
'intersect_bottom_mid', 'intersect_bottom_right')
if to_unicode(name) in attrs and not isinstance(value, basestring):
value_type = type(value).__name__
raise TypeError(("Expected {attr} to be of type 'str', "
"got '{attr_type}'").format(attr=name,
attr_type=value_type))
super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
"""Get the number of columns in the table(read only)"""
return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifulbable.WEP_WRAP An item is wrapped so every line fits
within it's column width.
beautifultable.WEP_STRIP An item is stripped to fit in it's
column.
beautifultable.WEP_ELLIPSIS An item is stripped to fit in it's
column and appended with ...(Ellipsis).
============================ =========================================
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable having all memebers an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.
Right Width of the padding specifies the number of characters
on the rigth of a column reserved for padding. By default It is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
"""Sets the column count of the table.
This method is called to set the number of columns for the first time.
Parameters
----------
column_count : int
number of columns in the table
"""
header = [''] * column_count
alignment = [self.default_alignment] * column_count
width = [0] * column_count
padding = [self.default_padding] * column_count
self._column_count = column_count
self._column_headers = HeaderData(self, header)
self._column_alignments = AlignmentMetaData(self, alignment)
self._column_widths = PositiveIntegerMetaData(self, width)
self._left_padding_widths = PositiveIntegerMetaData(self, padding)
self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
    """Check that *value* is a valid row and return it as a plain list.

    If the table currently has no columns and *init_table_if_required*
    is True, the column metadata is initialized from the row's length.

    Raises ``TypeError`` for non-iterables (including plain strings) and
    ``ValueError`` when the length does not match the column count.
    """
    # TODO: Rename this method
    # str is also an iterable but it is not a valid row, so
    # an extra check is required for str
    if not isinstance(value, Iterable) or isinstance(value, basestring):
        raise TypeError("parameter must be an iterable")

    row = list(value)
    if init_table_if_required and self._column_count == 0:
        self._initialize_table(len(row))

    if len(row) != self._column_count:
        raise ValueError(("'Expected iterable of length {}, "
                          "got {}").format(self._column_count, len(row)))
    return row
def __getitem__(self, key):
    """Get a row, or a column, or a new table by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, returns a row.
        If key is an `str`, returns iterator to a column with header `key`.
        If key is a slice object, returns a new table sliced according to
        rows.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, slice):
        new_table = copy.copy(self)

        # Every child of BaseRow class needs to be reassigned so that
        # They contain reference of the new table rather than the old
        # This was a cause of a nasty bug once.
        new_table.column_headers = self.column_headers
        new_table.column_alignments = self.column_alignments
        new_table.column_widths = self.column_widths
        new_table.left_padding_widths = self.left_padding_widths
        # BUGFIX: previously copied left_padding_widths here, silently
        # dropping the table's right padding in every sliced copy.
        new_table.right_padding_widths = self.right_padding_widths
        new_table._table = []
        for row in self._table[key]:
            new_table.append_row(row)
        return new_table
    elif isinstance(key, int):
        return self._table[key]
    elif isinstance(key, basestring):
        return self.get_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, updates a row.
If key is an `str`, appends `column` to the list with header as
`key`.
If key is a slice object, updates multiple rows according to slice
rules.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __len__(self):
return len(self._table)
def __contains__(self, key):
if isinstance(key, basestring):
return key in self._column_headers
elif isinstance(key, Iterable):
return key in self._table
else:
raise TypeError(("'key' must be str or Iterable, "
"not {}").format(type(key).__name__))
def __iter__(self):
return iter(self._table)
def __next__(self):
return next(self._table)
def __repr__(self):
return repr(self._table)
def __str__(self):
return self.get_string()
def set_style(self, style):
"""Set the style of the table from a predefined set of styles.
Parameters
----------
style: Style
It can be one of the following:
* beautifulTable.STYLE_DEFAULT
* beautifultable.STYLE_NONE
* beautifulTable.STYLE_DOTTED
* beautifulTable.STYLE_MYSQL
* beautifulTable.STYLE_SEPARATED
* beautifulTable.STYLE_COMPACT
* beautifulTable.STYLE_MARKDOWN
* beautifulTable.STYLE_RESTRUCTURED_TEXT
* beautifultable.STYLE_BOX
* beautifultable.STYLE_BOX_DOUBLED
* beautifultable.STYLE_BOX_ROUNDED
* beautifultable.STYLE_GRID
"""
if not isinstance(style, enums.Style):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Style)
error_msg = ("allowed values for style are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
style_template = style.value
self.left_border_char = style_template.left_border_char
self.right_border_char = style_template.right_border_char
self.top_border_char = style_template.top_border_char
self.bottom_border_char = style_template.bottom_border_char
self.header_separator_char = style_template.header_separator_char
self.column_separator_char = style_template.column_separator_char
self.row_separator_char = style_template.row_separator_char
self.intersect_top_left = style_template.intersect_top_left
self.intersect_top_mid = style_template.intersect_top_mid
self.intersect_top_right = style_template.intersect_top_right
self.intersect_header_left = style_template.intersect_header_left
self.intersect_header_mid = style_template.intersect_header_mid
self.intersect_header_right = style_template.intersect_header_right
self.intersect_row_left = style_template.intersect_row_left
self.intersect_row_mid = style_template.intersect_row_mid
self.intersect_row_right = style_template.intersect_row_right
self.intersect_bottom_left = style_template.intersect_bottom_left
self.intersect_bottom_mid = style_template.intersect_bottom_mid
self.intersect_bottom_right = style_template.intersect_bottom_right
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data."""
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    # Combined (left + right) padding per column; excluded from the
    # content-width math below and re-added at the very end.
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # Fixed overhead: borders, separators and padding.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)

    # Widest rendered line (cells may be multi-line) per column,
    # considering both data and the header.
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length

    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset

    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1

    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    shrinked_columns = {}

    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                # NOTE(review): keyed by new_width — two columns shrunk
                # to the same width would overwrite each other; confirm.
                shrinked_columns[new_width] = i

    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # NOTE(review): indexes with the enumeration counter `i`
                # rather than the looked-up column `index` — looks like a
                # bug; verify against upstream before changing.
                self.column_widths[i] += extra_width
                if i == (len(shrinked_columns) - 1):
                    # Last shrunk column absorbs any rounding remainder.
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra

    # Re-add the per-column padding that was excluded above.
    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self):  # pragma : no cover
    """Calculate column widths from the current data (DEPRECATED).

    Deprecated public wrapper kept for backward compatibility; width
    calculation now happens automatically inside `get_string`.
    """
    deprecation("'auto_calculate_width()' is deprecated")
    self._calculate_column_widths()
def set_padding_widths(self, pad_width):
    """Set width for left and right padding of the columns of the table.

    Both `left_padding_widths` and `right_padding_widths` are assigned
    the same values.

    Parameters
    ----------
    pad_width : array_like
        pad widths for the columns.
    """
    self.left_padding_widths = pad_width
    self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
    """Stable *IN-PLACE* sort of the table with respect to a column.

    Parameters
    ----------
    key: int, str
        index or header of the column. Normal list rules apply.
    reverse : bool
        If `True` then table is sorted as if each comparison was reversed.

    Raises
    ------
    TypeError:
        If `key` is neither an `int` nor a `str`.
    """
    # Resolve the column position first: headers are looked up, integer
    # indices are used as-is, anything else is rejected.
    if isinstance(key, basestring):
        column = self.get_column_index(key)
    elif isinstance(key, int):
        column = key
    else:
        raise TypeError("'key' must either be 'int' or 'str'")
    self._table.sort(key=operator.itemgetter(column), reverse=reverse)
def copy(self):
    """Return a shallow copy of the table.

    Returns
    -------
    BeautifulTable:
        shallow copy of the BeautifulTable instance.
    """
    # A full-range slice delegates to __getitem__, which builds a fresh
    # table containing the same rows (equivalent to `self[:]`).
    everything = slice(None, None, None)
    return self[everything]
def get_column_header(self, index):
    """Get header of a column from it's index.

    Parameters
    ----------
    index: int
        Normal list rules apply.

    Returns
    -------
    str:
        Header of the column at position `index`.
    """
    return self._column_headers[index]
def get_column_index(self, header):
    """Get index of a column from it's header.

    Parameters
    ----------
    header: str
        header of the column.

    Raises
    ------
    KeyError:
        If no column could be found corresponding to `header`.
        (The internal ValueError from `list.index` is converted to a
        KeyError via `raise_suppressed`.)
    """
    try:
        index = self._column_headers.index(header)
        return index
    except ValueError:
        raise_suppressed(KeyError(("'{}' is not a header for any "
                                   "column").format(header)))
def reverse(self):
    """Reverse the table row-wise *IN PLACE*."""
    # Slice assignment mutates the existing list object, matching the
    # in-place semantics of list.reverse().
    self._table[:] = self._table[::-1]
def pop_row(self, index=-1):
    """Remove and return the row at `index` (default last).

    Parameters
    ----------
    index : int
        index of the row. Normal list rules apply.

    Returns
    -------
    RowData:
        The removed row.
    """
    # Delegate directly to list.pop; it already handles negative indices
    # and raises IndexError for out-of-range values.
    return self._table.pop(index)
def pop_column(self, index=-1):
    """Remove the column at `index` (default last).

    Note that, unlike `pop_row`, nothing is returned.

    Parameters
    ----------
    index : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If index is not an instance of `int`, or `str`.
    IndexError:
        If Table is empty.
    """
    if isinstance(index, int):
        pass
    elif isinstance(index, basestring):
        # A header was passed; translate it to a positional index.
        index = self.get_column_index(index)
    else:
        raise TypeError(("column index must be an integer or a string, "
                         "not {}").format(type(index).__name__))
    if self._column_count == 0:
        raise IndexError("pop from empty table")
    if self._column_count == 1:
        # This is the last column. So we should clear the table to avoid
        # empty rows
        self.clear(clear_metadata=True)
    else:
        # Not the last column. safe to pop from row
        self._column_count -= 1
        self._column_alignments._pop(index)
        self._column_widths._pop(index)
        self._left_padding_widths._pop(index)
        self._right_padding_widths._pop(index)
        self._column_headers._pop(index)
        for row in self._table:
            row._pop(index)
def insert_row(self, index, row):
    """Insert a row before index in the table.

    Parameters
    ----------
    index : int
        List index rules apply

    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.

    ValueError:
        If size of `row` is inconsistent with the current number
        of columns.
    """
    # Validation also initializes the table's column count on first use.
    validated = self._validate_row(row)
    self._table.insert(index, RowData(self, validated))
def append_row(self, row):
    """Append a row to end of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    # Inserting at position len(self) (== number of rows) appends.
    self.insert_row(len(self), row)
def update_row(self, key, value):
    """Update a row, or multiple rows if `key` is a slice.

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.

    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If index specified is out of range.

    TypeError:
        If `value` is of incorrect type.

    ValueError:
        If length of row does not matches number of columns.
    """
    if isinstance(key, int):
        row = self._validate_row(value, init_table_if_required=False)
        row_obj = RowData(self, row)
        self._table[key] = row_obj
    elif isinstance(key, slice):
        # Each element of `value` is itself a row; validate them all
        # before assigning the whole slice.
        row_obj_list = []
        for row in value:
            row_ = self._validate_row(row, init_table_if_required=True)
            row_obj_list.append(RowData(self, row_))
        self._table[key] = row_obj_list
    else:
        raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
    """Update a column named `header` in the table.

    If length of column is smaller than number of rows, lets say
    `k`, only the first `k` values in the column is updated.

    Parameters
    ----------
    header : str
        Header of the column

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    KeyError:
        If no column exists with title `header`.
    """
    # Validate the type of `header` *before* looking it up.  The original
    # code resolved the index first, so a non-string header surfaced as a
    # spurious KeyError instead of the intended TypeError.
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    # zip stops at the shorter of the two, so a short `column` only
    # updates the first k rows, as documented.
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.

    header : str
        Title of the column.

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # First column: it also defines the rows of the table.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        # Index of the last row actually updated.  Must start at -1 (the
        # original code used 0): with 0, an empty `column` was mistaken
        # for one that had filled row 0, which either inserted column
        # metadata without any row data (1-row table) or made the
        # rollback below pop an element that was never inserted.
        column_length = -1
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        if column_length == len(self._table) - 1:
            # Every row received a value; commit the column metadata.
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))
def append_column(self, header, column):
    """Append a column to end of the table.

    Parameters
    ----------
    header : str
        Title of the column

    column : iterable
        Any iterable of appropriate length.
    """
    # Inserting at position `column_count` places it after the last column.
    self.insert_column(self._column_count, header, column)
def clear(self, clear_metadata=False):
    """Clear the contents of the table.

    Clear all rows of the table, and if specified clears all column
    specific data.

    Parameters
    ----------
    clear_metadata : bool, optional
        If it is true(default False), all metadata of columns such as their
        alignment, padding, width, etc. are also cleared and number of
        columns is set to 0.
    """
    # list.clear() does not exist in Python 2.7; `del lst[:]` empties the
    # list in place instead.
    del self._table[:]
    if clear_metadata:
        self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the table.
    Column width should be set prior to calling this method. This method
    detects intersection and handles it according to the values of
    `intersect_*_*` attributes.

    Parameters
    ----------
    char : str
        Character used to draw the line.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    width = self.get_table_width()
    try:
        # Repeat `char` enough times to cover the table, then clip to
        # exactly `width` characters.
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        # `char` has zero terminal width (e.g. empty string); fall back
        # to a blank line of the right width.
        line = [' '] * width

    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                # Overwrite the leftmost characters with the left
                # intersection character.
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                # Overwrite the rightmost characters with the right
                # intersection character.
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk across the line, overwriting the position of each
                # column separator with the mid intersection character.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)

    return ''.join(line)
def _get_top_border(self):
    # Top border: drawn with `top_border_char` and joined to vertical
    # lines by the `intersect_top_*` characters.
    return self._get_horizontal_line(self.top_border_char,
                                     self.intersect_top_left,
                                     self.intersect_top_mid,
                                     self.intersect_top_right)
def get_top_border(self):  # pragma : no cover
    """Get the Top border of table (DEPRECATED).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    deprecation("'get_top_border()' is deprecated")
    return self._get_top_border()
def _get_header_separator(self):
    # Line drawn between the header row and the first data row.
    return self._get_horizontal_line(self.header_separator_char,
                                     self.intersect_header_left,
                                     self.intersect_header_mid,
                                     self.intersect_header_right)
def get_header_separator(self):  # pragma : no cover
    """Get the Header separator of table (DEPRECATED).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    deprecation("'get_header_separator()' is deprecated")
    return self._get_header_separator()
def _get_row_separator(self):
    # Line drawn between two consecutive data rows.
    return self._get_horizontal_line(self.row_separator_char,
                                     self.intersect_row_left,
                                     self.intersect_row_mid,
                                     self.intersect_row_right)
def get_row_separator(self):  # pragma : no cover
    """Get the Row separator of table (DEPRECATED).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    deprecation("'get_row_separator()' is deprecated")
    return self._get_row_separator()
def _get_bottom_border(self):
    # Bottom border: drawn with `bottom_border_char` and joined to
    # vertical lines by the `intersect_bottom_*` characters.
    return self._get_horizontal_line(self.bottom_border_char,
                                     self.intersect_bottom_left,
                                     self.intersect_bottom_mid,
                                     self.intersect_bottom_right)
def get_bottom_border(self):  # pragma : no cover
    """Get the Bottom border of table (DEPRECATED).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    deprecation("'get_bottom_border()' is deprecated")
    return self._get_bottom_border()
def get_table_width(self):
    """Get the width of the table as number of characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    # Total = all column widths + one separator between each pair of
    # adjacent columns + the two outer borders.
    separator_width = termwidth(self.column_separator_char)
    total = sum(self._column_widths)
    total += (self._column_count - 1) * separator_width
    total += termwidth(self.left_border_char)
    total += termwidth(self.right_border_char)
    return total
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time ,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """

    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''

    # Temporarily prepend the auto-generated serial-number column; it is
    # popped again at the end so the table itself is left unmodified.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))

    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()

    string_ = []

    # Drawing the top border
    if self.top_border_char:
        string_.append(
            self._get_top_border())

    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)

        if self.header_separator_char:
            string_.append(
                self._get_header_separator())

    # Printing rows
    first_row_encountered = False
    for row in self._table:
        # Row separators go *between* rows, so skip before the first one.
        if first_row_encountered and self.row_separator_char:
            string_.append(
                self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)

    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(
            self._get_bottom_border())

    # Remove the temporary serial-number column added above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)

    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.pop_column | python | def pop_column(self, index=-1):
if isinstance(index, int):
pass
elif isinstance(index, basestring):
index = self.get_column_index(index)
else:
raise TypeError(("column index must be an integer or a string, "
"not {}").format(type(index).__name__))
if self._column_count == 0:
raise IndexError("pop from empty table")
if self._column_count == 1:
# This is the last column. So we should clear the table to avoid
# empty rows
self.clear(clear_metadata=True)
else:
# Not the last column. safe to pop from row
self._column_count -= 1
self._column_alignments._pop(index)
self._column_widths._pop(index)
self._left_padding_widths._pop(index)
self._right_padding_widths._pop(index)
self._column_headers._pop(index)
for row in self._table:
row._pop(index) | Remove and return row at index (default last).
Parameters
----------
index : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If index is not an instance of `int`, or `str`.
IndexError:
If Table is empty. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L819-L858 | [
"def get_column_index(self, header):\n \"\"\"Get index of a column from it's header.\n\n Parameters\n ----------\n header: str\n header of the column.\n\n Raises\n ------\n ValueError:\n If no column could be found corresponding to `header`.\n \"\"\"\n try:\n index = self._column_headers.index(header)\n return index\n except ValueError:\n raise_suppressed(KeyError((\"'{}' is not a header for any \"\n \"column\").format(header)))\n",
"def clear(self, clear_metadata=False):\n \"\"\"Clear the contents of the table.\n\n Clear all rows of the table, and if specified clears all column\n specific data.\n\n Parameters\n ----------\n clear_metadata : bool, optional\n If it is true(default False), all metadata of columns such as their\n alignment, padding, width, etc. are also cleared and number of\n columns is set to 0.\n \"\"\"\n # Cannot use clear method to support Python 2.7\n del self._table[:]\n if clear_metadata:\n self._initialize_table(0)\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
default_alignment=enums.ALIGN_CENTER,
default_padding=1):
self.set_style(enums.STYLE_DEFAULT)
self.numeric_precision = 3
self.serialno = False
self.serialno_header = "SN"
self.detect_numerics = True
self._column_count = 0
self._sign_mode = enums.SM_MINUS
self._width_exceed_policy = enums.WEP_WRAP
self._column_pad = " "
self.default_alignment = default_alignment
self.default_padding = default_padding
self.max_table_width = max_width
self._initialize_table(0)
self._table = []
def __setattr__(self, name, value):
attrs = ('left_border_char', 'right_border_char', 'top_border_char',
'bottom_border_char', 'header_separator_char',
'column_separator_char', 'row_separator_char',
'intersect_top_left', 'intersect_top_mid',
'intersect_top_right', 'intersect_header_left',
'intersect_header_mid', 'intersect_header_right',
'intersect_row_left', 'intersect_row_mid',
'intersect_row_right', 'intersect_bottom_left',
'intersect_bottom_mid', 'intersect_bottom_right')
if to_unicode(name) in attrs and not isinstance(value, basestring):
value_type = type(value).__name__
raise TypeError(("Expected {attr} to be of type 'str', "
"got '{attr_type}'").format(attr=name,
attr_type=value_type))
super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
"""Get the number of columns in the table(read only)"""
return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifulbable.WEP_WRAP An item is wrapped so every line fits
within it's column width.
beautifultable.WEP_STRIP An item is stripped to fit in it's
column.
beautifultable.WEP_ELLIPSIS An item is stripped to fit in it's
column and appended with ...(Ellipsis).
============================ =========================================
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable having all memebers an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.
Right Width of the padding specifies the number of characters
on the rigth of a column reserved for padding. By default It is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
"""Sets the column count of the table.
This method is called to set the number of columns for the first time.
Parameters
----------
column_count : int
number of columns in the table
"""
header = [''] * column_count
alignment = [self.default_alignment] * column_count
width = [0] * column_count
padding = [self.default_padding] * column_count
self._column_count = column_count
self._column_headers = HeaderData(self, header)
self._column_alignments = AlignmentMetaData(self, alignment)
self._column_widths = PositiveIntegerMetaData(self, width)
self._left_padding_widths = PositiveIntegerMetaData(self, padding)
self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
# TODO: Rename this method
# str is also an iterable but it is not a valid row, so
# an extra check is required for str
if not isinstance(value, Iterable) or isinstance(value, basestring):
raise TypeError("parameter must be an iterable")
row = list(value)
if init_table_if_required and self._column_count == 0:
self._initialize_table(len(row))
if len(row) != self._column_count:
raise ValueError(("'Expected iterable of length {}, "
"got {}").format(self._column_count, len(row)))
return row
def __getitem__(self, key):
"""Get a row, or a column, or a new table by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, returns a row.
If key is an `str`, returns iterator to a column with header `key`.
If key is a slice object, returns a new table sliced according to
rows.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, slice):
new_table = copy.copy(self)
# Every child of BaseRow class needs to be reassigned so that
# They contain reference of the new table rather than the old
# This was a cause of a nasty bug once.
new_table.column_headers = self.column_headers
new_table.column_alignments = self.column_alignments
new_table.column_widths = self.column_widths
new_table.left_padding_widths = self.left_padding_widths
new_table.right_padding_widths = self.left_padding_widths
new_table._table = []
for row in self._table[key]:
new_table.append_row(row)
return new_table
elif isinstance(key, int):
return self._table[key]
elif isinstance(key, basestring):
return self.get_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, updates a row.
If key is an `str`, appends `column` to the list with header as
`key`.
If key is a slice object, updates multiple rows according to slice
rules.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __len__(self):
return len(self._table)
def __contains__(self, key):
if isinstance(key, basestring):
return key in self._column_headers
elif isinstance(key, Iterable):
return key in self._table
else:
raise TypeError(("'key' must be str or Iterable, "
"not {}").format(type(key).__name__))
def __iter__(self):
return iter(self._table)
def __next__(self):
return next(self._table)
def __repr__(self):
return repr(self._table)
def __str__(self):
return self.get_string()
def set_style(self, style):
    """Set the style of the table from a predefined set of styles.

    Parameters
    ----------
    style: Style
        It can be one of the following:

        * beautifultable.STYLE_DEFAULT
        * beautifultable.STYLE_NONE
        * beautifultable.STYLE_DOTTED
        * beautifultable.STYLE_MYSQL
        * beautifultable.STYLE_SEPARATED
        * beautifultable.STYLE_COMPACT
        * beautifultable.STYLE_MARKDOWN
        * beautifultable.STYLE_RESTRUCTURED_TEXT
        * beautifultable.STYLE_BOX
        * beautifultable.STYLE_BOX_DOUBLED
        * beautifultable.STYLE_BOX_ROUNDED
        * beautifultable.STYLE_GRID

    Raises
    ------
    ValueError
        If `style` is not a member of the `enums.Style` enum.
    """
    if not isinstance(style, enums.Style):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Style)
        error_msg = ("allowed values for style are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    # Copy every border, separator and intersection character from the
    # selected style template onto this table instance.
    style_template = style.value
    self.left_border_char = style_template.left_border_char
    self.right_border_char = style_template.right_border_char
    self.top_border_char = style_template.top_border_char
    self.bottom_border_char = style_template.bottom_border_char
    self.header_separator_char = style_template.header_separator_char
    self.column_separator_char = style_template.column_separator_char
    self.row_separator_char = style_template.row_separator_char
    self.intersect_top_left = style_template.intersect_top_left
    self.intersect_top_mid = style_template.intersect_top_mid
    self.intersect_top_right = style_template.intersect_top_right
    self.intersect_header_left = style_template.intersect_header_left
    self.intersect_header_mid = style_template.intersect_header_mid
    self.intersect_header_right = style_template.intersect_header_right
    self.intersect_row_left = style_template.intersect_row_left
    self.intersect_row_mid = style_template.intersect_row_mid
    self.intersect_row_right = style_template.intersect_row_right
    self.intersect_bottom_left = style_template.intersect_bottom_left
    self.intersect_bottom_mid = style_template.intersect_bottom_mid
    self.intersect_bottom_right = style_template.intersect_bottom_right
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data.

    Each column width starts as the longest rendered line among its
    cells and its header. Columns exceeding their fair share of
    ``max_table_width`` are then shrunk proportionally to the remaining
    space, and any rounding leftovers are redistributed among the
    shrunk columns.
    """
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # `offset` is the width consumed by everything that is not cell
    # content: borders, separators and padding.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)

    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length

    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset

    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1

    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    # Maps the shrunk width of a column to that column's index.
    # NOTE(review): two columns shrunk to the same width collide on
    # this key and only the later one is remembered -- confirm.
    shrinked_columns = {}

    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                shrinked_columns[new_width] = i

    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)

        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # BUG FIX: the extra width belongs to the shrunk column
                # itself (`index`), not to column `i` -- `i` is merely
                # the position in the enumeration and is unrelated. The
                # remainder adjustment below already used `index`.
                self.column_widths[index] += extra_width
                if i == (len(shrinked_columns) - 1):
                    # Give any rounding remainder to the last column.
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra

    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self):  # pragma : no cover
    """Deprecated public alias of `_calculate_column_widths`."""
    deprecation("'auto_calculate_width()' is deprecated")
    self._calculate_column_widths()
def set_padding_widths(self, pad_width):
    """Set width for left and right padding of the columns of the table.

    Parameters
    ----------
    pad_width : array_like
        pad widths for the columns. The same widths are applied to both
        the left and the right padding.
    """
    self.left_padding_widths = pad_width
    self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
    """Stable sort of the table *IN-PLACE* with respect to a column.

    Parameters
    ----------
    key: int, str
        index or header of the column. Normal list rules apply.
    reverse : bool
        If `True` then table is sorted as if each comparison was
        reversed.

    Raises
    ------
    TypeError
        If `key` is neither an int nor a str.
    """
    if isinstance(key, basestring):
        column_index = self.get_column_index(key)
    elif isinstance(key, int):
        column_index = key
    else:
        raise TypeError("'key' must either be 'int' or 'str'")
    self._table.sort(key=operator.itemgetter(column_index),
                     reverse=reverse)
def copy(self):
    """Return a shallow copy of the table.

    Returns
    -------
    BeautifulTable:
        shallow copy of the BeautifulTable instance.
    """
    # A full slice goes through __getitem__, which rebuilds the
    # per-table metadata for the new instance.
    return self[:]
def get_column_header(self, index):
    """Get header of a column from it's index.

    Parameters
    ----------
    index: int
        Normal list rules apply.

    Returns
    -------
    str
        Header of the column at `index`.
    """
    return self._column_headers[index]
def get_column_index(self, header):
    """Get index of a column from it's header.

    Parameters
    ----------
    header: str
        header of the column.

    Raises
    ------
    KeyError:
        If no column could be found corresponding to `header`.
    """
    try:
        return self._column_headers.index(header)
    except ValueError:
        error = KeyError(("'{}' is not a header for any "
                          "column").format(header))
        raise_suppressed(error)
def get_column(self, key):
    """Return an iterator to a column.

    Parameters
    ----------
    key : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If key is not of type `int`, or `str`.

    Returns
    -------
    iter:
        Iterator to the specified column.
    """
    if isinstance(key, basestring):
        column_index = self.get_column_index(key)
    elif isinstance(key, int):
        column_index = key
    else:
        raise TypeError(("key must be an int or str, "
                         "not {}").format(type(key).__name__))
    # Lazily pick the cell at `column_index` out of each row.
    return (row[column_index] for row in self._table)
def reverse(self):
    """Reverse the table row-wise *IN PLACE*."""
    self._table.reverse()
def pop_row(self, index=-1):
    """Remove and return row at index (default last).

    Parameters
    ----------
    index : int
        index of the row. Normal list rules apply.

    Returns
    -------
    RowData
        The removed row.
    """
    return self._table.pop(index)
def insert_row(self, index, row):
    """Insert a row before index in the table.

    Parameters
    ----------
    index : int
        List index rules apply
    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.
    ValueError:
        If size of `row` is inconsistent with the current number
        of columns.
    """
    validated = self._validate_row(row)
    self._table.insert(index, RowData(self, validated))
def append_row(self, row):
    """Append a row to end of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    # Inserting at position len(self) is equivalent to appending.
    self.insert_row(len(self), row)
def update_row(self, key, value):
    """Update one row (int key) or several rows (slice key).

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.
    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If index specified is out of range.
    TypeError:
        If `value` is of incorrect type.
    ValueError:
        If length of row does not matches number of columns.
    """
    if isinstance(key, int):
        validated = self._validate_row(value,
                                       init_table_if_required=False)
        self._table[key] = RowData(self, validated)
    elif isinstance(key, slice):
        new_rows = [
            RowData(self,
                    self._validate_row(r, init_table_if_required=True))
            for r in value
        ]
        self._table[key] = new_rows
    else:
        raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
    """Update a column named `header` in the table.

    If length of column is smaller than number of rows, lets say
    `k`, only the first `k` values in the column is updated.

    Parameters
    ----------
    header : str
        Header of the column
    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.
    KeyError:
        If no column exists with title `header`.
    """
    # BUG FIX: validate the type of `header` *before* looking it up.
    # Previously get_column_index(header) ran first, so a non-str
    # header raised KeyError instead of the documented TypeError and
    # the type check was unreachable.
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    # zip truncates at the shorter of (rows, column), so a short
    # `column` only updates the leading rows, as documented.
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.
    header : str
        Title of the column.
    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.
    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # First column ever: it defines the row count of the table.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        # Index of the last row that received a value from `column`.
        # BUG FIX: start at -1, not 0. With 0, an empty `column` was
        # indistinguishable from one that filled the first row: it
        # falsely passed the completeness check on a 1-row table and
        # made the rollback loop index self._table[0] on a 0-row table.
        column_length = -1
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        if column_length == len(self._table) - 1:
            # Every row was filled; commit the column metadata.
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index,
                                            self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index,
                                              self.default_padding)
            self._right_padding_widths._insert(index,
                                               self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))
def append_column(self, header, column):
    """Append a column to end of the table.

    Parameters
    ----------
    header : str
        Title of the column
    column : iterable
        Any iterable of appropriate length.
    """
    # Inserting at position column_count is equivalent to appending.
    self.insert_column(self.column_count, header, column)
def clear(self, clear_metadata=False):
    """Clear the contents of the table.

    Removes every row and, if requested, resets all column-specific
    metadata as well.

    Parameters
    ----------
    clear_metadata : bool, optional
        If True (default False), all metadata of columns such as their
        alignment, padding, width, etc. are also cleared and number of
        columns is set to 0.
    """
    # Slice assignment instead of list.clear() keeps Python 2.7 support.
    self._table[:] = []
    if clear_metadata:
        self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the
    table. Column width should be set prior to calling this method.
    This method detects intersection and handles it according to the
    values of the `intersect_*_*` attributes.

    Parameters
    ----------
    char : str
        Character used to draw the line.
    intersect_left : str
        Character drawn where the line meets the left border.
    intersect_mid : str
        Character drawn where the line crosses a column separator.
    intersect_right : str
        Character drawn where the line meets the right border.

    Returns
    -------
    str
        The rendered horizontal line.
    """
    width = self.get_table_width()
    # Tile `char` to the full table width; a zero-width char falls back
    # to spaces.
    try:
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        line = [' '] * width

    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk the line, overwriting the cells where each column
                # separator falls with the mid-intersection character.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)

    return ''.join(line)
def _get_top_border(self):
    """Render the top border line of the table."""
    return self._get_horizontal_line(self.top_border_char,
                                     self.intersect_top_left,
                                     self.intersect_top_mid,
                                     self.intersect_top_right)
def get_top_border(self):  # pragma : no cover
    """Get the Top border of table (deprecated public wrapper).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    deprecation("'get_top_border()' is deprecated")
    return self._get_top_border()
def _get_header_separator(self):
    """Render the line separating the header from the data rows."""
    return self._get_horizontal_line(self.header_separator_char,
                                     self.intersect_header_left,
                                     self.intersect_header_mid,
                                     self.intersect_header_right)
def get_header_separator(self):  # pragma : no cover
    """Get the Header separator of table (deprecated public wrapper).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    deprecation("'get_header_separator()' is deprecated")
    return self._get_header_separator()
def _get_row_separator(self):
    """Render the line separating two consecutive data rows."""
    return self._get_horizontal_line(self.row_separator_char,
                                     self.intersect_row_left,
                                     self.intersect_row_mid,
                                     self.intersect_row_right)
def get_row_separator(self):  # pragma : no cover
    """Get the Row separator of table (deprecated public wrapper).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    deprecation("'get_row_separator()' is deprecated")
    return self._get_row_separator()
def _get_bottom_border(self):
    """Render the bottom border line of the table."""
    return self._get_horizontal_line(self.bottom_border_char,
                                     self.intersect_bottom_left,
                                     self.intersect_bottom_mid,
                                     self.intersect_bottom_right)
def get_bottom_border(self):  # pragma : no cover
    """Get the Bottom border of table (deprecated public wrapper).

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    deprecation("'get_bottom_border()' is deprecated")
    return self._get_bottom_border()
def get_table_width(self):
    """Get the width of the table as number of characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    separators_width = ((self._column_count - 1)
                        * termwidth(self.column_separator_char))
    return (sum(self._column_widths)
            + separators_width
            + termwidth(self.left_border_char)
            + termwidth(self.right_border_char))
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """

    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''

    # Temporarily prepend the auto-generated serial-number column; it
    # is popped again at the end so the table itself stays unchanged.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))

    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()

    string_ = []

    # Drawing the top border
    if self.top_border_char:
        string_.append(
            self._get_top_border())

    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)

        if self.header_separator_char:
            string_.append(
                self._get_header_separator())

    # Printing rows
    first_row_encountered = False
    for row in self._table:
        if first_row_encountered and self.row_separator_char:
            string_.append(
                self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)

    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(
            self._get_bottom_border())

    # Remove the temporary serial-number column inserted above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)

    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.insert_row | python | def insert_row(self, index, row):
row = self._validate_row(row)
row_obj = RowData(self, row)
self._table.insert(index, row_obj) | Insert a row before index in the table.
Parameters
----------
index : int
List index rules apply
row : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `row` is not an iterable.
ValueError:
If size of `row` is inconsistent with the current number
of columns. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L860-L882 | [
"def _validate_row(self, value, init_table_if_required=True):\n # TODO: Rename this method\n # str is also an iterable but it is not a valid row, so\n # an extra check is required for str\n if not isinstance(value, Iterable) or isinstance(value, basestring):\n raise TypeError(\"parameter must be an iterable\")\n\n row = list(value)\n if init_table_if_required and self._column_count == 0:\n self._initialize_table(len(row))\n\n if len(row) != self._column_count:\n raise ValueError((\"'Expected iterable of length {}, \"\n \"got {}\").format(self._column_count, len(row)))\n return row\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
    Character used to draw the line separating Header from data.
row_separator_char : str
    Character used to draw the line separating two rows.
column_separator_char : str
    Character used to draw the line separating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
             default_alignment=enums.ALIGN_CENTER,
             default_padding=1):
    """Initialize an empty table; see the class docstring for details."""
    # set_style must run first: it populates all the border/separator
    # characters that later attribute assignments depend on.
    self.set_style(enums.STYLE_DEFAULT)

    self.numeric_precision = 3
    self.serialno = False
    self.serialno_header = "SN"
    self.detect_numerics = True

    self._column_count = 0
    self._sign_mode = enums.SM_MINUS
    self._width_exceed_policy = enums.WEP_WRAP
    self._column_pad = " "
    self.default_alignment = default_alignment
    self.default_padding = default_padding
    self.max_table_width = max_width
    self._initialize_table(0)
    self._table = []
def __setattr__(self, name, value):
    """Intercept attribute writes to type-check style characters.

    Every border/separator/intersection attribute must be a `str`;
    assignments to any other attribute fall through unchanged.
    """
    attrs = ('left_border_char', 'right_border_char', 'top_border_char',
             'bottom_border_char', 'header_separator_char',
             'column_separator_char', 'row_separator_char',
             'intersect_top_left', 'intersect_top_mid',
             'intersect_top_right', 'intersect_header_left',
             'intersect_header_mid', 'intersect_header_right',
             'intersect_row_left', 'intersect_row_mid',
             'intersect_row_right', 'intersect_bottom_left',
             'intersect_bottom_mid', 'intersect_bottom_right')
    if to_unicode(name) in attrs and not isinstance(value, basestring):
        value_type = type(value).__name__
        raise TypeError(("Expected {attr} to be of type 'str', "
                         "got '{attr_type}'").format(attr=name,
                                                     attr_type=value_type))
    super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
"""Get the number of columns in the table(read only)"""
return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.WEP_WRAP      An item is wrapped so every line fits
                             within it's column width.
beautifultable.WEP_STRIP An item is stripped to fit in it's
column.
beautifultable.WEP_ELLIPSIS An item is stripped to fit in it's
column and appended with ...(Ellipsis).
============================ =========================================
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable with all members being instances of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.
Right Width of the padding specifies the number of characters
on the right of a column reserved for padding. By default it is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
    """Set the column count of the table.

    This method is called to set the number of columns for the first
    time, resetting all per-column metadata to defaults.

    Parameters
    ----------
    column_count : int
        number of columns in the table
    """
    self._column_count = column_count
    blank_headers = [''] * column_count
    self._column_headers = HeaderData(self, blank_headers)
    self._column_alignments = AlignmentMetaData(
        self, [self.default_alignment] * column_count)
    self._column_widths = PositiveIntegerMetaData(
        self, [0] * column_count)
    pad = [self.default_padding] * column_count
    self._left_padding_widths = PositiveIntegerMetaData(self, pad)
    self._right_padding_widths = PositiveIntegerMetaData(self, pad)
def _validate_row(self, value, init_table_if_required=True):
    """Validate and normalize a candidate row to a plain list.

    str is also an iterable but it is not a valid row, so it is
    rejected explicitly before the generic iterable check.
    TODO: Rename this method.
    """
    if isinstance(value, basestring) or not isinstance(value, Iterable):
        raise TypeError("parameter must be an iterable")

    row = list(value)
    # The first row of a fresh table fixes the column count.
    if init_table_if_required and self._column_count == 0:
        self._initialize_table(len(row))

    if len(row) != self._column_count:
        raise ValueError(("'Expected iterable of length {}, "
                          "got {}").format(self._column_count, len(row)))
    return row
def __getitem__(self, key):
    """Get a row, or a column, or a new table by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, returns a row.
        If key is an `str`, returns iterator to a column with header
        `key`.
        If key is a slice object, returns a new table sliced according
        to rows.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, slice):
        new_table = copy.copy(self)

        # Every child of BaseRow class needs to be reassigned so that
        # they contain a reference to the new table rather than the old
        # one. This was a cause of a nasty bug once.
        new_table.column_headers = self.column_headers
        new_table.column_alignments = self.column_alignments
        new_table.column_widths = self.column_widths
        new_table.left_padding_widths = self.left_padding_widths
        # BUG FIX: the sliced copy previously received the *left*
        # padding widths as its right padding widths.
        new_table.right_padding_widths = self.right_padding_widths
        new_table._table = []
        for row in self._table[key]:
            new_table.append_row(row)
        return new_table
    elif isinstance(key, int):
        return self._table[key]
    elif isinstance(key, basestring):
        return self.get_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
    """Update a row, a column, or multiple rows by slicing.

    Parameters
    ----------
    key : int, slice, str
        Integer and slice keys update rows (slice rules apply); a
        string key updates the column whose header equals `key`.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    """
    if isinstance(key, basestring):
        self.update_column(key, value)
        return
    if isinstance(key, (int, slice)):
        self.update_row(key, value)
        return
    raise TypeError(("table indices must be integers, strings or "
                     "slices, not {}").format(type(key).__name__))
def __len__(self):
    """Return the number of rows in the table."""
    return len(self._table)
def __contains__(self, key):
    """Membership test: headers for `str` keys, rows for other iterables.

    Raises
    ------
    TypeError
        If `key` is neither a string nor an iterable.
    """
    if isinstance(key, basestring):
        return key in self._column_headers
    if isinstance(key, Iterable):
        return key in self._table
    raise TypeError(("'key' must be str or Iterable, "
                     "not {}").format(type(key).__name__))
def __iter__(self):
    # Iterating the table iterates its rows.
    return iter(self._table)

def __next__(self):
    # NOTE(review): self._table is a list and lists are not iterators,
    # so this always raises TypeError when called. Looks vestigial —
    # confirm before relying on the table itself being an iterator.
    return next(self._table)

def __repr__(self):
    # Delegate to the underlying row list.
    return repr(self._table)

def __str__(self):
    # Render with current settings; see get_string() for options.
    return self.get_string()
def set_style(self, style):
    """Set the look of the table from a predefined set of styles.

    Parameters
    ----------
    style: Style
        One of the beautifultable STYLE_* constants: DEFAULT, NONE,
        DOTTED, MYSQL, SEPARATED, COMPACT, MARKDOWN,
        RESTRUCTURED_TEXT, BOX, BOX_DOUBLED, BOX_ROUNDED, GRID.

    Raises
    ------
    ValueError
        If *style* is not a member of the Style enum.
    """
    if not isinstance(style, enums.Style):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Style)
        error_msg = ("allowed values for style are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    template = style.value
    # Copy every drawing character from the style template; assignment
    # goes through __setattr__, which enforces `str` values.
    for attr in ('left_border_char', 'right_border_char',
                 'top_border_char', 'bottom_border_char',
                 'header_separator_char', 'column_separator_char',
                 'row_separator_char', 'intersect_top_left',
                 'intersect_top_mid', 'intersect_top_right',
                 'intersect_header_left', 'intersect_header_mid',
                 'intersect_header_right', 'intersect_row_left',
                 'intersect_row_mid', 'intersect_row_right',
                 'intersect_bottom_left', 'intersect_bottom_mid',
                 'intersect_bottom_right'):
        setattr(self, attr, getattr(template, attr))
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data."""
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # 'offset' = characters consumed by borders, separators and padding,
    # i.e. table width not available for cell content.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)

    # Widest rendered line of any (possibly multi-line) cell or header
    # in each column.
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length

    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset

    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1

    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    shrinked_columns = {}

    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                # NOTE(review): keyed by the *new width* — two columns
                # shrunk to the same width collide here; confirm intended.
                shrinked_columns[new_width] = i

    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # NOTE(review): 'i' enumerates the sorted widths while
                # 'index' is the real column position — this looks like
                # it should be self.column_widths[index]; confirm.
                self.column_widths[i] += extra_width
                if i == (len(shrinked_columns) - 1):
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra

    # Re-add padding so stored widths include the pad characters.
    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self):  # pragma : no cover
    # Deprecated public alias; the real work is done by
    # _calculate_column_widths().
    deprecation("'auto_calculate_width()' is deprecated")
    self._calculate_column_widths()

def set_padding_widths(self, pad_width):
    """Set width for left and right padding of the columns of the table.

    Both sides receive the same padding; use the individual
    `left_padding_widths` / `right_padding_widths` properties to set
    them separately.

    Parameters
    ----------
    pad_width : array_like
        pad widths for the columns.
    """
    self.left_padding_widths = pad_width
    self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
"""Stable sort of the table *IN-PLACE* with respect to a column.
Parameters
----------
key: int, str
index or header of the column. Normal list rules apply.
reverse : bool
If `True` then table is sorted as if each comparison was reversed.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError("'key' must either be 'int' or 'str'")
self._table.sort(key=operator.itemgetter(index), reverse=reverse)
def copy(self):
    """Return a shallow copy of the table.

    Returns
    -------
    BeautifulTable:
        shallow copy of the BeautifulTable instance.
    """
    # Slicing goes through __getitem__'s slice branch, which rebuilds
    # the rows against the new table instance.
    return self[:]
def get_column_header(self, index):
"""Get header of a column from it's index.
Parameters
----------
index: int
Normal list rules apply.
"""
return self._column_headers[index]
def get_column_index(self, header):
"""Get index of a column from it's header.
Parameters
----------
header: str
header of the column.
Raises
------
ValueError:
If no column could be found corresponding to `header`.
"""
try:
index = self._column_headers.index(header)
return index
except ValueError:
raise_suppressed(KeyError(("'{}' is not a header for any "
"column").format(header)))
def get_column(self, key):
"""Return an iterator to a column.
Parameters
----------
key : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If key is not of type `int`, or `str`.
Returns
-------
iter:
Iterator to the specified column.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError(("key must be an int or str, "
"not {}").format(type(key).__name__))
return iter(map(operator.itemgetter(index), self._table))
def reverse(self):
"""Reverse the table row-wise *IN PLACE*."""
self._table.reverse()
def pop_row(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int
index of the row. Normal list rules apply.
"""
row = self._table.pop(index)
return row
def pop_column(self, index=-1):
    """Remove the column at *index* (default last).

    Note: unlike `pop_row`, nothing is returned.

    Parameters
    ----------
    index : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If index is not an instance of `int`, or `str`.

    IndexError:
        If Table is empty.
    """
    if isinstance(index, int):
        pass
    elif isinstance(index, basestring):
        index = self.get_column_index(index)
    else:
        raise TypeError(("column index must be an integer or a string, "
                         "not {}").format(type(index).__name__))
    if self._column_count == 0:
        raise IndexError("pop from empty table")
    if self._column_count == 1:
        # This is the last column. So we should clear the table to avoid
        # empty rows
        self.clear(clear_metadata=True)
    else:
        # Not the last column. safe to pop from row
        self._column_count -= 1
        # Keep every per-column metadata container in sync with the
        # removed column, then strip the cell from every row.
        self._column_alignments._pop(index)
        self._column_widths._pop(index)
        self._left_padding_widths._pop(index)
        self._right_padding_widths._pop(index)
        self._column_headers._pop(index)
        for row in self._table:
            row._pop(index)
def append_row(self, row):
    """Append a row to end of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    # Delegates to insert_row (defined elsewhere in this class) with the
    # current row count as the insertion point.
    self.insert_row(len(self._table), row)
def update_row(self, key, value):
    """Replace a row, or multiple rows selected by a slice.

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.

    value : iterable
        A single row (iterable of cells) for an integer key; an
        iterable of such rows for a slice key.

    Raises
    ------
    IndexError:
        If index specified is out of range.

    TypeError:
        If `value` is of incorrect type.

    ValueError:
        If length of a row does not match the number of columns.
    """
    if isinstance(key, int):
        cells = self._validate_row(value, init_table_if_required=False)
        self._table[key] = RowData(self, cells)
    elif isinstance(key, slice):
        replacement = [
            RowData(self, self._validate_row(r, init_table_if_required=True))
            for r in value
        ]
        self._table[key] = replacement
    else:
        raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
    """Update the column titled *header*.

    If length of column is smaller than number of rows, lets say
    `k`, only the first `k` values in the column is updated.

    Parameters
    ----------
    header : str
        Header of the column

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If *header* is not a string.

    KeyError:
        If no column exists with title *header*.
    """
    # Validate the type before looking the header up; the original
    # checked after get_column_index(header), which made the TypeError
    # unreachable (a non-str header raised KeyError first).
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.

    header : str
        Title of the column.

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # Empty table: assigning column_headers initializes all column
        # metadata, then each value becomes a single-cell row.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        column_length = 0
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        if column_length == len(self._table) - 1:
            # Column covered every row: commit by updating the metadata.
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))
def append_column(self, header, column):
    """Append a column to end of the table.

    Parameters
    ----------
    header : str
        Title of the column

    column : iterable
        Any iterable of appropriate length.
    """
    # Inserting at position == column_count places it after the last
    # existing column.
    self.insert_column(self._column_count, header, column)
def clear(self, clear_metadata=False):
"""Clear the contents of the table.
Clear all rows of the table, and if specified clears all column
specific data.
Parameters
----------
clear_metadata : bool, optional
If it is true(default False), all metadata of columns such as their
alignment, padding, width, etc. are also cleared and number of
columns is set to 0.
"""
# Cannot use clear method to support Python 2.7
del self._table[:]
if clear_metadata:
self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the table.
    Column width should be set prior to calling this method. This method
    detects intersection and handles it according to the values of
    `intersect_*_*` attributes.

    Parameters
    ----------
    char : str
        Character used to draw the line.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    width = self.get_table_width()

    # Tile `char` to at least `width` display columns, then trim.
    try:
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        # Zero-width char (e.g. ''): fall back to spaces.
        line = [' '] * width

    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible. Junction characters are overlaid in place wherever a
    # vertical line crosses this one.
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk column by column, overlaying a mid-junction at
                # each separator position.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)
    return ''.join(line)
def _get_top_border(self):
    # Top border: top-border char with top intersection characters.
    return self._get_horizontal_line(self.top_border_char,
                                     self.intersect_top_left,
                                     self.intersect_top_mid,
                                     self.intersect_top_right)

def get_top_border(self):  # pragma : no cover
    """Get the Top border of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    # Deprecated public wrapper around _get_top_border().
    deprecation("'get_top_border()' is deprecated")
    return self._get_top_border()

def _get_header_separator(self):
    # Line between the header row and the first data row.
    return self._get_horizontal_line(self.header_separator_char,
                                     self.intersect_header_left,
                                     self.intersect_header_mid,
                                     self.intersect_header_right)

def get_header_separator(self):  # pragma : no cover
    """Get the Header separator of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    # Deprecated public wrapper around _get_header_separator().
    deprecation("'get_header_separator()' is deprecated")
    return self._get_header_separator()

def _get_row_separator(self):
    # Line drawn between two consecutive data rows.
    return self._get_horizontal_line(self.row_separator_char,
                                     self.intersect_row_left,
                                     self.intersect_row_mid,
                                     self.intersect_row_right)

def get_row_separator(self):  # pragma : no cover
    """Get the Row separator of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    # Deprecated public wrapper around _get_row_separator().
    deprecation("'get_row_separator()' is deprecated")
    return self._get_row_separator()

def _get_bottom_border(self):
    # Bottom border: bottom-border char with bottom intersections.
    return self._get_horizontal_line(self.bottom_border_char,
                                     self.intersect_bottom_left,
                                     self.intersect_bottom_mid,
                                     self.intersect_bottom_right)

def get_bottom_border(self):  # pragma : no cover
    """Get the Bottom border of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    # Deprecated public wrapper around _get_bottom_border().
    deprecation("'get_bottom_border()' is deprecated")
    return self._get_bottom_border()
def get_table_width(self):
    """Get the width of the table as number of characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    # Content widths, plus one separator between each adjacent pair of
    # columns, plus both outer borders.
    width = sum(self._column_widths)
    width += ((self._column_count - 1)
              * termwidth(self.column_separator_char))
    width += termwidth(self.left_border_char)
    width += termwidth(self.right_border_char)
    return width
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time ,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """

    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''

    # Temporarily prepend the serial-number column; it is popped again
    # at the end so the table itself is left unchanged.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))

    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()

    string_ = []

    # Drawing the top border
    if self.top_border_char:
        string_.append(
            self._get_top_border())

    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)

        if self.header_separator_char:
            string_.append(
                self._get_header_separator())

    # Printing rows
    first_row_encountered = False
    for row in self._table:
        # Row separator goes between rows, never before the first one.
        if first_row_encountered and self.row_separator_char:
            string_.append(
                self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)

    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(
            self._get_bottom_border())

    # Undo the temporary serial-number column added above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)

    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.update_row | python | def update_row(self, key, value):
if isinstance(key, int):
row = self._validate_row(value, init_table_if_required=False)
row_obj = RowData(self, row)
self._table[key] = row_obj
elif isinstance(key, slice):
row_obj_list = []
for row in value:
row_ = self._validate_row(row, init_table_if_required=True)
row_obj_list.append(RowData(self, row_))
self._table[key] = row_obj_list
else:
raise TypeError("key must be an integer or a slice object") | Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
key : int or slice
index of the row, or a slice object.
value : iterable
If an index is specified, `value` should be an iterable
of appropriate length. Instead if a slice object is
passed as key, value should be an iterable of rows.
Raises
------
IndexError:
If index specified is out of range.
TypeError:
If `value` is of incorrect type.
ValueError:
If length of row does not matches number of columns. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L895-L933 | [
"def _validate_row(self, value, init_table_if_required=True):\n # TODO: Rename this method\n # str is also an iterable but it is not a valid row, so\n # an extra check is required for str\n if not isinstance(value, Iterable) or isinstance(value, basestring):\n raise TypeError(\"parameter must be an iterable\")\n\n row = list(value)\n if init_table_if_required and self._column_count == 0:\n self._initialize_table(len(row))\n\n if len(row) != self._column_count:\n raise ValueError((\"'Expected iterable of length {}, \"\n \"got {}\").format(self._column_count, len(row)))\n return row\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
             default_alignment=enums.ALIGN_CENTER,
             default_padding=1):
    # Start from the default style so every drawing character is set.
    self.set_style(enums.STYLE_DEFAULT)
    # Rendering options (see class docstring for their meaning).
    self.numeric_precision = 3
    self.serialno = False
    self.serialno_header = "SN"
    self.detect_numerics = True
    # Internal state; the public properties wrap these with validation.
    self._column_count = 0
    self._sign_mode = enums.SM_MINUS
    self._width_exceed_policy = enums.WEP_WRAP
    self._column_pad = " "
    self.default_alignment = default_alignment
    self.default_padding = default_padding
    self.max_table_width = max_width
    # Start with zero columns and no rows.
    self._initialize_table(0)
    self._table = []
def __setattr__(self, name, value):
    """Intercept assignment to force `str` values for drawing chars."""
    str_only_attrs = frozenset((
        'left_border_char', 'right_border_char', 'top_border_char',
        'bottom_border_char', 'header_separator_char',
        'column_separator_char', 'row_separator_char',
        'intersect_top_left', 'intersect_top_mid',
        'intersect_top_right', 'intersect_header_left',
        'intersect_header_mid', 'intersect_header_right',
        'intersect_row_left', 'intersect_row_mid',
        'intersect_row_right', 'intersect_bottom_left',
        'intersect_bottom_mid', 'intersect_bottom_right'))
    if (to_unicode(name) in str_only_attrs
            and not isinstance(value, basestring)):
        raise TypeError(("Expected {attr} to be of type 'str', "
                         "got '{attr_type}'").format(
                             attr=name, attr_type=type(value).__name__))
    super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
    """Get the number of columns in the table(read only)"""
    # Backed by _column_count, which is mutated only by the column
    # insert/remove helpers.
    return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifulbable.WEP_WRAP An item is wrapped so every line fits
within it's column width.
beautifultable.WEP_STRIP An item is stripped to fit in it's
column.
beautifultable.WEP_ELLIPSIS An item is stripped to fit in it's
column and appended with ...(Ellipsis).
============================ =========================================
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable having all memebers an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
    """get/set width for right padding of the columns of the table.

    The right padding width is the number of characters on the right of a
    column reserved for padding. By default it is 1.
    """
    return self._right_padding_widths

@right_padding_widths.setter
def right_padding_widths(self, value):
    validated = self._validate_row(value)
    self._right_padding_widths = PositiveIntegerMetaData(self, validated)
@property
def max_table_width(self):
    """get/set the maximum width of the table.

    The width of the table is guaranteed to not exceed this value. If it
    is not possible to print a given table with the width provided, this
    value will automatically adjust.
    """
    # Smallest width the table can physically be drawn at: borders,
    # separators, and at least one character per column.
    min_width = ((self._column_count - 1)
                 * termwidth(self.column_separator_char)
                 + termwidth(self.left_border_char)
                 + termwidth(self.right_border_char)
                 + self._column_count)
    self._max_table_width = max(self._max_table_width, min_width)
    return self._max_table_width

@max_table_width.setter
def max_table_width(self, value):
    self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
    """Set up per-column metadata for `column_count` empty columns.

    Called when the number of columns is set for the first time (or
    reset via `clear(clear_metadata=True)`).

    Parameters
    ----------
    column_count : int
        number of columns in the table
    """
    # _column_count must be set before constructing the metadata
    # containers, since they receive a reference to this table.
    self._column_count = column_count
    self._column_headers = HeaderData(self, [''] * column_count)
    self._column_alignments = AlignmentMetaData(
        self, [self.default_alignment] * column_count)
    self._column_widths = PositiveIntegerMetaData(self, [0] * column_count)
    pad = [self.default_padding] * column_count
    self._left_padding_widths = PositiveIntegerMetaData(self, pad)
    self._right_padding_widths = PositiveIntegerMetaData(self, pad)
def _validate_row(self, value, init_table_if_required=True):
    # TODO: Rename this method
    """Check that `value` is a valid row and return it as a list.

    A `str` is technically iterable but is never a valid row, hence the
    explicit exclusion. When the table has no columns yet and
    `init_table_if_required` is true, the row defines the column count.
    """
    not_a_row = (not isinstance(value, Iterable)
                 or isinstance(value, basestring))
    if not_a_row:
        raise TypeError("parameter must be an iterable")

    row = list(value)
    if init_table_if_required and self._column_count == 0:
        self._initialize_table(len(row))
    if len(row) != self._column_count:
        raise ValueError(("'Expected iterable of length {}, "
                          "got {}").format(self._column_count, len(row)))
    return row
def __getitem__(self, key):
"""Get a row, or a column, or a new table by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, returns a row.
If key is an `str`, returns iterator to a column with header `key`.
If key is a slice object, returns a new table sliced according to
rows.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, slice):
new_table = copy.copy(self)
# Every child of BaseRow class needs to be reassigned so that
# They contain reference of the new table rather than the old
# This was a cause of a nasty bug once.
new_table.column_headers = self.column_headers
new_table.column_alignments = self.column_alignments
new_table.column_widths = self.column_widths
new_table.left_padding_widths = self.left_padding_widths
new_table.right_padding_widths = self.left_padding_widths
new_table._table = []
for row in self._table[key]:
new_table.append_row(row)
return new_table
elif isinstance(key, int):
return self._table[key]
elif isinstance(key, basestring):
return self.get_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
    """Update a row, or a column, or multiple rows by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int` or slice, updates the corresponding row(s).
        If key is an `str`, updates the column with header `key`.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    """
    if isinstance(key, (int, slice)):
        self.update_row(key, value)
        return
    if isinstance(key, basestring):
        self.update_column(key, value)
        return
    raise TypeError(("table indices must be integers, strings or "
                     "slices, not {}").format(type(key).__name__))
def __len__(self):
    # Number of rows currently stored in the table.
    return len(self._table)

def __contains__(self, key):
    # A `str` key tests header membership; any other iterable tests
    # row membership.
    if isinstance(key, basestring):
        return key in self._column_headers
    elif isinstance(key, Iterable):
        return key in self._table
    else:
        raise TypeError(("'key' must be str or Iterable, "
                         "not {}").format(type(key).__name__))

def __iter__(self):
    # Iterate over the rows of the table.
    return iter(self._table)

def __next__(self):
    # NOTE(review): `self._table` is a list, and `next()` on a list raises
    # TypeError; this method looks unusable as written — confirm intent.
    return next(self._table)

def __repr__(self):
    # Delegates to the underlying row list's repr.
    return repr(self._table)

def __str__(self):
    # Printing a table renders it fully via get_string().
    return self.get_string()
def set_style(self, style):
    """Set the style of the table from a predefined set of styles.

    Parameters
    ----------
    style: Style
        It can be one of the following:

        * beautifulTable.STYLE_DEFAULT
        * beautifultable.STYLE_NONE
        * beautifulTable.STYLE_DOTTED
        * beautifulTable.STYLE_MYSQL
        * beautifulTable.STYLE_SEPARATED
        * beautifulTable.STYLE_COMPACT
        * beautifulTable.STYLE_MARKDOWN
        * beautifulTable.STYLE_RESTRUCTURED_TEXT
        * beautifultable.STYLE_BOX
        * beautifultable.STYLE_BOX_DOUBLED
        * beautifultable.STYLE_BOX_ROUNDED
        * beautifultable.STYLE_GRID
    """
    if not isinstance(style, enums.Style):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Style)
        error_msg = ("allowed values for style are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    template = style.value
    # Copy every drawing character from the style template onto the
    # table; assignment goes through __setattr__ which type-checks each.
    for attr in ('left_border_char', 'right_border_char',
                 'top_border_char', 'bottom_border_char',
                 'header_separator_char', 'column_separator_char',
                 'row_separator_char', 'intersect_top_left',
                 'intersect_top_mid', 'intersect_top_right',
                 'intersect_header_left', 'intersect_header_mid',
                 'intersect_header_right', 'intersect_row_left',
                 'intersect_row_mid', 'intersect_row_right',
                 'intersect_bottom_left', 'intersect_bottom_mid',
                 'intersect_bottom_right'):
        setattr(self, attr, getattr(template, attr))
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data.

    Distributes `max_table_width` among the columns: columns whose
    content fits within their fair share keep their natural width;
    wider columns are shrunk proportionally, and any space left over
    from rounding is redistributed among the shrunk columns.
    """
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # Non-content width: borders, separators and padding.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)

    # Natural (content) width of each column, taking multiline cells and
    # headers into account.
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length

    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset

    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1

    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    shrinked_columns = {}

    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width - 1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                # NOTE(review): keyed by new_width — two columns shrunk
                # to the same width collide and only one gets extra
                # space below. Confirm whether that is acceptable.
                shrinked_columns[new_width] = i

    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # BUG FIX: the extra width belongs to the shrunk column
                # at `index`, not to position `i` of the enumeration
                # (the final-remainder line below already used `index`).
                self.column_widths[index] += extra_width
                if i == (len(shrinked_columns) - 1):
                    # Last shrunk column absorbs the rounding remainder.
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra

    # Finally add the padding back on top of the content widths.
    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]

def auto_calculate_width(self):  # pragma : no cover
    """Deprecated public alias of `_calculate_column_widths`."""
    deprecation("'auto_calculate_width()' is deprecated")
    self._calculate_column_widths()
def set_padding_widths(self, pad_width):
    """Set width for left and right padding of the columns of the table.

    Convenience wrapper that assigns the same value to both
    `left_padding_widths` and `right_padding_widths`.

    Parameters
    ----------
    pad_width : array_like
        pad widths for the columns.
    """
    self.left_padding_widths = pad_width
    self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
"""Stable sort of the table *IN-PLACE* with respect to a column.
Parameters
----------
key: int, str
index or header of the column. Normal list rules apply.
reverse : bool
If `True` then table is sorted as if each comparison was reversed.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError("'key' must either be 'int' or 'str'")
self._table.sort(key=operator.itemgetter(index), reverse=reverse)
def copy(self):
"""Return a shallow copy of the table.
Returns
-------
BeautifulTable:
shallow copy of the BeautifulTable instance.
"""
return self[:]
def get_column_header(self, index):
"""Get header of a column from it's index.
Parameters
----------
index: int
Normal list rules apply.
"""
return self._column_headers[index]
def get_column_index(self, header):
"""Get index of a column from it's header.
Parameters
----------
header: str
header of the column.
Raises
------
ValueError:
If no column could be found corresponding to `header`.
"""
try:
index = self._column_headers.index(header)
return index
except ValueError:
raise_suppressed(KeyError(("'{}' is not a header for any "
"column").format(header)))
def get_column(self, key):
"""Return an iterator to a column.
Parameters
----------
key : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If key is not of type `int`, or `str`.
Returns
-------
iter:
Iterator to the specified column.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError(("key must be an int or str, "
"not {}").format(type(key).__name__))
return iter(map(operator.itemgetter(index), self._table))
def reverse(self):
"""Reverse the table row-wise *IN PLACE*."""
self._table.reverse()
def pop_row(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int
index of the row. Normal list rules apply.
"""
row = self._table.pop(index)
return row
def pop_column(self, index=-1):
    """Remove the column at `index` (default last).

    Note: unlike `pop_row`, nothing is returned — the column is only
    removed.

    Parameters
    ----------
    index : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If index is not an instance of `int`, or `str`.

    IndexError:
        If Table is empty.
    """
    if isinstance(index, int):
        pass
    elif isinstance(index, basestring):
        # Resolve a header name to its positional index.
        index = self.get_column_index(index)
    else:
        raise TypeError(("column index must be an integer or a string, "
                         "not {}").format(type(index).__name__))
    if self._column_count == 0:
        raise IndexError("pop from empty table")
    if self._column_count == 1:
        # This is the last column. So we should clear the table to avoid
        # empty rows
        self.clear(clear_metadata=True)
    else:
        # Not the last column. safe to pop from row
        self._column_count -= 1
        self._column_alignments._pop(index)
        self._column_widths._pop(index)
        self._left_padding_widths._pop(index)
        self._right_padding_widths._pop(index)
        self._column_headers._pop(index)
        for row in self._table:
            row._pop(index)
def insert_row(self, index, row):
    """Insert a row before `index` in the table.

    Parameters
    ----------
    index : int
        List index rules apply

    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.

    ValueError:
        If size of `row` is inconsistent with the current number
        of columns.
    """
    validated = self._validate_row(row)
    self._table.insert(index, RowData(self, validated))

def append_row(self, row):
    """Append a row to end of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    self.insert_row(len(self._table), row)
def update_column(self, header, column):
    """Update the column titled `header` with values from `column`.

    If `column` has fewer items than the table has rows, lets say `k`,
    only the first `k` rows are updated.

    Parameters
    ----------
    header : str
        Header of the column

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    KeyError:
        If no column exists with title `header`.
    """
    # BUG FIX: validate the header type *before* resolving it to an
    # index; previously a non-str header raised a lookup error from
    # get_column_index instead of the intended TypeError.
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.

    header : str
        Title of the column.

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # First column ever: the values of `column` define the rows.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        # `column_length` is really the index of the last row updated.
        column_length = 0
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        if column_length == len(self._table) - 1:
            # Every row received a value: commit the column metadata.
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            # NOTE(review): when the table has columns but zero rows, the
            # loop above never runs and this branch pops from an empty
            # table (range(0, -1, -1) yields [0]) — confirm this path is
            # unreachable or fix upstream.
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))

def append_column(self, header, column):
    """Append a column to end of the table.

    Parameters
    ----------
    header : str
        Title of the column

    column : iterable
        Any iterable of appropriate length.
    """
    self.insert_column(self._column_count, header, column)
def clear(self, clear_metadata=False):
    """Clear the contents of the table.

    Removes all rows; when `clear_metadata` is true, also resets all
    column-specific data (alignment, padding, width, headers) and sets
    the column count to 0.

    Parameters
    ----------
    clear_metadata : bool, optional
        If it is true(default False), all metadata of columns such as their
        alignment, padding, width, etc. are also cleared and number of
        columns is set to 0.
    """
    # list.clear() does not exist on Python 2.7, hence the slice form.
    self._table[:] = []
    if clear_metadata:
        self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the table.
    Column width should be set prior to calling this method. This method
    detects intersection and handles it according to the values of
    `intersect_*_*` attributes.

    Parameters
    ----------
    char : str
        Character used to draw the line.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    width = self.get_table_width()
    # Tile `char` to fill the full table width, truncated to `width`;
    # a multi-cell (wide) char is handled via its terminal width.
    try:
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        # `char` has zero terminal width; fall back to spaces.
        line = [' '] * width

    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                # Overwrite the leftmost cells with the left junction.
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                # Overwrite the rightmost cells with the right junction.
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk across the line, placing a mid junction at every
                # column boundary.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)
    return ''.join(line)
def _get_top_border(self):
    # Top border built from the top-border char and its three junctions.
    return self._get_horizontal_line(self.top_border_char,
                                     self.intersect_top_left,
                                     self.intersect_top_mid,
                                     self.intersect_top_right)

def get_top_border(self):  # pragma : no cover
    """Get the Top border of table.

    Deprecated public wrapper around `_get_top_border`.
    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    deprecation("'get_top_border()' is deprecated")
    return self._get_top_border()

def _get_header_separator(self):
    # Line separating the header row from the data rows.
    return self._get_horizontal_line(self.header_separator_char,
                                     self.intersect_header_left,
                                     self.intersect_header_mid,
                                     self.intersect_header_right)

def get_header_separator(self):  # pragma : no cover
    """Get the Header separator of table.

    Deprecated public wrapper around `_get_header_separator`.
    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    deprecation("'get_header_separator()' is deprecated")
    return self._get_header_separator()

def _get_row_separator(self):
    # Line drawn between two consecutive data rows.
    return self._get_horizontal_line(self.row_separator_char,
                                     self.intersect_row_left,
                                     self.intersect_row_mid,
                                     self.intersect_row_right)

def get_row_separator(self):  # pragma : no cover
    """Get the Row separator of table.

    Deprecated public wrapper around `_get_row_separator`.
    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    deprecation("'get_row_separator()' is deprecated")
    return self._get_row_separator()

def _get_bottom_border(self):
    # Bottom border built from the bottom-border char and its junctions.
    return self._get_horizontal_line(self.bottom_border_char,
                                     self.intersect_bottom_left,
                                     self.intersect_bottom_mid,
                                     self.intersect_bottom_right)

def get_bottom_border(self):  # pragma : no cover
    """Get the Bottom border of table.

    Deprecated public wrapper around `_get_bottom_border`.
    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    deprecation("'get_bottom_border()' is deprecated")
    return self._get_bottom_border()
def get_table_width(self):
    """Return the width of the table as number of characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    separators_width = ((self._column_count - 1)
                        * termwidth(self.column_separator_char))
    return (sum(self._column_widths)
            + separators_width
            + termwidth(self.left_border_char)
            + termwidth(self.right_border_char))
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time ,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """

    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''

    # Temporarily insert the serial-number column; it is popped again
    # at the end, so the table is left unchanged (mutates state while
    # rendering — not safe for concurrent use).
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))

    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()

    string_ = []

    # Drawing the top border
    if self.top_border_char:
        string_.append(
            self._get_top_border())

    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)

        if self.header_separator_char:
            string_.append(
                self._get_header_separator())

    # Printing rows
    first_row_encountered = False
    for row in self._table:
        if first_row_encountered and self.row_separator_char:
            string_.append(
                self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)

    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(
            self._get_bottom_border())

    # Remove the temporary serial-number column before returning.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)

    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.update_column | python | def update_column(self, header, column):
index = self.get_column_index(header)
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
for row, new_item in zip(self._table, column):
row[index] = new_item | Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
header : str
Header of the column
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If length of `column` is shorter than number of rows.
ValueError:
If no column exists with title `header`. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L935-L961 | [
"def get_column_index(self, header):\n \"\"\"Get index of a column from it's header.\n\n Parameters\n ----------\n header: str\n header of the column.\n\n Raises\n ------\n ValueError:\n If no column could be found corresponding to `header`.\n \"\"\"\n try:\n index = self._column_headers.index(header)\n return index\n except ValueError:\n raise_suppressed(KeyError((\"'{}' is not a header for any \"\n \"column\").format(header)))\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
             default_alignment=enums.ALIGN_CENTER,
             default_padding=1):
    # Apply the default style first: it populates every border and
    # separator character attribute.
    self.set_style(enums.STYLE_DEFAULT)
    # Rendering options.
    self.numeric_precision = 3
    self.serialno = False
    self.serialno_header = "SN"
    self.detect_numerics = True
    # Internal state; column metadata is (re)built by _initialize_table.
    self._column_count = 0
    self._sign_mode = enums.SM_MINUS
    self._width_exceed_policy = enums.WEP_WRAP
    self._column_pad = " "
    self.default_alignment = default_alignment
    self.default_padding = default_padding
    self.max_table_width = max_width
    self._initialize_table(0)
    self._table = []
def __setattr__(self, name, value):
    # Intercept attribute writes so that every style/drawing character
    # attribute is guaranteed to be a string; all other attributes are
    # set unchecked.
    attrs = ('left_border_char', 'right_border_char', 'top_border_char',
             'bottom_border_char', 'header_separator_char',
             'column_separator_char', 'row_separator_char',
             'intersect_top_left', 'intersect_top_mid',
             'intersect_top_right', 'intersect_header_left',
             'intersect_header_mid', 'intersect_header_right',
             'intersect_row_left', 'intersect_row_mid',
             'intersect_row_right', 'intersect_bottom_left',
             'intersect_bottom_mid', 'intersect_bottom_right')
    if to_unicode(name) in attrs and not isinstance(value, basestring):
        value_type = type(value).__name__
        raise TypeError(("Expected {attr} to be of type 'str', "
                         "got '{attr_type}'").format(attr=name,
                                                     attr_type=value_type))
    super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
    """Get the number of columns in the table (read only)."""
    return self._column_count
@property
def intersection_char(self):  # pragma : no cover
    """Character used to draw intersection of perpendicular lines.

    Disabling it just draws the horizontal line char in its place.
    This attribute is deprecated. Use specific intersect_*_* attribute.
    """
    deprecation("'intersection_char' is deprecated, Use specific "
                "`intersect_*_*` attribute instead")
    return self.intersect_top_left

@intersection_char.setter
def intersection_char(self, value):  # pragma : no cover
    deprecation("'intersection_char' is deprecated, Use specific "
                "`intersect_*_*` attributes instead")
    # Fan the single legacy value out to every junction attribute.
    for attr in ('intersect_top_left', 'intersect_top_mid',
                 'intersect_top_right', 'intersect_header_left',
                 'intersect_header_mid', 'intersect_header_right',
                 'intersect_row_left', 'intersect_row_mid',
                 'intersect_row_right', 'intersect_bottom_left',
                 'intersect_bottom_mid', 'intersect_bottom_right'):
        setattr(self, attr, value)
@property
def sign_mode(self):
    """Attribute to control how signs are displayed for numerical data.

    It can be one of the following:

    ======================== =============================================
    Option                   Meaning
    ======================== =============================================
    beautifultable.SM_PLUS   A sign should be used for both +ve and -ve
                             numbers.

    beautifultable.SM_MINUS  A sign should only be used for -ve numbers.

    beautifultable.SM_SPACE  A leading space should be used for +ve
                             numbers and a minus sign for -ve numbers.
    ======================== =============================================
    """
    return self._sign_mode

@sign_mode.setter
def sign_mode(self, value):
    if isinstance(value, enums.SignMode):
        self._sign_mode = value
        return
    allowed = ("{}.{}".format(type(self).__name__, i.name)
               for i in enums.SignMode)
    raise ValueError("allowed values for sign_mode are: "
                     + ', '.join(allowed))
@property
def width_exceed_policy(self):
    """Attribute to control how exceeding column width should be handled.

    It can be one of the following:

    ============================ =========================================
    Option                       Meaning
    ============================ =========================================
    beautifulbable.WEP_WRAP      An item is wrapped so every line fits
                                 within it's column width.

    beautifultable.WEP_STRIP     An item is stripped to fit in it's
                                 column.

    beautifultable.WEP_ELLIPSIS  An item is stripped to fit in it's
                                 column and appended with ...(Ellipsis).
    ============================ =========================================
    """
    return self._width_exceed_policy

@width_exceed_policy.setter
def width_exceed_policy(self, value):
    if isinstance(value, enums.WidthExceedPolicy):
        self._width_exceed_policy = value
        return
    allowed = ("{}.{}".format(type(self).__name__, i.name)
               for i in enums.WidthExceedPolicy)
    raise ValueError("allowed values for width_exceed_policy are: "
                     + ', '.join(allowed))
@property
def default_alignment(self):
    """Attribute to control the alignment of newly created columns.

    It can be one of the following:

    ============================ =========================================
    Option                       Meaning
    ============================ =========================================
    beautifultable.ALIGN_LEFT    New columns are left aligned.

    beautifultable.ALIGN_CENTER  New columns are center aligned.

    beautifultable.ALIGN_RIGHT   New columns are right aligned.
    ============================ =========================================
    """
    return self._default_alignment

@default_alignment.setter
def default_alignment(self, value):
    if isinstance(value, enums.Alignment):
        self._default_alignment = value
        return
    allowed = ("{}.{}".format(type(self).__name__, i.name)
               for i in enums.Alignment)
    raise ValueError("allowed values for default_alignment are: "
                     + ', '.join(allowed))
@property
def default_padding(self):
    """Initial value for Left and Right padding widths for new columns."""
    return self._default_padding

@default_padding.setter
def default_padding(self, value):
    # Guard-clause style: reject bad values, then assign.
    if not isinstance(value, int):
        raise TypeError("padding must be an integer")
    if value <= 0:
        raise ValueError("padding must be more than 0")
    self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.

Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
# _validate_row checks iterability and that length matches column_count.
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.

It can be any iterable having all members an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
# Every header must be a string; reject early with a clear message.
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.

It can be any iterable containing only beautifultable.ALIGN_LEFT,
beautifultable.ALIGN_CENTER or beautifultable.ALIGN_RIGHT.
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.

Left padding width specifies the number of characters
on the left of a column reserved for padding. By default it is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.

Right padding width specifies the number of characters
on the right of a column reserved for padding. By default it is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.

The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
# NOTE: this getter has a side effect -- it bumps the stored value up
# to the minimum feasible width: borders + separators + at least one
# character per column.
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
"""Sets the column count of the table.

This method is called to set the number of columns for the first time.

Parameters
----------
column_count : int
    number of columns in the table
"""
header = [''] * column_count
alignment = [self.default_alignment] * column_count
width = [0] * column_count
padding = [self.default_padding] * column_count
self._column_count = column_count
self._column_headers = HeaderData(self, header)
self._column_alignments = AlignmentMetaData(self, alignment)
self._column_widths = PositiveIntegerMetaData(self, width)
# NOTE(review): both padding containers are built from the *same*
# `padding` list; assumes PositiveIntegerMetaData copies its input,
# otherwise left/right padding would alias each other -- confirm.
self._left_padding_widths = PositiveIntegerMetaData(self, padding)
self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
"""Return *value* as a list, validating it as a candidate row.

Raises TypeError for non-iterables (and for str, which is iterable
but not a valid row), and ValueError on a length mismatch. When the
table has no columns yet, the first row defines the column count.
"""
# TODO: Rename this method
# str is also an iterable but it is not a valid row, so
# an extra check is required for str
if not isinstance(value, Iterable) or isinstance(value, basestring):
raise TypeError("parameter must be an iterable")
row = list(value)
if init_table_if_required and self._column_count == 0:
self._initialize_table(len(row))
if len(row) != self._column_count:
raise ValueError(("'Expected iterable of length {}, "
"got {}").format(self._column_count, len(row)))
return row
def __getitem__(self, key):
    """Get a row, or a column, or a new table by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, returns a row.
        If key is a `str`, returns an iterator over the column with
        header `key`.
        If key is a slice object, returns a new table sliced according
        to rows.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, slice):
        new_table = copy.copy(self)
        # Every child of BaseRow class needs to be reassigned so that
        # they contain a reference to the new table rather than the old.
        # This was the cause of a nasty bug once.
        new_table.column_headers = self.column_headers
        new_table.column_alignments = self.column_alignments
        new_table.column_widths = self.column_widths
        new_table.left_padding_widths = self.left_padding_widths
        # BUG FIX: this previously copied left_padding_widths, which
        # silently dropped any custom right padding from sliced copies.
        new_table.right_padding_widths = self.right_padding_widths
        new_table._table = []
        for row in self._table[key]:
            new_table.append_row(row)
        return new_table
    elif isinstance(key, int):
        return self._table[key]
    elif isinstance(key, basestring):
        return self.get_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.

Parameters
----------
key : int, slice, str
    If key is an `int`, deletes a row.
    If key is a slice object, deletes multiple rows.
    If key is an `str`, delete the first column with heading `key`

Raises
------
TypeError
    If key is not of type int, slice or str.
IndexError
    If `int` key is out of range.
KeyError
    If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
# The `del` statement ignores this return value; delegation is what
# matters here.
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.

Parameters
----------
key : int, slice, str
    If key is an `int`, updates a row.
    If key is an `str`, updates the column with header `key`.
    If key is a slice object, updates multiple rows according to slice
    rules.

Raises
------
TypeError
    If key is not of type int, slice or str.
IndexError
    If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __len__(self):
# Number of rows currently in the table.
return len(self._table)
def __contains__(self, key):
# Membership test: a str key checks the headers; any other iterable
# is checked against the rows.
if isinstance(key, basestring):
return key in self._column_headers
elif isinstance(key, Iterable):
return key in self._table
else:
raise TypeError(("'key' must be str or Iterable, "
"not {}").format(type(key).__name__))
def __iter__(self):
# Iterate over the rows of the table.
return iter(self._table)
def __next__(self):
# NOTE(review): `self._table` is a plain list, and calling next() on
# a list raises TypeError -- this method looks broken/dead (normal
# iteration goes through __iter__). Confirm before relying on it.
return next(self._table)
def __repr__(self):
return repr(self._table)
def __str__(self):
# Full textual rendering of the table (delegates to get_string()).
return self.get_string()
def set_style(self, style):
"""Set the style of the table from a predefined set of styles.

Parameters
----------
style: Style
    It can be one of the following:

    * beautifultable.STYLE_DEFAULT
    * beautifultable.STYLE_NONE
    * beautifultable.STYLE_DOTTED
    * beautifultable.STYLE_MYSQL
    * beautifultable.STYLE_SEPARATED
    * beautifultable.STYLE_COMPACT
    * beautifultable.STYLE_MARKDOWN
    * beautifultable.STYLE_RESTRUCTURED_TEXT
    * beautifultable.STYLE_BOX
    * beautifultable.STYLE_BOX_DOUBLED
    * beautifultable.STYLE_BOX_ROUNDED
    * beautifultable.STYLE_GRID
"""
if not isinstance(style, enums.Style):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Style)
error_msg = ("allowed values for style are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
# Copy every border/separator/intersection character from the chosen
# style template onto this instance.
style_template = style.value
self.left_border_char = style_template.left_border_char
self.right_border_char = style_template.right_border_char
self.top_border_char = style_template.top_border_char
self.bottom_border_char = style_template.bottom_border_char
self.header_separator_char = style_template.header_separator_char
self.column_separator_char = style_template.column_separator_char
self.row_separator_char = style_template.row_separator_char
self.intersect_top_left = style_template.intersect_top_left
self.intersect_top_mid = style_template.intersect_top_mid
self.intersect_top_right = style_template.intersect_top_right
self.intersect_header_left = style_template.intersect_header_left
self.intersect_header_mid = style_template.intersect_header_mid
self.intersect_header_right = style_template.intersect_header_right
self.intersect_row_left = style_template.intersect_row_left
self.intersect_row_mid = style_template.intersect_row_mid
self.intersect_row_right = style_template.intersect_row_right
self.intersect_bottom_left = style_template.intersect_bottom_left
self.intersect_bottom_mid = style_template.intersect_bottom_mid
self.intersect_bottom_right = style_template.intersect_bottom_right
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data.

    Each column starts at the display width of its longest cell/header
    line, then over-wide columns are shrunk proportionally so the whole
    table fits within ``max_table_width``. Padding widths are added
    back at the end.
    """
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # `offset` is the width consumed by everything that is not cell
    # content: borders, separators and padding.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length
    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset
    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1
    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    # Maps new (shrunk) width -> column index.
    # NOTE(review): two columns shrunk to the same width overwrite each
    # other in this dict; presumably widths are distinct in practice --
    # confirm.
    shrinked_columns = {}
    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                shrinked_columns[new_width] = i
    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # BUG FIX: the extra space must go to the shrunk column
                # itself (`index`); `i` is merely the position in the
                # sorted iteration order and can point at an unrelated
                # column.
                self.column_widths[index] += extra_width
                if i == (len(shrinked_columns) - 1):
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra
    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self): # pragma : no cover
# Deprecated public alias for _calculate_column_widths().
deprecation("'auto_calculate_width()' is deprecated")
self._calculate_column_widths()
def set_padding_widths(self, pad_width):
"""Set width for left and right padding of the columns of the table.

Parameters
----------
pad_width : array_like
    pad widths for the columns.
"""
self.left_padding_widths = pad_width
self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
    """Stable, in-place sort of the table rows keyed on one column.

    Parameters
    ----------
    key: int, str
        Index or header of the column used as the sort key. Normal
        list rules apply for integer indices.
    reverse : bool
        If `True`, sort in descending order.

    Raises
    ------
    TypeError
        If `key` is neither an `int` nor a `str`.
    """
    if isinstance(key, int):
        column = key
    elif isinstance(key, basestring):
        column = self.get_column_index(key)
    else:
        raise TypeError("'key' must either be 'int' or 'str'")
    # list.sort is stable, so rows with equal keys keep their order.
    self._table.sort(key=lambda row: row[column], reverse=reverse)
def copy(self):
"""Return a shallow copy of the table.

Returns
-------
BeautifulTable:
    shallow copy of the BeautifulTable instance.
"""
# Delegates to __getitem__ with a full slice.
return self[:]
def get_column_header(self, index):
"""Get header of a column from it's index.

Parameters
----------
index: int
    Normal list rules apply.
"""
return self._column_headers[index]
def get_column_index(self, header):
    """Return the index of the first column titled *header*.

    Parameters
    ----------
    header: str
        Header of the column to look up.

    Raises
    ------
    KeyError
        If no column has *header* as its title.
    """
    try:
        return self._column_headers.index(header)
    except ValueError:
        # Surface a KeyError instead, suppressing the ValueError context.
        message = ("'{}' is not a header for any "
                   "column").format(header)
        raise_suppressed(KeyError(message))
def get_column(self, key):
    """Return an iterator over the cells of one column.

    Parameters
    ----------
    key : int, str
        Index of the column, or its header. Normal list rules apply
        for integer indices.

    Raises
    ------
    TypeError:
        If key is neither `int` nor `str`.

    Returns
    -------
    iter:
        Iterator yielding the column's cell values, row by row.
    """
    if isinstance(key, int):
        index = key
    elif isinstance(key, basestring):
        index = self.get_column_index(key)
    else:
        raise TypeError(("key must be an int or str, "
                         "not {}").format(type(key).__name__))
    # Lazily pick the cell at `index` from every row.
    return (row[index] for row in self._table)
def reverse(self):
"""Reverse the table row-wise *IN PLACE*."""
self._table.reverse()
def pop_row(self, index=-1):
    """Remove and return the row at *index* (default: last row).

    Parameters
    ----------
    index : int
        Index of the row. Normal list rules apply.
    """
    return self._table.pop(index)
def pop_column(self, index=-1):
"""Remove a column at index (default last).

Despite the ``pop`` name, the removed column is *not* returned;
this method returns None.

Parameters
----------
index : int, str
    index of the column, or the header of the column.
    If index is specified, then normal list rules apply.

Raises
------
TypeError:
    If index is not an instance of `int`, or `str`.
IndexError:
    If Table is empty.
"""
if isinstance(index, int):
pass
elif isinstance(index, basestring):
index = self.get_column_index(index)
else:
raise TypeError(("column index must be an integer or a string, "
"not {}").format(type(index).__name__))
if self._column_count == 0:
raise IndexError("pop from empty table")
if self._column_count == 1:
# This is the last column. So we should clear the table to avoid
# empty rows
self.clear(clear_metadata=True)
else:
# Not the last column. safe to pop from row
self._column_count -= 1
self._column_alignments._pop(index)
self._column_widths._pop(index)
self._left_padding_widths._pop(index)
self._right_padding_widths._pop(index)
self._column_headers._pop(index)
for row in self._table:
row._pop(index)
def insert_row(self, index, row):
"""Insert a row before index in the table.

Parameters
----------
index : int
    List index rules apply
row : iterable
    Any iterable of appropriate length.

Raises
------
TypeError:
    If `row` is not an iterable.
ValueError:
    If size of `row` is inconsistent with the current number
    of columns.
"""
# Validation also initializes column metadata if this is the first row.
row = self._validate_row(row)
row_obj = RowData(self, row)
self._table.insert(index, row_obj)
def append_row(self, row):
    """Add *row* after the last row of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    position = len(self._table)
    self.insert_row(position, row)
def update_row(self, key, value):
"""Update a row, or multiple rows if a slice object is passed.

Parameters
----------
key : int or slice
    index of the row, or a slice object.
value : iterable
    If an index is specified, `value` should be an iterable
    of appropriate length. Instead if a slice object is
    passed as key, value should be an iterable of rows.

Raises
------
IndexError:
    If index specified is out of range.
TypeError:
    If `value` is of incorrect type.
ValueError:
    If length of row does not matches number of columns.
"""
if isinstance(key, int):
row = self._validate_row(value, init_table_if_required=False)
row_obj = RowData(self, row)
self._table[key] = row_obj
elif isinstance(key, slice):
# Validate every replacement row before assigning the slice.
row_obj_list = []
for row in value:
row_ = self._validate_row(row, init_table_if_required=True)
row_obj_list.append(RowData(self, row_))
self._table[key] = row_obj_list
else:
raise TypeError("key must be an integer or a slice object")
def insert_column(self, index, header, column):
"""Insert a column before `index` in the table.

If length of column is bigger than number of rows, lets say
`k`, only the first `k` values of `column` is considered.
If column is shorter than 'k', ValueError is raised.

Note that Table remains in consistent state even if column
is too short. Any changes made by this method is rolled back
before raising the exception.

Parameters
----------
index : int
    List index rules apply.
header : str
    Title of the column.
column : iterable
    Any iterable of appropriate length.

Raises
------
TypeError:
    If `header` is not of type `str`.
ValueError:
    If length of `column` is shorter than number of rows.
"""
if self._column_count == 0:
# First column ever: it defines the rows of the table.
self.column_headers = HeaderData(self, [header])
self._table = [RowData(self, [i]) for i in column]
else:
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
column_length = 0
for i, (row, new_item) in enumerate(zip(self._table, column)):
row._insert(index, new_item)
column_length = i
# NOTE(review): when the table has columns but zero rows, the loop
# above never runs and column_length stays 0, so this check
# (0 == -1) fails and a ValueError is raised even for valid input
# -- confirm whether that edge case can occur.
if column_length == len(self._table) - 1:
self._column_count += 1
self._column_headers._insert(index, header)
self._column_alignments._insert(index, self.default_alignment)
self._column_widths._insert(index, 0)
self._left_padding_widths._insert(index, self.default_padding)
self._right_padding_widths._insert(index, self.default_padding)
else:
# Roll back changes so that table remains in consistent state
for j in range(column_length, -1, -1):
self._table[j]._pop(index)
raise ValueError(("length of 'column' should be atleast {}, "
"got {}").format(len(self._table),
column_length + 1))
def append_column(self, header, column):
    """Add a new column after the last column of the table.

    Parameters
    ----------
    header : str
        Title of the column.
    column : iterable
        Any iterable of appropriate length.
    """
    end = self._column_count
    self.insert_column(end, header, column)
def clear(self, clear_metadata=False):
"""Clear the contents of the table.

Clear all rows of the table, and if specified clears all column
specific data.

Parameters
----------
clear_metadata : bool, optional
    If it is true(default False), all metadata of columns such as their
    alignment, padding, width, etc. are also cleared and number of
    columns is set to 0.
"""
# Cannot use clear method to support Python 2.7
del self._table[:]
if clear_metadata:
# Re-initializing with 0 columns resets all per-column metadata.
self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
intersect_mid, intersect_right):
"""Get a horizontal line for the table.

Internal method used to actually get all horizontal lines in the table.
Column width should be set prior to calling this method. This method
detects intersection and handles it according to the values of
`intersect_*_*` attributes.

Parameters
----------
char : str
    Character used to draw the line.
intersect_left : str
    Character placed where the line meets the left border.
intersect_mid : str
    Character placed where the line crosses a column separator.
intersect_right : str
    Character placed where the line meets the right border.

Returns
-------
str
    String which will be printed as a horizontal rule of the table.
"""
width = self.get_table_width()
# Tile `char` to cover the full width; a zero-width char falls back to
# spaces.
try:
line = list(char * (int(width/termwidth(char)) + 1))[:width]
except ZeroDivisionError:
line = [' '] * width
if len(line) == 0:
return ''
# Only if Special Intersection is enabled and horizontal line is
# visible
if not char.isspace():
# If left border is enabled and it is visible
visible_junc = not intersect_left.isspace()
if termwidth(self.left_border_char) > 0:
if not (self.left_border_char.isspace() and visible_junc):
length = min(termwidth(self.left_border_char),
termwidth(intersect_left))
for i in range(length):
line[i] = intersect_left[i]
visible_junc = not intersect_right.isspace()
# If right border is enabled and it is visible
if termwidth(self.right_border_char) > 0:
if not (self.right_border_char.isspace() and visible_junc):
length = min(termwidth(self.right_border_char),
termwidth(intersect_right))
for i in range(length):
line[-i-1] = intersect_right[-i-1]
visible_junc = not intersect_mid.isspace()
# If column separator is enabled and it is visible
if termwidth(self.column_separator_char):
if not (self.column_separator_char.isspace() and visible_junc):
# NOTE(review): indexing `line`/`intersect_*` by code point
# while stepping by termwidth assumes one terminal cell per
# code point -- presumably fine for ASCII styles; confirm for
# wide characters.
index = termwidth(self.left_border_char)
for i in range(self._column_count-1):
index += (self._column_widths[i])
length = min(termwidth(self.column_separator_char),
termwidth(intersect_mid))
for i in range(length):
line[index+i] = intersect_mid[i]
index += termwidth(self.column_separator_char)
return ''.join(line)
def _get_top_border(self):
# Top rule drawn with the top border char and its intersections.
return self._get_horizontal_line(self.top_border_char,
self.intersect_top_left,
self.intersect_top_mid,
self.intersect_top_right)
def get_top_border(self): # pragma : no cover
"""Get the Top border of table.

Column width should be set prior to calling this method.

Returns
-------
str
    String which will be printed as the Top border of the table.
"""
deprecation("'get_top_border()' is deprecated")
return self._get_top_border()
def _get_header_separator(self):
# Rule between the header row and the data rows.
return self._get_horizontal_line(self.header_separator_char,
self.intersect_header_left,
self.intersect_header_mid,
self.intersect_header_right)
def get_header_separator(self): # pragma : no cover
"""Get the Header separator of table.

Column width should be set prior to calling this method.

Returns
-------
str
    String which will be printed as Header separator of the table.
"""
deprecation("'get_header_separator()' is deprecated")
return self._get_header_separator()
def _get_row_separator(self):
# Rule drawn between two consecutive data rows.
return self._get_horizontal_line(self.row_separator_char,
self.intersect_row_left,
self.intersect_row_mid,
self.intersect_row_right)
def get_row_separator(self): # pragma : no cover
"""Get the Row separator of table.

Column width should be set prior to calling this method.

Returns
-------
str
    String which will be printed as Row separator of the table.
"""
deprecation("'get_row_separator()' is deprecated")
return self._get_row_separator()
def _get_bottom_border(self):
# Bottom rule drawn with the bottom border char and its intersections.
return self._get_horizontal_line(self.bottom_border_char,
self.intersect_bottom_left,
self.intersect_bottom_mid,
self.intersect_bottom_right)
def get_bottom_border(self): # pragma : no cover
"""Get the Bottom border of table.

Column width should be set prior to calling this method.

Returns
-------
str
    String which will be printed as Bottom border of the table.
"""
deprecation("'get_bottom_border()' is deprecated")
return self._get_bottom_border()
def get_table_width(self):
"""Get the width of the table as number of characters.

Column width should be set prior to calling this method.

Returns
-------
int
    Width of the table as number of characters.
"""
if self.column_count == 0:
return 0
# Content widths plus separators between columns plus both borders.
width = sum(self._column_widths)
width += ((self._column_count - 1)
* termwidth(self.column_separator_char))
width += termwidth(self.left_border_char)
width += termwidth(self.right_border_char)
return width
def get_string(self, recalculate_width=True):
"""Get the table as a String.

Parameters
----------
recalculate_width : bool, optional
    If width for each column should be recalculated(default True).
    Note that width is always calculated if it wasn't set
    explicitly when this method is called for the first time ,
    regardless of the value of `recalculate_width`.

Returns
-------
str:
    Table as a string.
"""
# Empty table. returning empty string.
if len(self._table) == 0:
return ''
# The serial-number column is inserted temporarily and popped again
# at the end of this method.
if self.serialno and self.column_count > 0:
self.insert_column(0, self.serialno_header,
range(1, len(self) + 1))
# Should widths of column be recalculated
if recalculate_width or sum(self._column_widths) == 0:
self._calculate_column_widths()
string_ = []
# Drawing the top border
if self.top_border_char:
string_.append(
self._get_top_border())
# Print headers if not empty or only spaces
if ''.join(self._column_headers).strip():
headers = to_unicode(self._column_headers)
string_.append(headers)
if self.header_separator_char:
string_.append(
self._get_header_separator())
# Printing rows
first_row_encountered = False
for row in self._table:
if first_row_encountered and self.row_separator_char:
string_.append(
self._get_row_separator())
first_row_encountered = True
content = to_unicode(row)
string_.append(content)
# Drawing the bottom border
if self.bottom_border_char:
string_.append(
self._get_bottom_border())
if self.serialno and self.column_count > 0:
self.pop_column(0)
return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.insert_column | python | def insert_column(self, index, header, column):
if self._column_count == 0:
self.column_headers = HeaderData(self, [header])
self._table = [RowData(self, [i]) for i in column]
else:
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
column_length = 0
for i, (row, new_item) in enumerate(zip(self._table, column)):
row._insert(index, new_item)
column_length = i
if column_length == len(self._table) - 1:
self._column_count += 1
self._column_headers._insert(index, header)
self._column_alignments._insert(index, self.default_alignment)
self._column_widths._insert(index, 0)
self._left_padding_widths._insert(index, self.default_padding)
self._right_padding_widths._insert(index, self.default_padding)
else:
# Roll back changes so that table remains in consistent state
for j in range(column_length, -1, -1):
self._table[j]._pop(index)
raise ValueError(("length of 'column' should be atleast {}, "
"got {}").format(len(self._table),
column_length + 1)) | Insert a column before `index` in the table.
If length of column is bigger than number of rows, lets say
`k`, only the first `k` values of `column` is considered.
If column is shorter than 'k', ValueError is raised.
Note that Table remains in consistent state even if column
is too short. Any changes made by this method is rolled back
before raising the exception.
Parameters
----------
index : int
List index rules apply.
header : str
Title of the column.
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `header` is not of type `str`.
ValueError:
If length of `column` is shorter than number of rows. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L963-L1016 | null | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
default_alignment=enums.ALIGN_CENTER,
default_padding=1):
# Start from the default style, then set the user-tunable knobs.
self.set_style(enums.STYLE_DEFAULT)
self.numeric_precision = 3
self.serialno = False
self.serialno_header = "SN"
self.detect_numerics = True
self._column_count = 0
self._sign_mode = enums.SM_MINUS
self._width_exceed_policy = enums.WEP_WRAP
self._column_pad = " "
self.default_alignment = default_alignment
self.default_padding = default_padding
self.max_table_width = max_width
# Build empty per-column metadata, then start with no rows.
self._initialize_table(0)
self._table = []
def __setattr__(self, name, value):
"""Type-check assignments to the border/intersection character
attributes; everything else is stored unchanged."""
attrs = ('left_border_char', 'right_border_char', 'top_border_char',
'bottom_border_char', 'header_separator_char',
'column_separator_char', 'row_separator_char',
'intersect_top_left', 'intersect_top_mid',
'intersect_top_right', 'intersect_header_left',
'intersect_header_mid', 'intersect_header_right',
'intersect_row_left', 'intersect_row_mid',
'intersect_row_right', 'intersect_bottom_left',
'intersect_bottom_mid', 'intersect_bottom_right')
if to_unicode(name) in attrs and not isinstance(value, basestring):
value_type = type(value).__name__
raise TypeError(("Expected {attr} to be of type 'str', "
"got '{attr_type}'").format(attr=name,
attr_type=value_type))
super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
"""Get the number of columns in the table(read only)"""
return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.

Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
# Fan the single legacy value out to all twelve intersection points.
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.

It can be one of beautifultable.SM_PLUS (sign for both +ve and -ve
numbers), beautifultable.SM_MINUS (sign only for -ve numbers) or
beautifultable.SM_SPACE (leading space for +ve, minus for -ve).
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
# Reject anything that is not a member of the SignMode enum.
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.

It can be one of beautifultable.WEP_WRAP (wrap the item over multiple
lines), beautifultable.WEP_STRIP (truncate the item) or
beautifultable.WEP_ELLIPSIS (truncate and append an ellipsis).
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
# Reject anything that is not a member of the WidthExceedPolicy enum.
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable having all memebers an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.
Right Width of the padding specifies the number of characters
on the rigth of a column reserved for padding. By default It is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
"""Sets the column count of the table.
This method is called to set the number of columns for the first time.
Parameters
----------
column_count : int
number of columns in the table
"""
header = [''] * column_count
alignment = [self.default_alignment] * column_count
width = [0] * column_count
padding = [self.default_padding] * column_count
self._column_count = column_count
self._column_headers = HeaderData(self, header)
self._column_alignments = AlignmentMetaData(self, alignment)
self._column_widths = PositiveIntegerMetaData(self, width)
self._left_padding_widths = PositiveIntegerMetaData(self, padding)
self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
# TODO: Rename this method
# str is also an iterable but it is not a valid row, so
# an extra check is required for str
if not isinstance(value, Iterable) or isinstance(value, basestring):
raise TypeError("parameter must be an iterable")
row = list(value)
if init_table_if_required and self._column_count == 0:
self._initialize_table(len(row))
if len(row) != self._column_count:
raise ValueError(("'Expected iterable of length {}, "
"got {}").format(self._column_count, len(row)))
return row
def __getitem__(self, key):
"""Get a row, or a column, or a new table by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, returns a row.
If key is an `str`, returns iterator to a column with header `key`.
If key is a slice object, returns a new table sliced according to
rows.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, slice):
new_table = copy.copy(self)
# Every child of BaseRow class needs to be reassigned so that
# They contain reference of the new table rather than the old
# This was a cause of a nasty bug once.
new_table.column_headers = self.column_headers
new_table.column_alignments = self.column_alignments
new_table.column_widths = self.column_widths
new_table.left_padding_widths = self.left_padding_widths
new_table.right_padding_widths = self.left_padding_widths
new_table._table = []
for row in self._table[key]:
new_table.append_row(row)
return new_table
elif isinstance(key, int):
return self._table[key]
elif isinstance(key, basestring):
return self.get_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, updates a row.
If key is an `str`, appends `column` to the list with header as
`key`.
If key is a slice object, updates multiple rows according to slice
rules.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
    def __len__(self):
        # Number of rows currently stored in the table.
        return len(self._table)

    def __contains__(self, key):
        # A str is looked up among the headers; any other iterable is
        # looked up among the rows.
        if isinstance(key, basestring):
            return key in self._column_headers
        elif isinstance(key, Iterable):
            return key in self._table
        else:
            raise TypeError(("'key' must be str or Iterable, "
                             "not {}").format(type(key).__name__))

    def __iter__(self):
        # Iterating a table yields its rows.
        return iter(self._table)

    def __next__(self):
        # NOTE(review): self._table is a plain list and next() on a list
        # raises TypeError ("list object is not an iterator") -- this
        # method appears broken/unreachable via normal iteration, which
        # goes through __iter__. Confirm intent before relying on it.
        return next(self._table)

    def __repr__(self):
        # Debug representation delegates to the underlying row list.
        return repr(self._table)

    def __str__(self):
        # Fully rendered table; delegates to get_string().
        return self.get_string()
def set_style(self, style):
"""Set the style of the table from a predefined set of styles.
Parameters
----------
style: Style
It can be one of the following:
* beautifulTable.STYLE_DEFAULT
* beautifultable.STYLE_NONE
* beautifulTable.STYLE_DOTTED
* beautifulTable.STYLE_MYSQL
* beautifulTable.STYLE_SEPARATED
* beautifulTable.STYLE_COMPACT
* beautifulTable.STYLE_MARKDOWN
* beautifulTable.STYLE_RESTRUCTURED_TEXT
* beautifultable.STYLE_BOX
* beautifultable.STYLE_BOX_DOUBLED
* beautifultable.STYLE_BOX_ROUNDED
* beautifultable.STYLE_GRID
"""
if not isinstance(style, enums.Style):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Style)
error_msg = ("allowed values for style are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
style_template = style.value
self.left_border_char = style_template.left_border_char
self.right_border_char = style_template.right_border_char
self.top_border_char = style_template.top_border_char
self.bottom_border_char = style_template.bottom_border_char
self.header_separator_char = style_template.header_separator_char
self.column_separator_char = style_template.column_separator_char
self.row_separator_char = style_template.row_separator_char
self.intersect_top_left = style_template.intersect_top_left
self.intersect_top_mid = style_template.intersect_top_mid
self.intersect_top_right = style_template.intersect_top_right
self.intersect_header_left = style_template.intersect_header_left
self.intersect_header_mid = style_template.intersect_header_mid
self.intersect_header_right = style_template.intersect_header_right
self.intersect_row_left = style_template.intersect_row_left
self.intersect_row_mid = style_template.intersect_row_mid
self.intersect_row_right = style_template.intersect_row_right
self.intersect_bottom_left = style_template.intersect_bottom_left
self.intersect_bottom_mid = style_template.intersect_bottom_mid
self.intersect_bottom_right = style_template.intersect_bottom_right
def _calculate_column_widths(self):
"""Calculate width of column automatically based on data."""
table_width = self.get_table_width()
lpw, rpw = self._left_padding_widths, self._right_padding_widths
pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
max_widths = [0 for index in range(self._column_count)]
offset = table_width - sum(self._column_widths) + sum(pad_widths)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
for index, column in enumerate(zip(*self._table)):
max_length = 0
for i in column:
for j in to_unicode(i).split('\n'):
output_str = get_output_str(j, self.detect_numerics,
self.numeric_precision,
self.sign_mode.value)
max_length = max(max_length, termwidth(output_str))
for i in to_unicode(self._column_headers[index]).split('\n'):
output_str = get_output_str(i, self.detect_numerics,
self.numeric_precision,
self.sign_mode.value)
max_length = max(max_length, termwidth(output_str))
max_widths[index] += max_length
sum_ = sum(max_widths)
desired_sum = self._max_table_width - offset
# Set flag for columns who are within their fair share
temp_sum = 0
flag = [0] * len(max_widths)
for i, width in enumerate(max_widths):
if width <= int(desired_sum / self._column_count):
temp_sum += width
flag[i] = 1
else:
# Allocate atleast 1 character width to the column
temp_sum += 1
avail_space = desired_sum - temp_sum
actual_space = sum_ - temp_sum
shrinked_columns = {}
# Columns which exceed their fair share should be shrinked based on
# how much space is left for the table
for i, width in enumerate(max_widths):
self.column_widths[i] = width
if not flag[i]:
new_width = 1 + int((width-1) * avail_space / actual_space)
if new_width < width:
self.column_widths[i] = new_width
shrinked_columns[new_width] = i
# Divide any remaining space among shrinked columns
if shrinked_columns:
extra = (self._max_table_width
- offset
- sum(self.column_widths))
actual_space = sum(shrinked_columns)
if extra > 0:
for i, width in enumerate(sorted(shrinked_columns)):
index = shrinked_columns[width]
extra_width = int(width * extra / actual_space)
self.column_widths[i] += extra_width
if i == (len(shrinked_columns) - 1):
extra = (self._max_table_width
- offset
- sum(self.column_widths))
self.column_widths[index] += extra
for i in range(self.column_count):
self.column_widths[i] += pad_widths[i]
    def auto_calculate_width(self):  # pragma : no cover
        """Deprecated public alias of `_calculate_column_widths`."""
        deprecation("'auto_calculate_width()' is deprecated")
        self._calculate_column_widths()
    def set_padding_widths(self, pad_width):
        """Set width for left and right padding of the columns of the table.

        Convenience wrapper that assigns the same widths to both
        `left_padding_widths` and `right_padding_widths`.

        Parameters
        ----------
        pad_width : array_like
            pad widths for the columns.
        """
        self.left_padding_widths = pad_width
        self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
"""Stable sort of the table *IN-PLACE* with respect to a column.
Parameters
----------
key: int, str
index or header of the column. Normal list rules apply.
reverse : bool
If `True` then table is sorted as if each comparison was reversed.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError("'key' must either be 'int' or 'str'")
self._table.sort(key=operator.itemgetter(index), reverse=reverse)
    def copy(self):
        """Return a shallow copy of the table.

        Implemented as a full-range slice, so rows are re-wrapped to
        reference the new table object.

        Returns
        -------
        BeautifulTable:
            shallow copy of the BeautifulTable instance.
        """
        return self[:]
    def get_column_header(self, index):
        """Return the header string of the column at `index`.

        Parameters
        ----------
        index: int
            Normal list rules apply.
        """
        return self._column_headers[index]
    def get_column_index(self, header):
        """Get index of a column from it's header.

        Parameters
        ----------
        header: str
            header of the column.

        Raises
        ------
        KeyError:
            If no column could be found corresponding to `header`.
            (The ValueError from the failed list lookup is converted
            into a KeyError.)
        """
        try:
            index = self._column_headers.index(header)
            return index
        except ValueError:
            # Convert to a KeyError while suppressing the chained context.
            raise_suppressed(KeyError(("'{}' is not a header for any "
                                       "column").format(header)))
def get_column(self, key):
"""Return an iterator to a column.
Parameters
----------
key : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If key is not of type `int`, or `str`.
Returns
-------
iter:
Iterator to the specified column.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError(("key must be an int or str, "
"not {}").format(type(key).__name__))
return iter(map(operator.itemgetter(index), self._table))
    def reverse(self):
        """Reverse the table row-wise *IN PLACE*."""
        self._table.reverse()
def pop_row(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int
index of the row. Normal list rules apply.
"""
row = self._table.pop(index)
return row
    def pop_column(self, index=-1):
        """Remove the column at `index` (default last).

        Note: despite the symmetry with `pop_row`, nothing is returned.

        Parameters
        ----------
        index : int, str
            index of the column, or the header of the column.
            If index is specified, then normal list rules apply.

        Raises
        ------
        TypeError:
            If index is not an instance of `int`, or `str`.

        IndexError:
            If Table is empty.

        KeyError:
            If a `str` index does not match any column header.
        """
        if isinstance(index, int):
            pass
        elif isinstance(index, basestring):
            # Resolve a header string to its numeric index.
            index = self.get_column_index(index)
        else:
            raise TypeError(("column index must be an integer or a string, "
                             "not {}").format(type(index).__name__))
        if self._column_count == 0:
            raise IndexError("pop from empty table")
        if self._column_count == 1:
            # This is the last column. So we should clear the table to avoid
            # empty rows
            self.clear(clear_metadata=True)
        else:
            # Not the last column. safe to pop from row
            self._column_count -= 1
            self._column_alignments._pop(index)
            self._column_widths._pop(index)
            self._left_padding_widths._pop(index)
            self._right_padding_widths._pop(index)
            self._column_headers._pop(index)
            for row in self._table:
                row._pop(index)
def insert_row(self, index, row):
"""Insert a row before index in the table.
Parameters
----------
index : int
List index rules apply
row : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `row` is not an iterable.
ValueError:
If size of `row` is inconsistent with the current number
of columns.
"""
row = self._validate_row(row)
row_obj = RowData(self, row)
self._table.insert(index, row_obj)
    def append_row(self, row):
        """Append a row to end of the table.

        Delegates to `insert_row` at the end position, so the same
        validation (length and iterability) applies.

        Parameters
        ----------
        row : iterable
            Any iterable of appropriate length.
        """
        self.insert_row(len(self._table), row)
def update_row(self, key, value):
"""Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
key : int or slice
index of the row, or a slice object.
value : iterable
If an index is specified, `value` should be an iterable
of appropriate length. Instead if a slice object is
passed as key, value should be an iterable of rows.
Raises
------
IndexError:
If index specified is out of range.
TypeError:
If `value` is of incorrect type.
ValueError:
If length of row does not matches number of columns.
"""
if isinstance(key, int):
row = self._validate_row(value, init_table_if_required=False)
row_obj = RowData(self, row)
self._table[key] = row_obj
elif isinstance(key, slice):
row_obj_list = []
for row in value:
row_ = self._validate_row(row, init_table_if_required=True)
row_obj_list.append(RowData(self, row_))
self._table[key] = row_obj_list
else:
raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
"""Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
header : str
Header of the column
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If length of `column` is shorter than number of rows.
ValueError:
If no column exists with title `header`.
"""
index = self.get_column_index(header)
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
for row, new_item in zip(self._table, column):
row[index] = new_item
    def append_column(self, header, column):
        """Append a column to end of the table.

        Delegates to `insert_column` at the end position, so its
        validation and error behavior apply here as well.

        Parameters
        ----------
        header : str
            Title of the column

        column : iterable
            Any iterable of appropriate length.
        """
        self.insert_column(self._column_count, header, column)
def clear(self, clear_metadata=False):
"""Clear the contents of the table.
Clear all rows of the table, and if specified clears all column
specific data.
Parameters
----------
clear_metadata : bool, optional
If it is true(default False), all metadata of columns such as their
alignment, padding, width, etc. are also cleared and number of
columns is set to 0.
"""
# Cannot use clear method to support Python 2.7
del self._table[:]
if clear_metadata:
self._initialize_table(0)
    def _get_horizontal_line(self, char, intersect_left,
                             intersect_mid, intersect_right):
        """Get a horizontal line for the table.

        Internal method used to actually get all horizontal lines in the table.
        Column width should be set prior to calling this method. This method
        detects intersection and handles it according to the values of
        `intersect_*_*` attributes.

        Parameters
        ----------
        char : str
            Character used to draw the line.

        Returns
        -------
        str
            String which will be printed as the Top border of the table.
        """
        width = self.get_table_width()
        # Tile `char` until it covers the full width, then trim. A char
        # with zero terminal width (e.g. '') falls back to spaces.
        try:
            line = list(char * (int(width/termwidth(char)) + 1))[:width]
        except ZeroDivisionError:
            line = [' '] * width

        if len(line) == 0:
            return ''
        # Only if Special Intersection is enabled and horizontal line is
        # visible
        if not char.isspace():
            # If left border is enabled and it is visible
            visible_junc = not intersect_left.isspace()
            if termwidth(self.left_border_char) > 0:
                if not (self.left_border_char.isspace() and visible_junc):
                    # Overwrite the leftmost cells with the left junction,
                    # clipped to the narrower of border/junction widths.
                    length = min(termwidth(self.left_border_char),
                                 termwidth(intersect_left))
                    for i in range(length):
                        line[i] = intersect_left[i]
            visible_junc = not intersect_right.isspace()
            # If right border is enabled and it is visible
            if termwidth(self.right_border_char) > 0:
                if not (self.right_border_char.isspace() and visible_junc):
                    # Overwrite the rightmost cells, indexing from the end.
                    length = min(termwidth(self.right_border_char),
                                 termwidth(intersect_right))
                    for i in range(length):
                        line[-i-1] = intersect_right[-i-1]
            visible_junc = not intersect_mid.isspace()
            # If column separator is enabled and it is visible
            if termwidth(self.column_separator_char):
                if not (self.column_separator_char.isspace() and visible_junc):
                    # Walk across the line, placing a mid junction at each
                    # column boundary.
                    index = termwidth(self.left_border_char)
                    for i in range(self._column_count-1):
                        index += (self._column_widths[i])
                        length = min(termwidth(self.column_separator_char),
                                     termwidth(intersect_mid))
                        for i in range(length):
                            line[index+i] = intersect_mid[i]
                        index += termwidth(self.column_separator_char)
        return ''.join(line)
    def _get_top_border(self):
        # Top border line using the top_* junction characters.
        return self._get_horizontal_line(self.top_border_char,
                                         self.intersect_top_left,
                                         self.intersect_top_mid,
                                         self.intersect_top_right)

    def get_top_border(self):  # pragma : no cover
        """Get the Top border of table (deprecated public alias).

        Column width should be set prior to calling this method.

        Returns
        -------
        str
            String which will be printed as the Top border of the table.
        """
        deprecation("'get_top_border()' is deprecated")
        return self._get_top_border()

    def _get_header_separator(self):
        # Line between the header row and the data rows.
        return self._get_horizontal_line(self.header_separator_char,
                                         self.intersect_header_left,
                                         self.intersect_header_mid,
                                         self.intersect_header_right)

    def get_header_separator(self):  # pragma : no cover
        """Get the Header separator of table (deprecated public alias).

        Column width should be set prior to calling this method.

        Returns
        -------
        str
            String which will be printed as Header separator of the table.
        """
        deprecation("'get_header_separator()' is deprecated")
        return self._get_header_separator()

    def _get_row_separator(self):
        # Line drawn between two consecutive data rows.
        return self._get_horizontal_line(self.row_separator_char,
                                         self.intersect_row_left,
                                         self.intersect_row_mid,
                                         self.intersect_row_right)

    def get_row_separator(self):  # pragma : no cover
        """Get the Row separator of table (deprecated public alias).

        Column width should be set prior to calling this method.

        Returns
        -------
        str
            String which will be printed as Row separator of the table.
        """
        deprecation("'get_row_separator()' is deprecated")
        return self._get_row_separator()

    def _get_bottom_border(self):
        # Bottom border line using the bottom_* junction characters.
        return self._get_horizontal_line(self.bottom_border_char,
                                         self.intersect_bottom_left,
                                         self.intersect_bottom_mid,
                                         self.intersect_bottom_right)

    def get_bottom_border(self):  # pragma : no cover
        """Get the Bottom border of table (deprecated public alias).

        Column width should be set prior to calling this method.

        Returns
        -------
        str
            String which will be printed as Bottom border of the table.
        """
        deprecation("'get_bottom_border()' is deprecated")
        return self._get_bottom_border()
def get_table_width(self):
"""Get the width of the table as number of characters.
Column width should be set prior to calling this method.
Returns
-------
int
Width of the table as number of characters.
"""
if self.column_count == 0:
return 0
width = sum(self._column_widths)
width += ((self._column_count - 1)
* termwidth(self.column_separator_char))
width += termwidth(self.left_border_char)
width += termwidth(self.right_border_char)
return width
    def get_string(self, recalculate_width=True):
        """Get the table as a String.

        Note: when `serialno` is enabled, a serial-number column is
        temporarily inserted at index 0 and popped again before
        returning, so the table is left unchanged.

        Parameters
        ----------
        recalculate_width : bool, optional
            If width for each column should be recalculated(default True).
            Note that width is always calculated if it wasn't set
            explicitly when this method is called for the first time ,
            regardless of the value of `recalculate_width`.

        Returns
        -------
        str:
            Table as a string.
        """

        # Empty table. returning empty string.
        if len(self._table) == 0:
            return ''

        if self.serialno and self.column_count > 0:
            self.insert_column(0, self.serialno_header,
                               range(1, len(self) + 1))

        # Should widths of column be recalculated
        if recalculate_width or sum(self._column_widths) == 0:
            self._calculate_column_widths()

        string_ = []

        # Drawing the top border
        if self.top_border_char:
            string_.append(
                self._get_top_border())

        # Print headers if not empty or only spaces
        if ''.join(self._column_headers).strip():
            headers = to_unicode(self._column_headers)
            string_.append(headers)

            if self.header_separator_char:
                string_.append(
                    self._get_header_separator())

        # Printing rows
        first_row_encountered = False
        for row in self._table:
            # Row separators go *between* rows, so skip before the first.
            if first_row_encountered and self.row_separator_char:
                string_.append(
                    self._get_row_separator())
            first_row_encountered = True
            content = to_unicode(row)
            string_.append(content)

        # Drawing the bottom border
        if self.bottom_border_char:
            string_.append(
                self._get_bottom_border())

        # Undo the temporary serial-number column inserted above.
        if self.serialno and self.column_count > 0:
            self.pop_column(0)

        return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.append_column | python | def append_column(self, header, column):
self.insert_column(self._column_count, header, column) | Append a column to end of the table.
Parameters
----------
header : str
Title of the column
column : iterable
Any iterable of appropriate length. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L1018-L1029 | [
"def insert_column(self, index, header, column):\n \"\"\"Insert a column before `index` in the table.\n\n If length of column is bigger than number of rows, lets say\n `k`, only the first `k` values of `column` is considered.\n If column is shorter than 'k', ValueError is raised.\n\n Note that Table remains in consistent state even if column\n is too short. Any changes made by this method is rolled back\n before raising the exception.\n\n Parameters\n ----------\n index : int\n List index rules apply.\n\n header : str\n Title of the column.\n\n column : iterable\n Any iterable of appropriate length.\n\n Raises\n ------\n TypeError:\n If `header` is not of type `str`.\n\n ValueError:\n If length of `column` is shorter than number of rows.\n \"\"\"\n if self._column_count == 0:\n self.column_headers = HeaderData(self, [header])\n self._table = [RowData(self, [i]) for i in column]\n else:\n if not isinstance(header, basestring):\n raise TypeError(\"header must be of type str\")\n column_length = 0\n for i, (row, new_item) in enumerate(zip(self._table, column)):\n row._insert(index, new_item)\n column_length = i\n if column_length == len(self._table) - 1:\n self._column_count += 1\n self._column_headers._insert(index, header)\n self._column_alignments._insert(index, self.default_alignment)\n self._column_widths._insert(index, 0)\n self._left_padding_widths._insert(index, self.default_padding)\n self._right_padding_widths._insert(index, self.default_padding)\n else:\n # Roll back changes so that table remains in consistent state\n for j in range(column_length, -1, -1):\n self._table[j]._pop(index)\n raise ValueError((\"length of 'column' should be atleast {}, \"\n \"got {}\").format(len(self._table),\n column_length + 1))\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
             default_alignment=enums.ALIGN_CENTER,
             default_padding=1):
    """Initialize an empty table with default formatting options.

    Parameters
    ----------
    max_width : int, optional
        Maximum width of the table in characters (default 80).
    default_alignment : int, optional
        Alignment applied to newly created columns.
    default_padding : int, optional
        Left/right padding width applied to newly created columns.
    """
    # Apply the default style first: it populates every *_char drawing
    # attribute that later assignments and property reads depend on.
    self.set_style(enums.STYLE_DEFAULT)
    self.numeric_precision = 3
    self.serialno = False
    self.serialno_header = "SN"
    self.detect_numerics = True
    self._column_count = 0
    self._sign_mode = enums.SM_MINUS
    self._width_exceed_policy = enums.WEP_WRAP
    self._column_pad = " "
    # These three go through property setters so the values get validated.
    self.default_alignment = default_alignment
    self.default_padding = default_padding
    self.max_table_width = max_width
    # Reset all per-column metadata, then start with an empty row list.
    self._initialize_table(0)
    self._table = []
def __setattr__(self, name, value):
    """Intercept attribute writes to validate style characters.

    Every attribute used to draw borders and separators must be a
    string; any other value would break rendering, so reject it here.
    """
    _char_attrs = frozenset((
        'left_border_char', 'right_border_char', 'top_border_char',
        'bottom_border_char', 'header_separator_char',
        'column_separator_char', 'row_separator_char',
        'intersect_top_left', 'intersect_top_mid',
        'intersect_top_right', 'intersect_header_left',
        'intersect_header_mid', 'intersect_header_right',
        'intersect_row_left', 'intersect_row_mid',
        'intersect_row_right', 'intersect_bottom_left',
        'intersect_bottom_mid', 'intersect_bottom_right'))
    if to_unicode(name) in _char_attrs and not isinstance(value, basestring):
        raise TypeError(
            ("Expected {attr} to be of type 'str', "
             "got '{attr_type}'").format(attr=name,
                                         attr_type=type(value).__name__))
    super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
    """Get the number of columns in the table(read only)"""
    # Derived from column metadata, not from the rows themselves.
    return self._column_count
@property
def intersection_char(self):  # pragma : no cover
    """Character used to draw intersection of perpendicular lines.

    Deprecated: read the specific ``intersect_*_*`` attribute instead.
    Returns the top-left intersection character as a representative.
    """
    deprecation("'intersection_char' is deprecated, Use specific "
                "`intersect_*_*` attribute instead")
    return self.intersect_top_left

@intersection_char.setter
def intersection_char(self, value):  # pragma : no cover
    """Deprecated: fan the value out to all twelve intersection chars."""
    deprecation("'intersection_char' is deprecated, Use specific "
                "`intersect_*_*` attributes instead")
    for position in ('top_left', 'top_mid', 'top_right',
                     'header_left', 'header_mid', 'header_right',
                     'row_left', 'row_mid', 'row_right',
                     'bottom_left', 'bottom_mid', 'bottom_right'):
        # setattr routes through __setattr__, so type checks still run.
        setattr(self, 'intersect_' + position, value)
@property
def sign_mode(self):
    """Attribute to control how signs are displayed for numerical data.

    It can be one of the following:

    ======================== =============================================
    Option                   Meaning
    ======================== =============================================
    beautifultable.SM_PLUS   A sign should be used for both +ve and -ve
                             numbers.
    beautifultable.SM_MINUS  A sign should only be used for -ve numbers.
    beautifultable.SM_SPACE  A leading space should be used for +ve
                             numbers and a minus sign for -ve numbers.
    ======================== =============================================
    """
    return self._sign_mode

@sign_mode.setter
def sign_mode(self, value):
    if not isinstance(value, enums.SignMode):
        # Build a helpful message listing every permitted enum member.
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.SignMode)
        error_msg = ("allowed values for sign_mode are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    self._sign_mode = value

@property
def width_exceed_policy(self):
    """Attribute to control how exceeding column width should be handled.

    It can be one of the following:

    ============================ =========================================
    Option                       Meaning
    ============================ =========================================
    beautifultable.WEP_WRAP      An item is wrapped so every line fits
                                 within it's column width.
    beautifultable.WEP_STRIP     An item is stripped to fit in it's
                                 column.
    beautifultable.WEP_ELLIPSIS  An item is stripped to fit in it's
                                 column and appended with ...(Ellipsis).
    ============================ =========================================
    """
    return self._width_exceed_policy

@width_exceed_policy.setter
def width_exceed_policy(self, value):
    if not isinstance(value, enums.WidthExceedPolicy):
        # Same validation pattern as sign_mode: only enum members allowed.
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.WidthExceedPolicy)
        error_msg = ("allowed values for width_exceed_policy are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    self._width_exceed_policy = value

@property
def default_alignment(self):
    """Attribute to control the alignment of newly created columns.

    It can be one of the following:

    ============================ =========================================
    Option                       Meaning
    ============================ =========================================
    beautifultable.ALIGN_LEFT    New columns are left aligned.
    beautifultable.ALIGN_CENTER  New columns are center aligned.
    beautifultable.ALIGN_RIGHT   New columns are right aligned.
    ============================ =========================================
    """
    return self._default_alignment

@default_alignment.setter
def default_alignment(self, value):
    if not isinstance(value, enums.Alignment):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Alignment)
        error_msg = ("allowed values for default_alignment are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    self._default_alignment = value
@property
def default_padding(self):
    """Padding width (left and right) applied to newly created columns."""
    return self._default_padding

@default_padding.setter
def default_padding(self, value):
    # Reject wrong types first, then non-positive values.
    if not isinstance(value, int):
        raise TypeError("padding must be an integer")
    if value <= 0:
        raise ValueError("padding must be more than 0")
    self._default_padding = value
@property
def column_widths(self):
    """get/set width for the columns of the table.

    Width of the column specifies the max number of characters
    a column can contain. Larger characters are handled according to
    the value of `width_exceed_policy`.
    """
    return self._column_widths

@column_widths.setter
def column_widths(self, value):
    # _validate_row checks iterability and length against column count.
    width = self._validate_row(value)
    self._column_widths = PositiveIntegerMetaData(self, width)

@property
def column_headers(self):
    """get/set titles for the columns of the table.

    It can be any iterable having all members an instance of `str`.
    """
    return self._column_headers

@column_headers.setter
def column_headers(self, value):
    header = self._validate_row(value)
    # Every header must be a string; reject anything else up front.
    for i in header:
        if not isinstance(i, basestring):
            raise TypeError(("Headers should be of type 'str', "
                             "not {}").format(type(i)))
    self._column_headers = HeaderData(self, header)

@property
def column_alignments(self):
    """get/set alignment of the columns of the table.

    It can be any iterable containing only the following:

    * beautifultable.ALIGN_LEFT
    * beautifultable.ALIGN_CENTER
    * beautifultable.ALIGN_RIGHT
    """
    return self._column_alignments

@column_alignments.setter
def column_alignments(self, value):
    alignment = self._validate_row(value)
    self._column_alignments = AlignmentMetaData(self, alignment)

@property
def left_padding_widths(self):
    """get/set width for left padding of the columns of the table.

    Left width of the padding specifies the number of characters
    on the left of a column reserved for padding. By default it is 1.
    """
    return self._left_padding_widths

@left_padding_widths.setter
def left_padding_widths(self, value):
    pad_width = self._validate_row(value)
    self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)

@property
def right_padding_widths(self):
    """get/set width for right padding of the columns of the table.

    Right width of the padding specifies the number of characters
    on the right of a column reserved for padding. By default it is 1.
    """
    return self._right_padding_widths

@right_padding_widths.setter
def right_padding_widths(self, value):
    pad_width = self._validate_row(value)
    self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
    """get/set the maximum width of the table.

    The width of the table is guaranteed to not exceed this value. If it
    is not possible to print a given table with the width provided, this
    value will automatically adjust.
    """
    # Width consumed by borders and column separators alone.
    offset = ((self._column_count - 1)
              * termwidth(self.column_separator_char))
    offset += termwidth(self.left_border_char)
    offset += termwidth(self.right_border_char)
    # NOTE(review): this getter mutates state — it raises the stored
    # maximum so every column can hold at least one character. Reading
    # the property is therefore not side-effect free.
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)
    return self._max_table_width

@max_table_width.setter
def max_table_width(self, value):
    self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
    """Sets the column count of the table.

    This method is called to set the number of columns for the first time.
    It resets all per-column metadata (headers, alignment, width, padding).

    Parameters
    ----------
    column_count : int
        number of columns in the table
    """
    header = [''] * column_count
    alignment = [self.default_alignment] * column_count
    width = [0] * column_count
    padding = [self.default_padding] * column_count
    self._column_count = column_count
    self._column_headers = HeaderData(self, header)
    self._column_alignments = AlignmentMetaData(self, alignment)
    self._column_widths = PositiveIntegerMetaData(self, width)
    # NOTE(review): the same `padding` list object is handed to both
    # left and right metadata; this assumes the MetaData constructor
    # copies its input — confirm to rule out shared-state aliasing.
    self._left_padding_widths = PositiveIntegerMetaData(self, padding)
    self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
    # TODO: Rename this method
    """Materialize *value* as a list and check it fits the table.

    A lone string is rejected even though it is iterable, because a
    string is never a meaningful row. When the table has no columns yet
    and *init_table_if_required* is true, the first row defines the
    column count.
    """
    if isinstance(value, basestring) or not isinstance(value, Iterable):
        raise TypeError("parameter must be an iterable")
    candidate = list(value)
    if init_table_if_required and self._column_count == 0:
        self._initialize_table(len(candidate))
    if len(candidate) != self._column_count:
        raise ValueError(("'Expected iterable of length {}, "
                          "got {}").format(self._column_count,
                                           len(candidate)))
    return candidate
def __getitem__(self, key):
    """Get a row, or a column, or a new table by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, returns a row.
        If key is an `str`, returns iterator to a column with header `key`.
        If key is a slice object, returns a new table sliced according to
        rows.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, slice):
        new_table = copy.copy(self)
        # Every child of BaseRow class needs to be reassigned so that
        # they contain a reference to the new table rather than the old.
        # This was a cause of a nasty bug once.
        new_table.column_headers = self.column_headers
        new_table.column_alignments = self.column_alignments
        new_table.column_widths = self.column_widths
        new_table.left_padding_widths = self.left_padding_widths
        # Bug fix: this line previously copied left_padding_widths a
        # second time, so sliced tables silently lost their right
        # padding configuration.
        new_table.right_padding_widths = self.right_padding_widths
        new_table._table = []
        for row in self._table[key]:
            new_table.append_row(row)
        return new_table
    elif isinstance(key, int):
        return self._table[key]
    elif isinstance(key, basestring):
        return self.get_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
    """Delete a row, or a column, or multiple rows by slicing.

    An ``int`` key deletes one row, a slice deletes several rows, and a
    ``str`` key removes the first column whose header matches.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, (int, slice)):
        del self._table[key]
        return
    if isinstance(key, basestring):
        return self.pop_column(key)
    raise TypeError(("table indices must be integers, strings or "
                     "slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
    """Update a row, or a column, or multiple rows by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, updates a row.
        If key is an `str`, updates the column with header as `key`.
        If key is a slice object, updates multiple rows according to slice
        rules.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    """
    # Delegate to the dedicated update methods which perform validation.
    if isinstance(key, (int, slice)):
        self.update_row(key, value)
    elif isinstance(key, basestring):
        self.update_column(key, value)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __len__(self):
    """Return the number of rows in the table."""
    return len(self._table)

def __contains__(self, key):
    """Membership test: a str key is looked up in the headers, any
    other iterable is looked up among the rows."""
    if isinstance(key, basestring):
        return key in self._column_headers
    elif isinstance(key, Iterable):
        return key in self._table
    else:
        raise TypeError(("'key' must be str or Iterable, "
                         "not {}").format(type(key).__name__))

def __iter__(self):
    """Iterate over the rows of the table."""
    return iter(self._table)

def __next__(self):
    # NOTE(review): `self._table` is a list and lists are not
    # iterators, so calling next() on it raises TypeError. This method
    # looks broken/unreachable — confirm whether it can be removed.
    return next(self._table)

def __repr__(self):
    return repr(self._table)

def __str__(self):
    """Render the full table; equivalent to get_string()."""
    return self.get_string()
def set_style(self, style):
    """Set the style of the table from a predefined set of styles.

    Copies every border, separator and intersection character from the
    chosen style template onto this table.

    Parameters
    ----------
    style: Style
        One of the ``beautifultable.STYLE_*`` members, e.g.
        STYLE_DEFAULT, STYLE_NONE, STYLE_DOTTED, STYLE_MYSQL,
        STYLE_SEPARATED, STYLE_COMPACT, STYLE_MARKDOWN,
        STYLE_RESTRUCTURED_TEXT, STYLE_BOX, STYLE_BOX_DOUBLED,
        STYLE_BOX_ROUNDED, STYLE_GRID.
    """
    if not isinstance(style, enums.Style):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Style)
        error_msg = ("allowed values for style are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    template = style.value
    # Copy each drawing attribute from the template; setattr routes
    # through __setattr__ so the usual string validation still applies.
    for attr in ('left_border_char', 'right_border_char',
                 'top_border_char', 'bottom_border_char',
                 'header_separator_char', 'column_separator_char',
                 'row_separator_char',
                 'intersect_top_left', 'intersect_top_mid',
                 'intersect_top_right',
                 'intersect_header_left', 'intersect_header_mid',
                 'intersect_header_right',
                 'intersect_row_left', 'intersect_row_mid',
                 'intersect_row_right',
                 'intersect_bottom_left', 'intersect_bottom_mid',
                 'intersect_bottom_right'):
        setattr(self, attr, getattr(template, attr))
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data.

    First measures the widest cell (and header) per column, then shrinks
    columns that exceed their fair share so the table fits within
    `max_table_width`, and finally redistributes any leftover space
    among the shrunk columns.
    """
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # `offset` = space taken by borders, separators and padding.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)
    # Measure the widest rendered line per column (cells and header).
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length
    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset
    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1
    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    # NOTE(review): shrinked_columns maps new_width -> column index, so
    # two columns shrunk to the same width overwrite each other — confirm
    # whether this collision is acceptable.
    shrinked_columns = {}
    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                shrinked_columns[new_width] = i
    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # NOTE(review): this indexes by the sorted-enumeration
                # position `i`, while the final top-up below uses the
                # real column `index` — confirm `i` is not a typo.
                self.column_widths[i] += extra_width
                if i == (len(shrinked_columns) - 1):
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra
    # Padding is part of the final column width.
    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]

def auto_calculate_width(self):  # pragma : no cover
    """Deprecated public alias of _calculate_column_widths()."""
    deprecation("'auto_calculate_width()' is deprecated")
    self._calculate_column_widths()
def set_padding_widths(self, pad_width):
    """Set width for left and right padding of the columns of the table.

    Convenience wrapper that assigns the same padding on both sides.

    Parameters
    ----------
    pad_width : array_like
        pad widths for the columns.
    """
    self.left_padding_widths = pad_width
    self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
    """Stable sort of the table *IN-PLACE* with respect to a column.

    Parameters
    ----------
    key: int, str
        index or header of the column. Normal list rules apply.
    reverse : bool
        If `True` then table is sorted as if each comparison was reversed.
    """
    if isinstance(key, basestring):
        column = self.get_column_index(key)
    elif isinstance(key, int):
        column = key
    else:
        raise TypeError("'key' must either be 'int' or 'str'")
    # list.sort is stable, so equal keys keep their relative order.
    self._table.sort(key=operator.itemgetter(column), reverse=reverse)
def copy(self):
    """Return a shallow copy of the table.

    Returns
    -------
    BeautifulTable:
        shallow copy of the BeautifulTable instance.
    """
    # Slicing with [:] goes through __getitem__, which rebuilds the
    # per-table metadata for the copy.
    return self[:]

def get_column_header(self, index):
    """Get header of a column from it's index.

    Parameters
    ----------
    index: int
        Normal list rules apply.
    """
    return self._column_headers[index]

def get_column_index(self, header):
    """Get index of a column from it's header.

    Parameters
    ----------
    header: str
        header of the column.

    Raises
    ------
    KeyError:
        If no column could be found corresponding to `header`.
    """
    try:
        index = self._column_headers.index(header)
        return index
    except ValueError:
        # Translate the list's ValueError into a KeyError, suppressing
        # the original traceback context.
        raise_suppressed(KeyError(("'{}' is not a header for any "
                                   "column").format(header)))
def get_column(self, key):
    """Return an iterator to a column.

    Parameters
    ----------
    key : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If key is not of type `int`, or `str`.

    Returns
    -------
    iter:
        Iterator to the specified column.
    """
    if isinstance(key, int):
        index = key
    elif isinstance(key, basestring):
        index = self.get_column_index(key)
    else:
        raise TypeError(("key must be an int or str, "
                         "not {}").format(type(key).__name__))
    # Lazily pick the index-th cell out of every row.
    return iter(map(operator.itemgetter(index), self._table))

def reverse(self):
    """Reverse the table row-wise *IN PLACE*."""
    self._table.reverse()
def pop_row(self, index=-1):
    """Remove and return the row at *index* (default last).

    Parameters
    ----------
    index : int
        index of the row. Normal list rules apply.
    """
    return self._table.pop(index)
def pop_column(self, index=-1):
    """Remove the column at index (default last).

    NOTE(review): despite earlier docs claiming a return value, this
    method returns None — the removed column data is discarded.

    Parameters
    ----------
    index : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If index is not an instance of `int`, or `str`.

    IndexError:
        If Table is empty.
    """
    if isinstance(index, int):
        pass
    elif isinstance(index, basestring):
        index = self.get_column_index(index)
    else:
        raise TypeError(("column index must be an integer or a string, "
                         "not {}").format(type(index).__name__))
    if self._column_count == 0:
        raise IndexError("pop from empty table")
    if self._column_count == 1:
        # This is the last column. So we should clear the table to avoid
        # empty rows
        self.clear(clear_metadata=True)
    else:
        # Not the last column. safe to pop from row
        self._column_count -= 1
        self._column_alignments._pop(index)
        self._column_widths._pop(index)
        self._left_padding_widths._pop(index)
        self._right_padding_widths._pop(index)
        self._column_headers._pop(index)
        # Remove the corresponding cell from every row as well.
        for row in self._table:
            row._pop(index)
def insert_row(self, index, row):
    """Insert a row before index in the table.

    Parameters
    ----------
    index : int
        List index rules apply

    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.

    ValueError:
        If size of `row` is inconsistent with the current number
        of columns.
    """
    # _validate_row also initializes the column count on the first row.
    row = self._validate_row(row)
    row_obj = RowData(self, row)
    self._table.insert(index, row_obj)

def append_row(self, row):
    """Append a row to end of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    self.insert_row(len(self._table), row)
def update_row(self, key, value):
    """Update a row, or multiple rows when a slice is given.

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.

    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If index specified is out of range.

    TypeError:
        If `value` is of incorrect type.

    ValueError:
        If length of row does not matches number of columns.
    """
    if isinstance(key, int):
        # A single row must match the existing column count exactly.
        row = self._validate_row(value, init_table_if_required=False)
        row_obj = RowData(self, row)
        self._table[key] = row_obj
    elif isinstance(key, slice):
        row_obj_list = []
        # NOTE(review): unlike the int branch, the slice branch allows
        # initializing the column count from the first row — confirm
        # this asymmetry is intended.
        for row in value:
            row_ = self._validate_row(row, init_table_if_required=True)
            row_obj_list.append(RowData(self, row_))
        self._table[key] = row_obj_list
    else:
        raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
    """Update a column named `header` in the table.

    If length of column is smaller than number of rows, lets say
    `k`, only the first `k` values in the column is updated.

    Parameters
    ----------
    header : str
        Header of the column

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    KeyError:
        If no column exists with title `header`.
    """
    # Bug fix: validate the header type *before* using it, so a
    # non-string header raises TypeError instead of a lookup error
    # from get_column_index.
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    # zip stops at the shorter sequence, so a short column only updates
    # the first k rows.
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.

    header : str
        Title of the column.

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # First column ever: it defines the table's rows.
        # NOTE(review): the header type is not validated in this branch,
        # unlike the branch below — confirm whether that is intended.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        column_length = 0
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        # NOTE(review): when the table has columns but zero rows, the
        # zip above is empty, column_length stays 0, and the check below
        # fails (0 == -1), raising ValueError even for a valid column —
        # confirm this edge case.
        if column_length == len(self._table) - 1:
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))
def clear(self, clear_metadata=False):
    """Clear the contents of the table.

    Clear all rows of the table, and if specified clears all column
    specific data.

    Parameters
    ----------
    clear_metadata : bool, optional
        If it is true(default False), all metadata of columns such as their
        alignment, padding, width, etc. are also cleared and number of
        columns is set to 0.
    """
    # Cannot use clear method to support Python 2.7
    del self._table[:]
    if clear_metadata:
        self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the table.
    Column width should be set prior to calling this method. This method
    detects intersection and handles it according to the values of
    `intersect_*_*` attributes.

    Parameters
    ----------
    char : str
        Character used to draw the line.
    intersect_left : str
        Character drawn where the line meets the left border.
    intersect_mid : str
        Character drawn where the line crosses a column separator.
    intersect_right : str
        Character drawn where the line meets the right border.

    Returns
    -------
    str
        String which will be printed as a horizontal line of the table.
    """
    width = self.get_table_width()
    # Tile `char` to at least the full width, then trim; a zero-width
    # char (e.g. '') falls back to a line of spaces.
    try:
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        line = [' '] * width

    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk across columns, stamping the mid-intersection char
                # at each separator position.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)
    return ''.join(line)
def _get_top_border(self):
    """Build the top border line using the top intersection chars."""
    return self._get_horizontal_line(self.top_border_char,
                                     self.intersect_top_left,
                                     self.intersect_top_mid,
                                     self.intersect_top_right)

def get_top_border(self):  # pragma : no cover
    """Get the Top border of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    # Deprecated public wrapper around the private builder.
    deprecation("'get_top_border()' is deprecated")
    return self._get_top_border()

def _get_header_separator(self):
    """Build the line separating the header from the data rows."""
    return self._get_horizontal_line(self.header_separator_char,
                                     self.intersect_header_left,
                                     self.intersect_header_mid,
                                     self.intersect_header_right)

def get_header_separator(self):  # pragma : no cover
    """Get the Header separator of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    deprecation("'get_header_separator()' is deprecated")
    return self._get_header_separator()

def _get_row_separator(self):
    """Build the line drawn between two consecutive data rows."""
    return self._get_horizontal_line(self.row_separator_char,
                                     self.intersect_row_left,
                                     self.intersect_row_mid,
                                     self.intersect_row_right)

def get_row_separator(self):  # pragma : no cover
    """Get the Row separator of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    deprecation("'get_row_separator()' is deprecated")
    return self._get_row_separator()

def _get_bottom_border(self):
    """Build the bottom border line using the bottom intersection chars."""
    return self._get_horizontal_line(self.bottom_border_char,
                                     self.intersect_bottom_left,
                                     self.intersect_bottom_mid,
                                     self.intersect_bottom_right)

def get_bottom_border(self):  # pragma : no cover
    """Get the Bottom border of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    deprecation("'get_bottom_border()' is deprecated")
    return self._get_bottom_border()
def get_table_width(self):
    """Get the width of the table as number of characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    # Content width plus the space taken by separators and borders.
    separators = ((self._column_count - 1)
                  * termwidth(self.column_separator_char))
    borders = (termwidth(self.left_border_char)
               + termwidth(self.right_border_char))
    return sum(self._column_widths) + separators + borders
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time ,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """

    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''

    # NOTE(review): the serialno feature temporarily mutates the table
    # (insert a column here, pop it at the end), so this method is not
    # re-entrant and an exception in between leaves the extra column.
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))

    # Should widths of column be recalculated
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()

    string_ = []

    # Drawing the top border
    if self.top_border_char:
        string_.append(
            self._get_top_border())

    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        headers = to_unicode(self._column_headers)
        string_.append(headers)

        if self.header_separator_char:
            string_.append(
                self._get_header_separator())

    # Printing rows
    first_row_encountered = False
    for row in self._table:
        # Row separators go between rows, not before the first one.
        if first_row_encountered and self.row_separator_char:
            string_.append(
                self._get_row_separator())
        first_row_encountered = True
        content = to_unicode(row)
        string_.append(content)

    # Drawing the bottom border
    if self.bottom_border_char:
        string_.append(
            self._get_bottom_border())

    # Undo the temporary serial-number column inserted above.
    if self.serialno and self.column_count > 0:
        self.pop_column(0)

    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable._get_horizontal_line | python | def _get_horizontal_line(self, char, intersect_left,
intersect_mid, intersect_right):
width = self.get_table_width()
try:
line = list(char * (int(width/termwidth(char)) + 1))[:width]
except ZeroDivisionError:
line = [' '] * width
if len(line) == 0:
return ''
# Only if Special Intersection is enabled and horizontal line is
# visible
if not char.isspace():
# If left border is enabled and it is visible
visible_junc = not intersect_left.isspace()
if termwidth(self.left_border_char) > 0:
if not (self.left_border_char.isspace() and visible_junc):
length = min(termwidth(self.left_border_char),
termwidth(intersect_left))
for i in range(length):
line[i] = intersect_left[i]
visible_junc = not intersect_right.isspace()
# If right border is enabled and it is visible
if termwidth(self.right_border_char) > 0:
if not (self.right_border_char.isspace() and visible_junc):
length = min(termwidth(self.right_border_char),
termwidth(intersect_right))
for i in range(length):
line[-i-1] = intersect_right[-i-1]
visible_junc = not intersect_mid.isspace()
# If column separator is enabled and it is visible
if termwidth(self.column_separator_char):
if not (self.column_separator_char.isspace() and visible_junc):
index = termwidth(self.left_border_char)
for i in range(self._column_count-1):
index += (self._column_widths[i])
length = min(termwidth(self.column_separator_char),
termwidth(intersect_mid))
for i in range(length):
line[index+i] = intersect_mid[i]
index += termwidth(self.column_separator_char)
return ''.join(line) | Get a horizontal line for the table.
Internal method used to actually get all horizontal lines in the table.
Column width should be set prior to calling this method. This method
detects intersection and handles it according to the values of
`intersect_*_*` attributes.
Parameters
----------
char : str
Character used to draw the line.
Returns
-------
str
String which will be printed as the Top border of the table. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L1049-L1110 | [
"def termwidth(item):\n \"\"\"Returns the visible width of the string as shown on the terminal\"\"\"\n obj = ANSIMultiByteString(to_unicode(item))\n return obj.termwidth()\n",
"def get_table_width(self):\n \"\"\"Get the width of the table as number of characters.\n\n Column width should be set prior to calling this method.\n\n Returns\n -------\n int\n Width of the table as number of characters.\n \"\"\"\n if self.column_count == 0:\n return 0\n width = sum(self._column_widths)\n width += ((self._column_count - 1)\n * termwidth(self.column_separator_char))\n width += termwidth(self.left_border_char)\n width += termwidth(self.right_border_char)\n return width\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
default_alignment=enums.ALIGN_CENTER,
default_padding=1):
self.set_style(enums.STYLE_DEFAULT)
self.numeric_precision = 3
self.serialno = False
self.serialno_header = "SN"
self.detect_numerics = True
self._column_count = 0
self._sign_mode = enums.SM_MINUS
self._width_exceed_policy = enums.WEP_WRAP
self._column_pad = " "
self.default_alignment = default_alignment
self.default_padding = default_padding
self.max_table_width = max_width
self._initialize_table(0)
self._table = []
def __setattr__(self, name, value):
attrs = ('left_border_char', 'right_border_char', 'top_border_char',
'bottom_border_char', 'header_separator_char',
'column_separator_char', 'row_separator_char',
'intersect_top_left', 'intersect_top_mid',
'intersect_top_right', 'intersect_header_left',
'intersect_header_mid', 'intersect_header_right',
'intersect_row_left', 'intersect_row_mid',
'intersect_row_right', 'intersect_bottom_left',
'intersect_bottom_mid', 'intersect_bottom_right')
if to_unicode(name) in attrs and not isinstance(value, basestring):
value_type = type(value).__name__
raise TypeError(("Expected {attr} to be of type 'str', "
"got '{attr_type}'").format(attr=name,
attr_type=value_type))
super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
"""Get the number of columns in the table(read only)"""
return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifulbable.WEP_WRAP An item is wrapped so every line fits
within it's column width.
beautifultable.WEP_STRIP An item is stripped to fit in it's
column.
beautifultable.WEP_ELLIPSIS An item is stripped to fit in it's
column and appended with ...(Ellipsis).
============================ =========================================
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable having all memebers an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.
Right Width of the padding specifies the number of characters
on the rigth of a column reserved for padding. By default It is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
"""Sets the column count of the table.
This method is called to set the number of columns for the first time.
Parameters
----------
column_count : int
number of columns in the table
"""
header = [''] * column_count
alignment = [self.default_alignment] * column_count
width = [0] * column_count
padding = [self.default_padding] * column_count
self._column_count = column_count
self._column_headers = HeaderData(self, header)
self._column_alignments = AlignmentMetaData(self, alignment)
self._column_widths = PositiveIntegerMetaData(self, width)
self._left_padding_widths = PositiveIntegerMetaData(self, padding)
self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
# TODO: Rename this method
# str is also an iterable but it is not a valid row, so
# an extra check is required for str
if not isinstance(value, Iterable) or isinstance(value, basestring):
raise TypeError("parameter must be an iterable")
row = list(value)
if init_table_if_required and self._column_count == 0:
self._initialize_table(len(row))
if len(row) != self._column_count:
raise ValueError(("'Expected iterable of length {}, "
"got {}").format(self._column_count, len(row)))
return row
def __getitem__(self, key):
"""Get a row, or a column, or a new table by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, returns a row.
If key is an `str`, returns iterator to a column with header `key`.
If key is a slice object, returns a new table sliced according to
rows.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, slice):
new_table = copy.copy(self)
# Every child of BaseRow class needs to be reassigned so that
# They contain reference of the new table rather than the old
# This was a cause of a nasty bug once.
new_table.column_headers = self.column_headers
new_table.column_alignments = self.column_alignments
new_table.column_widths = self.column_widths
new_table.left_padding_widths = self.left_padding_widths
new_table.right_padding_widths = self.left_padding_widths
new_table._table = []
for row in self._table[key]:
new_table.append_row(row)
return new_table
elif isinstance(key, int):
return self._table[key]
elif isinstance(key, basestring):
return self.get_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, updates a row.
If key is an `str`, appends `column` to the list with header as
`key`.
If key is a slice object, updates multiple rows according to slice
rules.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __len__(self):
return len(self._table)
def __contains__(self, key):
if isinstance(key, basestring):
return key in self._column_headers
elif isinstance(key, Iterable):
return key in self._table
else:
raise TypeError(("'key' must be str or Iterable, "
"not {}").format(type(key).__name__))
def __iter__(self):
return iter(self._table)
def __next__(self):
return next(self._table)
def __repr__(self):
return repr(self._table)
def __str__(self):
return self.get_string()
def set_style(self, style):
"""Set the style of the table from a predefined set of styles.
Parameters
----------
style: Style
It can be one of the following:
* beautifulTable.STYLE_DEFAULT
* beautifultable.STYLE_NONE
* beautifulTable.STYLE_DOTTED
* beautifulTable.STYLE_MYSQL
* beautifulTable.STYLE_SEPARATED
* beautifulTable.STYLE_COMPACT
* beautifulTable.STYLE_MARKDOWN
* beautifulTable.STYLE_RESTRUCTURED_TEXT
* beautifultable.STYLE_BOX
* beautifultable.STYLE_BOX_DOUBLED
* beautifultable.STYLE_BOX_ROUNDED
* beautifultable.STYLE_GRID
"""
if not isinstance(style, enums.Style):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Style)
error_msg = ("allowed values for style are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
style_template = style.value
self.left_border_char = style_template.left_border_char
self.right_border_char = style_template.right_border_char
self.top_border_char = style_template.top_border_char
self.bottom_border_char = style_template.bottom_border_char
self.header_separator_char = style_template.header_separator_char
self.column_separator_char = style_template.column_separator_char
self.row_separator_char = style_template.row_separator_char
self.intersect_top_left = style_template.intersect_top_left
self.intersect_top_mid = style_template.intersect_top_mid
self.intersect_top_right = style_template.intersect_top_right
self.intersect_header_left = style_template.intersect_header_left
self.intersect_header_mid = style_template.intersect_header_mid
self.intersect_header_right = style_template.intersect_header_right
self.intersect_row_left = style_template.intersect_row_left
self.intersect_row_mid = style_template.intersect_row_mid
self.intersect_row_right = style_template.intersect_row_right
self.intersect_bottom_left = style_template.intersect_bottom_left
self.intersect_bottom_mid = style_template.intersect_bottom_mid
self.intersect_bottom_right = style_template.intersect_bottom_right
def _calculate_column_widths(self):
"""Calculate width of column automatically based on data."""
table_width = self.get_table_width()
lpw, rpw = self._left_padding_widths, self._right_padding_widths
pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
max_widths = [0 for index in range(self._column_count)]
offset = table_width - sum(self._column_widths) + sum(pad_widths)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
for index, column in enumerate(zip(*self._table)):
max_length = 0
for i in column:
for j in to_unicode(i).split('\n'):
output_str = get_output_str(j, self.detect_numerics,
self.numeric_precision,
self.sign_mode.value)
max_length = max(max_length, termwidth(output_str))
for i in to_unicode(self._column_headers[index]).split('\n'):
output_str = get_output_str(i, self.detect_numerics,
self.numeric_precision,
self.sign_mode.value)
max_length = max(max_length, termwidth(output_str))
max_widths[index] += max_length
sum_ = sum(max_widths)
desired_sum = self._max_table_width - offset
# Set flag for columns who are within their fair share
temp_sum = 0
flag = [0] * len(max_widths)
for i, width in enumerate(max_widths):
if width <= int(desired_sum / self._column_count):
temp_sum += width
flag[i] = 1
else:
# Allocate atleast 1 character width to the column
temp_sum += 1
avail_space = desired_sum - temp_sum
actual_space = sum_ - temp_sum
shrinked_columns = {}
# Columns which exceed their fair share should be shrinked based on
# how much space is left for the table
for i, width in enumerate(max_widths):
self.column_widths[i] = width
if not flag[i]:
new_width = 1 + int((width-1) * avail_space / actual_space)
if new_width < width:
self.column_widths[i] = new_width
shrinked_columns[new_width] = i
# Divide any remaining space among shrinked columns
if shrinked_columns:
extra = (self._max_table_width
- offset
- sum(self.column_widths))
actual_space = sum(shrinked_columns)
if extra > 0:
for i, width in enumerate(sorted(shrinked_columns)):
index = shrinked_columns[width]
extra_width = int(width * extra / actual_space)
self.column_widths[i] += extra_width
if i == (len(shrinked_columns) - 1):
extra = (self._max_table_width
- offset
- sum(self.column_widths))
self.column_widths[index] += extra
for i in range(self.column_count):
self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self): # pragma : no cover
deprecation("'auto_calculate_width()' is deprecated")
self._calculate_column_widths()
def set_padding_widths(self, pad_width):
"""Set width for left and rigth padding of the columns of the table.
Parameters
----------
pad_width : array_like
pad widths for the columns.
"""
self.left_padding_widths = pad_width
self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
"""Stable sort of the table *IN-PLACE* with respect to a column.
Parameters
----------
key: int, str
index or header of the column. Normal list rules apply.
reverse : bool
If `True` then table is sorted as if each comparison was reversed.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError("'key' must either be 'int' or 'str'")
self._table.sort(key=operator.itemgetter(index), reverse=reverse)
def copy(self):
"""Return a shallow copy of the table.
Returns
-------
BeautifulTable:
shallow copy of the BeautifulTable instance.
"""
return self[:]
def get_column_header(self, index):
"""Get header of a column from it's index.
Parameters
----------
index: int
Normal list rules apply.
"""
return self._column_headers[index]
def get_column_index(self, header):
"""Get index of a column from it's header.
Parameters
----------
header: str
header of the column.
Raises
------
ValueError:
If no column could be found corresponding to `header`.
"""
try:
index = self._column_headers.index(header)
return index
except ValueError:
raise_suppressed(KeyError(("'{}' is not a header for any "
"column").format(header)))
def get_column(self, key):
"""Return an iterator to a column.
Parameters
----------
key : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If key is not of type `int`, or `str`.
Returns
-------
iter:
Iterator to the specified column.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError(("key must be an int or str, "
"not {}").format(type(key).__name__))
return iter(map(operator.itemgetter(index), self._table))
def reverse(self):
"""Reverse the table row-wise *IN PLACE*."""
self._table.reverse()
def pop_row(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int
index of the row. Normal list rules apply.
"""
row = self._table.pop(index)
return row
def pop_column(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If index is not an instance of `int`, or `str`.
IndexError:
If Table is empty.
"""
if isinstance(index, int):
pass
elif isinstance(index, basestring):
index = self.get_column_index(index)
else:
raise TypeError(("column index must be an integer or a string, "
"not {}").format(type(index).__name__))
if self._column_count == 0:
raise IndexError("pop from empty table")
if self._column_count == 1:
# This is the last column. So we should clear the table to avoid
# empty rows
self.clear(clear_metadata=True)
else:
# Not the last column. safe to pop from row
self._column_count -= 1
self._column_alignments._pop(index)
self._column_widths._pop(index)
self._left_padding_widths._pop(index)
self._right_padding_widths._pop(index)
self._column_headers._pop(index)
for row in self._table:
row._pop(index)
def insert_row(self, index, row):
"""Insert a row before index in the table.
Parameters
----------
index : int
List index rules apply
row : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `row` is not an iterable.
ValueError:
If size of `row` is inconsistent with the current number
of columns.
"""
row = self._validate_row(row)
row_obj = RowData(self, row)
self._table.insert(index, row_obj)
def append_row(self, row):
"""Append a row to end of the table.
Parameters
----------
row : iterable
Any iterable of appropriate length.
"""
self.insert_row(len(self._table), row)
def update_row(self, key, value):
"""Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
key : int or slice
index of the row, or a slice object.
value : iterable
If an index is specified, `value` should be an iterable
of appropriate length. Instead if a slice object is
passed as key, value should be an iterable of rows.
Raises
------
IndexError:
If index specified is out of range.
TypeError:
If `value` is of incorrect type.
ValueError:
If length of row does not matches number of columns.
"""
if isinstance(key, int):
row = self._validate_row(value, init_table_if_required=False)
row_obj = RowData(self, row)
self._table[key] = row_obj
elif isinstance(key, slice):
row_obj_list = []
for row in value:
row_ = self._validate_row(row, init_table_if_required=True)
row_obj_list.append(RowData(self, row_))
self._table[key] = row_obj_list
else:
raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
"""Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
header : str
Header of the column
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If length of `column` is shorter than number of rows.
ValueError:
If no column exists with title `header`.
"""
index = self.get_column_index(header)
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
for row, new_item in zip(self._table, column):
row[index] = new_item
def insert_column(self, index, header, column):
"""Insert a column before `index` in the table.
If length of column is bigger than number of rows, lets say
`k`, only the first `k` values of `column` is considered.
If column is shorter than 'k', ValueError is raised.
Note that Table remains in consistent state even if column
is too short. Any changes made by this method is rolled back
before raising the exception.
Parameters
----------
index : int
List index rules apply.
header : str
Title of the column.
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `header` is not of type `str`.
ValueError:
If length of `column` is shorter than number of rows.
"""
if self._column_count == 0:
self.column_headers = HeaderData(self, [header])
self._table = [RowData(self, [i]) for i in column]
else:
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
column_length = 0
for i, (row, new_item) in enumerate(zip(self._table, column)):
row._insert(index, new_item)
column_length = i
if column_length == len(self._table) - 1:
self._column_count += 1
self._column_headers._insert(index, header)
self._column_alignments._insert(index, self.default_alignment)
self._column_widths._insert(index, 0)
self._left_padding_widths._insert(index, self.default_padding)
self._right_padding_widths._insert(index, self.default_padding)
else:
# Roll back changes so that table remains in consistent state
for j in range(column_length, -1, -1):
self._table[j]._pop(index)
raise ValueError(("length of 'column' should be atleast {}, "
"got {}").format(len(self._table),
column_length + 1))
def append_column(self, header, column):
"""Append a column to end of the table.
Parameters
----------
header : str
Title of the column
column : iterable
Any iterable of appropriate length.
"""
self.insert_column(self._column_count, header, column)
def clear(self, clear_metadata=False):
"""Clear the contents of the table.
Clear all rows of the table, and if specified clears all column
specific data.
Parameters
----------
clear_metadata : bool, optional
If it is true(default False), all metadata of columns such as their
alignment, padding, width, etc. are also cleared and number of
columns is set to 0.
"""
# Cannot use clear method to support Python 2.7
del self._table[:]
if clear_metadata:
self._initialize_table(0)
def _get_top_border(self):
return self._get_horizontal_line(self.top_border_char,
self.intersect_top_left,
self.intersect_top_mid,
self.intersect_top_right)
def get_top_border(self): # pragma : no cover
"""Get the Top border of table.
Column width should be set prior to calling this method.
Returns
-------
str
String which will be printed as the Top border of the table.
"""
deprecation("'get_top_border()' is deprecated")
return self._get_top_border()
def _get_header_separator(self):
return self._get_horizontal_line(self.header_separator_char,
self.intersect_header_left,
self.intersect_header_mid,
self.intersect_header_right)
def get_header_separator(self): # pragma : no cover
"""Get the Header separator of table.
Column width should be set prior to calling this method.
Returns
-------
str
String which will be printed as Header separator of the table.
"""
deprecation("'get_header_separator()' is deprecated")
return self._get_header_separator()
def _get_row_separator(self):
return self._get_horizontal_line(self.row_separator_char,
self.intersect_row_left,
self.intersect_row_mid,
self.intersect_row_right)
def get_row_separator(self): # pragma : no cover
"""Get the Row separator of table.
Column width should be set prior to calling this method.
Returns
-------
str
String which will be printed as Row separator of the table.
"""
deprecation("'get_row_separator()' is deprecated")
return self._get_row_separator()
def _get_bottom_border(self):
return self._get_horizontal_line(self.bottom_border_char,
self.intersect_bottom_left,
self.intersect_bottom_mid,
self.intersect_bottom_right)
def get_bottom_border(self): # pragma : no cover
"""Get the Bottom border of table.
Column width should be set prior to calling this method.
Returns
-------
str
String which will be printed as Bottom border of the table.
"""
deprecation("'get_bottom_border()' is deprecated")
return self._get_bottom_border()
def get_table_width(self):
"""Get the width of the table as number of characters.
Column width should be set prior to calling this method.
Returns
-------
int
Width of the table as number of characters.
"""
if self.column_count == 0:
return 0
width = sum(self._column_widths)
width += ((self._column_count - 1)
* termwidth(self.column_separator_char))
width += termwidth(self.left_border_char)
width += termwidth(self.right_border_char)
return width
    def get_string(self, recalculate_width=True):
        """Get the table as a String.

        Parameters
        ----------
        recalculate_width : bool, optional
            If width for each column should be recalculated(default True).
            Note that width is always calculated if it wasn't set
            explicitly when this method is called for the first time ,
            regardless of the value of `recalculate_width`.

        Returns
        -------
        str:
            Table as a string.
        """
        # Empty table. returning empty string.
        if len(self._table) == 0:
            return ''
        # Temporarily prepend the auto-generated serial-number column; it is
        # popped again at the end so the table is left unmodified.
        if self.serialno and self.column_count > 0:
            self.insert_column(0, self.serialno_header,
                               range(1, len(self) + 1))
        # Should widths of column be recalculated
        # (a zero sum means widths were never computed, so compute anyway).
        if recalculate_width or sum(self._column_widths) == 0:
            self._calculate_column_widths()
        string_ = []
        # Drawing the top border
        if self.top_border_char:
            string_.append(
                self._get_top_border())
        # Print headers if not empty or only spaces
        if ''.join(self._column_headers).strip():
            headers = to_unicode(self._column_headers)
            string_.append(headers)
            if self.header_separator_char:
                string_.append(
                    self._get_header_separator())
        # Printing rows
        first_row_encountered = False
        for row in self._table:
            # A row separator is drawn only *between* rows, never before the
            # first one.
            if first_row_encountered and self.row_separator_char:
                string_.append(
                    self._get_row_separator())
            first_row_encountered = True
            content = to_unicode(row)
            string_.append(content)
        # Drawing the bottom border
        if self.bottom_border_char:
            string_.append(
                self._get_bottom_border())
        # Undo the temporary serial-number column inserted above.
        if self.serialno and self.column_count > 0:
            self.pop_column(0)
        return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.get_table_width | python | def get_table_width(self):
if self.column_count == 0:
return 0
width = sum(self._column_widths)
width += ((self._column_count - 1)
* termwidth(self.column_separator_char))
width += termwidth(self.left_border_char)
width += termwidth(self.right_border_char)
return width | Get the width of the table as number of characters.
Column width should be set prior to calling this method.
Returns
-------
int
Width of the table as number of characters. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L1188-L1205 | [
"def termwidth(item):\n \"\"\"Returns the visible width of the string as shown on the terminal\"\"\"\n obj = ANSIMultiByteString(to_unicode(item))\n return obj.termwidth()\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
default_alignment=enums.ALIGN_CENTER,
default_padding=1):
self.set_style(enums.STYLE_DEFAULT)
self.numeric_precision = 3
self.serialno = False
self.serialno_header = "SN"
self.detect_numerics = True
self._column_count = 0
self._sign_mode = enums.SM_MINUS
self._width_exceed_policy = enums.WEP_WRAP
self._column_pad = " "
self.default_alignment = default_alignment
self.default_padding = default_padding
self.max_table_width = max_width
self._initialize_table(0)
self._table = []
def __setattr__(self, name, value):
attrs = ('left_border_char', 'right_border_char', 'top_border_char',
'bottom_border_char', 'header_separator_char',
'column_separator_char', 'row_separator_char',
'intersect_top_left', 'intersect_top_mid',
'intersect_top_right', 'intersect_header_left',
'intersect_header_mid', 'intersect_header_right',
'intersect_row_left', 'intersect_row_mid',
'intersect_row_right', 'intersect_bottom_left',
'intersect_bottom_mid', 'intersect_bottom_right')
if to_unicode(name) in attrs and not isinstance(value, basestring):
value_type = type(value).__name__
raise TypeError(("Expected {attr} to be of type 'str', "
"got '{attr_type}'").format(attr=name,
attr_type=value_type))
super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
"""Get the number of columns in the table(read only)"""
return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifulbable.WEP_WRAP An item is wrapped so every line fits
within it's column width.
beautifultable.WEP_STRIP An item is stripped to fit in it's
column.
beautifultable.WEP_ELLIPSIS An item is stripped to fit in it's
column and appended with ...(Ellipsis).
============================ =========================================
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable having all memebers an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.
Right Width of the padding specifies the number of characters
on the rigth of a column reserved for padding. By default It is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
"""Sets the column count of the table.
This method is called to set the number of columns for the first time.
Parameters
----------
column_count : int
number of columns in the table
"""
header = [''] * column_count
alignment = [self.default_alignment] * column_count
width = [0] * column_count
padding = [self.default_padding] * column_count
self._column_count = column_count
self._column_headers = HeaderData(self, header)
self._column_alignments = AlignmentMetaData(self, alignment)
self._column_widths = PositiveIntegerMetaData(self, width)
self._left_padding_widths = PositiveIntegerMetaData(self, padding)
self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
# TODO: Rename this method
# str is also an iterable but it is not a valid row, so
# an extra check is required for str
if not isinstance(value, Iterable) or isinstance(value, basestring):
raise TypeError("parameter must be an iterable")
row = list(value)
if init_table_if_required and self._column_count == 0:
self._initialize_table(len(row))
if len(row) != self._column_count:
raise ValueError(("'Expected iterable of length {}, "
"got {}").format(self._column_count, len(row)))
return row
def __getitem__(self, key):
"""Get a row, or a column, or a new table by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, returns a row.
If key is an `str`, returns iterator to a column with header `key`.
If key is a slice object, returns a new table sliced according to
rows.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, slice):
new_table = copy.copy(self)
# Every child of BaseRow class needs to be reassigned so that
# They contain reference of the new table rather than the old
# This was a cause of a nasty bug once.
new_table.column_headers = self.column_headers
new_table.column_alignments = self.column_alignments
new_table.column_widths = self.column_widths
new_table.left_padding_widths = self.left_padding_widths
new_table.right_padding_widths = self.left_padding_widths
new_table._table = []
for row in self._table[key]:
new_table.append_row(row)
return new_table
elif isinstance(key, int):
return self._table[key]
elif isinstance(key, basestring):
return self.get_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, updates a row.
If key is an `str`, appends `column` to the list with header as
`key`.
If key is a slice object, updates multiple rows according to slice
rules.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __len__(self):
return len(self._table)
def __contains__(self, key):
if isinstance(key, basestring):
return key in self._column_headers
elif isinstance(key, Iterable):
return key in self._table
else:
raise TypeError(("'key' must be str or Iterable, "
"not {}").format(type(key).__name__))
def __iter__(self):
return iter(self._table)
def __next__(self):
return next(self._table)
def __repr__(self):
return repr(self._table)
def __str__(self):
return self.get_string()
def set_style(self, style):
"""Set the style of the table from a predefined set of styles.
Parameters
----------
style: Style
It can be one of the following:
* beautifulTable.STYLE_DEFAULT
* beautifultable.STYLE_NONE
* beautifulTable.STYLE_DOTTED
* beautifulTable.STYLE_MYSQL
* beautifulTable.STYLE_SEPARATED
* beautifulTable.STYLE_COMPACT
* beautifulTable.STYLE_MARKDOWN
* beautifulTable.STYLE_RESTRUCTURED_TEXT
* beautifultable.STYLE_BOX
* beautifultable.STYLE_BOX_DOUBLED
* beautifultable.STYLE_BOX_ROUNDED
* beautifultable.STYLE_GRID
"""
if not isinstance(style, enums.Style):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Style)
error_msg = ("allowed values for style are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
style_template = style.value
self.left_border_char = style_template.left_border_char
self.right_border_char = style_template.right_border_char
self.top_border_char = style_template.top_border_char
self.bottom_border_char = style_template.bottom_border_char
self.header_separator_char = style_template.header_separator_char
self.column_separator_char = style_template.column_separator_char
self.row_separator_char = style_template.row_separator_char
self.intersect_top_left = style_template.intersect_top_left
self.intersect_top_mid = style_template.intersect_top_mid
self.intersect_top_right = style_template.intersect_top_right
self.intersect_header_left = style_template.intersect_header_left
self.intersect_header_mid = style_template.intersect_header_mid
self.intersect_header_right = style_template.intersect_header_right
self.intersect_row_left = style_template.intersect_row_left
self.intersect_row_mid = style_template.intersect_row_mid
self.intersect_row_right = style_template.intersect_row_right
self.intersect_bottom_left = style_template.intersect_bottom_left
self.intersect_bottom_mid = style_template.intersect_bottom_mid
self.intersect_bottom_right = style_template.intersect_bottom_right
def _calculate_column_widths(self):
"""Calculate width of column automatically based on data."""
table_width = self.get_table_width()
lpw, rpw = self._left_padding_widths, self._right_padding_widths
pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
max_widths = [0 for index in range(self._column_count)]
offset = table_width - sum(self._column_widths) + sum(pad_widths)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
for index, column in enumerate(zip(*self._table)):
max_length = 0
for i in column:
for j in to_unicode(i).split('\n'):
output_str = get_output_str(j, self.detect_numerics,
self.numeric_precision,
self.sign_mode.value)
max_length = max(max_length, termwidth(output_str))
for i in to_unicode(self._column_headers[index]).split('\n'):
output_str = get_output_str(i, self.detect_numerics,
self.numeric_precision,
self.sign_mode.value)
max_length = max(max_length, termwidth(output_str))
max_widths[index] += max_length
sum_ = sum(max_widths)
desired_sum = self._max_table_width - offset
# Set flag for columns who are within their fair share
temp_sum = 0
flag = [0] * len(max_widths)
for i, width in enumerate(max_widths):
if width <= int(desired_sum / self._column_count):
temp_sum += width
flag[i] = 1
else:
# Allocate atleast 1 character width to the column
temp_sum += 1
avail_space = desired_sum - temp_sum
actual_space = sum_ - temp_sum
shrinked_columns = {}
# Columns which exceed their fair share should be shrinked based on
# how much space is left for the table
for i, width in enumerate(max_widths):
self.column_widths[i] = width
if not flag[i]:
new_width = 1 + int((width-1) * avail_space / actual_space)
if new_width < width:
self.column_widths[i] = new_width
shrinked_columns[new_width] = i
# Divide any remaining space among shrinked columns
if shrinked_columns:
extra = (self._max_table_width
- offset
- sum(self.column_widths))
actual_space = sum(shrinked_columns)
if extra > 0:
for i, width in enumerate(sorted(shrinked_columns)):
index = shrinked_columns[width]
extra_width = int(width * extra / actual_space)
self.column_widths[i] += extra_width
if i == (len(shrinked_columns) - 1):
extra = (self._max_table_width
- offset
- sum(self.column_widths))
self.column_widths[index] += extra
for i in range(self.column_count):
self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self): # pragma : no cover
deprecation("'auto_calculate_width()' is deprecated")
self._calculate_column_widths()
def set_padding_widths(self, pad_width):
"""Set width for left and rigth padding of the columns of the table.
Parameters
----------
pad_width : array_like
pad widths for the columns.
"""
self.left_padding_widths = pad_width
self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
"""Stable sort of the table *IN-PLACE* with respect to a column.
Parameters
----------
key: int, str
index or header of the column. Normal list rules apply.
reverse : bool
If `True` then table is sorted as if each comparison was reversed.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError("'key' must either be 'int' or 'str'")
self._table.sort(key=operator.itemgetter(index), reverse=reverse)
def copy(self):
"""Return a shallow copy of the table.
Returns
-------
BeautifulTable:
shallow copy of the BeautifulTable instance.
"""
return self[:]
def get_column_header(self, index):
"""Get header of a column from it's index.
Parameters
----------
index: int
Normal list rules apply.
"""
return self._column_headers[index]
def get_column_index(self, header):
"""Get index of a column from it's header.
Parameters
----------
header: str
header of the column.
Raises
------
ValueError:
If no column could be found corresponding to `header`.
"""
try:
index = self._column_headers.index(header)
return index
except ValueError:
raise_suppressed(KeyError(("'{}' is not a header for any "
"column").format(header)))
def get_column(self, key):
"""Return an iterator to a column.
Parameters
----------
key : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If key is not of type `int`, or `str`.
Returns
-------
iter:
Iterator to the specified column.
"""
if isinstance(key, int):
index = key
elif isinstance(key, basestring):
index = self.get_column_index(key)
else:
raise TypeError(("key must be an int or str, "
"not {}").format(type(key).__name__))
return iter(map(operator.itemgetter(index), self._table))
def reverse(self):
"""Reverse the table row-wise *IN PLACE*."""
self._table.reverse()
def pop_row(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int
index of the row. Normal list rules apply.
"""
row = self._table.pop(index)
return row
def pop_column(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int, str
index of the column, or the header of the column.
If index is specified, then normal list rules apply.
Raises
------
TypeError:
If index is not an instance of `int`, or `str`.
IndexError:
If Table is empty.
"""
if isinstance(index, int):
pass
elif isinstance(index, basestring):
index = self.get_column_index(index)
else:
raise TypeError(("column index must be an integer or a string, "
"not {}").format(type(index).__name__))
if self._column_count == 0:
raise IndexError("pop from empty table")
if self._column_count == 1:
# This is the last column. So we should clear the table to avoid
# empty rows
self.clear(clear_metadata=True)
else:
# Not the last column. safe to pop from row
self._column_count -= 1
self._column_alignments._pop(index)
self._column_widths._pop(index)
self._left_padding_widths._pop(index)
self._right_padding_widths._pop(index)
self._column_headers._pop(index)
for row in self._table:
row._pop(index)
def insert_row(self, index, row):
"""Insert a row before index in the table.
Parameters
----------
index : int
List index rules apply
row : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `row` is not an iterable.
ValueError:
If size of `row` is inconsistent with the current number
of columns.
"""
row = self._validate_row(row)
row_obj = RowData(self, row)
self._table.insert(index, row_obj)
def append_row(self, row):
"""Append a row to end of the table.
Parameters
----------
row : iterable
Any iterable of appropriate length.
"""
self.insert_row(len(self._table), row)
def update_row(self, key, value):
"""Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
key : int or slice
index of the row, or a slice object.
value : iterable
If an index is specified, `value` should be an iterable
of appropriate length. Instead if a slice object is
passed as key, value should be an iterable of rows.
Raises
------
IndexError:
If index specified is out of range.
TypeError:
If `value` is of incorrect type.
ValueError:
If length of row does not matches number of columns.
"""
if isinstance(key, int):
row = self._validate_row(value, init_table_if_required=False)
row_obj = RowData(self, row)
self._table[key] = row_obj
elif isinstance(key, slice):
row_obj_list = []
for row in value:
row_ = self._validate_row(row, init_table_if_required=True)
row_obj_list.append(RowData(self, row_))
self._table[key] = row_obj_list
else:
raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
"""Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
header : str
Header of the column
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If length of `column` is shorter than number of rows.
ValueError:
If no column exists with title `header`.
"""
index = self.get_column_index(header)
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
for row, new_item in zip(self._table, column):
row[index] = new_item
def insert_column(self, index, header, column):
"""Insert a column before `index` in the table.
If length of column is bigger than number of rows, lets say
`k`, only the first `k` values of `column` is considered.
If column is shorter than 'k', ValueError is raised.
Note that Table remains in consistent state even if column
is too short. Any changes made by this method is rolled back
before raising the exception.
Parameters
----------
index : int
List index rules apply.
header : str
Title of the column.
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `header` is not of type `str`.
ValueError:
If length of `column` is shorter than number of rows.
"""
if self._column_count == 0:
self.column_headers = HeaderData(self, [header])
self._table = [RowData(self, [i]) for i in column]
else:
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
column_length = 0
for i, (row, new_item) in enumerate(zip(self._table, column)):
row._insert(index, new_item)
column_length = i
if column_length == len(self._table) - 1:
self._column_count += 1
self._column_headers._insert(index, header)
self._column_alignments._insert(index, self.default_alignment)
self._column_widths._insert(index, 0)
self._left_padding_widths._insert(index, self.default_padding)
self._right_padding_widths._insert(index, self.default_padding)
else:
# Roll back changes so that table remains in consistent state
for j in range(column_length, -1, -1):
self._table[j]._pop(index)
raise ValueError(("length of 'column' should be atleast {}, "
"got {}").format(len(self._table),
column_length + 1))
def append_column(self, header, column):
"""Append a column to end of the table.
Parameters
----------
header : str
Title of the column
column : iterable
Any iterable of appropriate length.
"""
self.insert_column(self._column_count, header, column)
def clear(self, clear_metadata=False):
"""Clear the contents of the table.
Clear all rows of the table, and if specified clears all column
specific data.
Parameters
----------
clear_metadata : bool, optional
If it is true(default False), all metadata of columns such as their
alignment, padding, width, etc. are also cleared and number of
columns is set to 0.
"""
# Cannot use clear method to support Python 2.7
del self._table[:]
if clear_metadata:
self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
intersect_mid, intersect_right):
"""Get a horizontal line for the table.
Internal method used to actually get all horizontal lines in the table.
Column width should be set prior to calling this method. This method
detects intersection and handles it according to the values of
`intersect_*_*` attributes.
Parameters
----------
char : str
Character used to draw the line.
Returns
-------
str
String which will be printed as the Top border of the table.
"""
width = self.get_table_width()
try:
line = list(char * (int(width/termwidth(char)) + 1))[:width]
except ZeroDivisionError:
line = [' '] * width
if len(line) == 0:
return ''
# Only if Special Intersection is enabled and horizontal line is
# visible
if not char.isspace():
# If left border is enabled and it is visible
visible_junc = not intersect_left.isspace()
if termwidth(self.left_border_char) > 0:
if not (self.left_border_char.isspace() and visible_junc):
length = min(termwidth(self.left_border_char),
termwidth(intersect_left))
for i in range(length):
line[i] = intersect_left[i]
visible_junc = not intersect_right.isspace()
# If right border is enabled and it is visible
if termwidth(self.right_border_char) > 0:
if not (self.right_border_char.isspace() and visible_junc):
length = min(termwidth(self.right_border_char),
termwidth(intersect_right))
for i in range(length):
line[-i-1] = intersect_right[-i-1]
visible_junc = not intersect_mid.isspace()
# If column separator is enabled and it is visible
if termwidth(self.column_separator_char):
if not (self.column_separator_char.isspace() and visible_junc):
index = termwidth(self.left_border_char)
for i in range(self._column_count-1):
index += (self._column_widths[i])
length = min(termwidth(self.column_separator_char),
termwidth(intersect_mid))
for i in range(length):
line[index+i] = intersect_mid[i]
index += termwidth(self.column_separator_char)
return ''.join(line)
def _get_top_border(self):
    # Render the topmost rule using the top-border character set.
    chars = (self.top_border_char,
             self.intersect_top_left,
             self.intersect_top_mid,
             self.intersect_top_right)
    return self._get_horizontal_line(*chars)
def get_top_border(self):  # pragma : no cover
    """Get the Top border of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    # Public alias kept for backward compatibility only.
    message = "'get_top_border()' is deprecated"
    deprecation(message)
    return self._get_top_border()
def _get_header_separator(self):
    # Render the rule between the header row and the data rows.
    chars = (self.header_separator_char,
             self.intersect_header_left,
             self.intersect_header_mid,
             self.intersect_header_right)
    return self._get_horizontal_line(*chars)
def get_header_separator(self):  # pragma : no cover
    """Get the Header separator of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    # Public alias kept for backward compatibility only.
    message = "'get_header_separator()' is deprecated"
    deprecation(message)
    return self._get_header_separator()
def _get_row_separator(self):
    # Render the rule drawn between two consecutive data rows.
    chars = (self.row_separator_char,
             self.intersect_row_left,
             self.intersect_row_mid,
             self.intersect_row_right)
    return self._get_horizontal_line(*chars)
def get_row_separator(self):  # pragma : no cover
    """Get the Row separator of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    # Public alias kept for backward compatibility only.
    message = "'get_row_separator()' is deprecated"
    deprecation(message)
    return self._get_row_separator()
def _get_bottom_border(self):
    # Render the bottommost rule using the bottom-border character set.
    chars = (self.bottom_border_char,
             self.intersect_bottom_left,
             self.intersect_bottom_mid,
             self.intersect_bottom_right)
    return self._get_horizontal_line(*chars)
def get_bottom_border(self):  # pragma : no cover
    """Get the Bottom border of table.

    Column width should be set prior to calling this method.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    # Public alias kept for backward compatibility only.
    message = "'get_bottom_border()' is deprecated"
    deprecation(message)
    return self._get_bottom_border()
def get_string(self, recalculate_width=True):
    """Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """
    # Empty table. returning empty string.
    if len(self._table) == 0:
        return ''
    if self.serialno and self.column_count > 0:
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))
    # BUG FIX: the auto-inserted serial-number column must be removed
    # even when rendering raises (e.g. from width calculation),
    # otherwise the table is left permanently mutated.
    try:
        # Should widths of column be recalculated
        if recalculate_width or sum(self._column_widths) == 0:
            self._calculate_column_widths()
        string_ = []
        # Drawing the top border
        if self.top_border_char:
            string_.append(self._get_top_border())
        # Print headers if not empty or only spaces
        if ''.join(self._column_headers).strip():
            headers = to_unicode(self._column_headers)
            string_.append(headers)
            if self.header_separator_char:
                string_.append(self._get_header_separator())
        # Printing rows
        first_row_encountered = False
        for row in self._table:
            if first_row_encountered and self.row_separator_char:
                string_.append(self._get_row_separator())
            first_row_encountered = True
            content = to_unicode(row)
            string_.append(content)
        # Drawing the bottom border
        if self.bottom_border_char:
            string_.append(self._get_bottom_border())
    finally:
        if self.serialno and self.column_count > 0:
            self.pop_column(0)
    return '\n'.join(string_)
|
pri22296/beautifultable | beautifultable/beautifultable.py | BeautifulTable.get_string | python | def get_string(self, recalculate_width=True):
# Empty table. returning empty string.
if len(self._table) == 0:
return ''
if self.serialno and self.column_count > 0:
self.insert_column(0, self.serialno_header,
range(1, len(self) + 1))
# Should widths of column be recalculated
if recalculate_width or sum(self._column_widths) == 0:
self._calculate_column_widths()
string_ = []
# Drawing the top border
if self.top_border_char:
string_.append(
self._get_top_border())
# Print headers if not empty or only spaces
if ''.join(self._column_headers).strip():
headers = to_unicode(self._column_headers)
string_.append(headers)
if self.header_separator_char:
string_.append(
self._get_header_separator())
# Printing rows
first_row_encountered = False
for row in self._table:
if first_row_encountered and self.row_separator_char:
string_.append(
self._get_row_separator())
first_row_encountered = True
content = to_unicode(row)
string_.append(content)
# Drawing the bottom border
if self.bottom_border_char:
string_.append(
self._get_bottom_border())
if self.serialno and self.column_count > 0:
self.pop_column(0)
return '\n'.join(string_) | Get the table as a String.
Parameters
----------
recalculate_width : bool, optional
If width for each column should be recalculated(default True).
Note that width is always calculated if it wasn't set
explicitly when this method is called for the first time ,
regardless of the value of `recalculate_width`.
Returns
-------
str:
Table as a string. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L1207-L1269 | [
"def _calculate_column_widths(self):\n \"\"\"Calculate width of column automatically based on data.\"\"\"\n table_width = self.get_table_width()\n lpw, rpw = self._left_padding_widths, self._right_padding_widths\n pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]\n max_widths = [0 for index in range(self._column_count)]\n offset = table_width - sum(self._column_widths) + sum(pad_widths)\n self._max_table_width = max(self._max_table_width,\n offset + self._column_count)\n\n for index, column in enumerate(zip(*self._table)):\n max_length = 0\n for i in column:\n for j in to_unicode(i).split('\\n'):\n output_str = get_output_str(j, self.detect_numerics,\n self.numeric_precision,\n self.sign_mode.value)\n max_length = max(max_length, termwidth(output_str))\n for i in to_unicode(self._column_headers[index]).split('\\n'):\n output_str = get_output_str(i, self.detect_numerics,\n self.numeric_precision,\n self.sign_mode.value)\n max_length = max(max_length, termwidth(output_str))\n max_widths[index] += max_length\n\n sum_ = sum(max_widths)\n desired_sum = self._max_table_width - offset\n\n # Set flag for columns who are within their fair share\n temp_sum = 0\n flag = [0] * len(max_widths)\n for i, width in enumerate(max_widths):\n if width <= int(desired_sum / self._column_count):\n temp_sum += width\n flag[i] = 1\n else:\n # Allocate atleast 1 character width to the column\n temp_sum += 1\n\n avail_space = desired_sum - temp_sum\n actual_space = sum_ - temp_sum\n shrinked_columns = {}\n\n # Columns which exceed their fair share should be shrinked based on\n # how much space is left for the table\n for i, width in enumerate(max_widths):\n self.column_widths[i] = width\n if not flag[i]:\n new_width = 1 + int((width-1) * avail_space / actual_space)\n if new_width < width:\n self.column_widths[i] = new_width\n shrinked_columns[new_width] = i\n\n # Divide any remaining space among shrinked columns\n if shrinked_columns:\n extra = (self._max_table_width\n - 
offset\n - sum(self.column_widths))\n actual_space = sum(shrinked_columns)\n\n if extra > 0:\n for i, width in enumerate(sorted(shrinked_columns)):\n index = shrinked_columns[width]\n extra_width = int(width * extra / actual_space)\n self.column_widths[i] += extra_width\n if i == (len(shrinked_columns) - 1):\n extra = (self._max_table_width\n - offset\n - sum(self.column_widths))\n self.column_widths[index] += extra\n\n for i in range(self.column_count):\n self.column_widths[i] += pad_widths[i]\n",
"def insert_column(self, index, header, column):\n \"\"\"Insert a column before `index` in the table.\n\n If length of column is bigger than number of rows, lets say\n `k`, only the first `k` values of `column` is considered.\n If column is shorter than 'k', ValueError is raised.\n\n Note that Table remains in consistent state even if column\n is too short. Any changes made by this method is rolled back\n before raising the exception.\n\n Parameters\n ----------\n index : int\n List index rules apply.\n\n header : str\n Title of the column.\n\n column : iterable\n Any iterable of appropriate length.\n\n Raises\n ------\n TypeError:\n If `header` is not of type `str`.\n\n ValueError:\n If length of `column` is shorter than number of rows.\n \"\"\"\n if self._column_count == 0:\n self.column_headers = HeaderData(self, [header])\n self._table = [RowData(self, [i]) for i in column]\n else:\n if not isinstance(header, basestring):\n raise TypeError(\"header must be of type str\")\n column_length = 0\n for i, (row, new_item) in enumerate(zip(self._table, column)):\n row._insert(index, new_item)\n column_length = i\n if column_length == len(self._table) - 1:\n self._column_count += 1\n self._column_headers._insert(index, header)\n self._column_alignments._insert(index, self.default_alignment)\n self._column_widths._insert(index, 0)\n self._left_padding_widths._insert(index, self.default_padding)\n self._right_padding_widths._insert(index, self.default_padding)\n else:\n # Roll back changes so that table remains in consistent state\n for j in range(column_length, -1, -1):\n self._table[j]._pop(index)\n raise ValueError((\"length of 'column' should be atleast {}, \"\n \"got {}\").format(len(self._table),\n column_length + 1))\n",
"def _get_top_border(self):\n return self._get_horizontal_line(self.top_border_char,\n self.intersect_top_left,\n self.intersect_top_mid,\n self.intersect_top_right)\n"
] | class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
The instance attributes can be used to customize the look of the
table. To disable a behaviour, just set its corresponding attribute
to an empty string. For example, if Top border should not be drawn,
set `top_border_char` to ''.
Parameters
----------
max_width: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
Attributes
----------
left_border_char : str
Character used to draw the left border.
right_border_char : str
Character used to draw the right border.
top_border_char : str
Character used to draw the top border.
bottom_border_char : str
Character used to draw the bottom border.
header_separator_char : str
Character used to draw the line seperating Header from data.
row_separator_char : str
Character used to draw the line seperating two rows.
column_separator_char : str
Character used to draw the line seperating two columns.
intersection_char : str
Character used to draw intersection of a vertical and horizontal
line. Disabling it just draws the horizontal line char in it's place.
(DEPRECATED).
intersect_top_left : str
Left most character of the top border.
intersect_top_mid : str
Intersection character for top border.
intersect_top_right : str
Right most character of the top border.
intersect_header_left : str
Left most character of the header separator.
intersect_header_mid : str
Intersection character for header separator.
intersect_header_right : str
Right most character of the header separator.
intersect_row_left : str
Left most character of the row separator.
intersect_row_mid : str
Intersection character for row separator.
intersect_row_right : str
Right most character of the row separator.
intersect_bottom_left : str
Left most character of the bottom border.
intersect_bottom_mid : str
Intersection character for bottom border.
intersect_bottom_right : str
Right most character of the bottom border.
numeric_precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool
Whether automatically generated serial number should be printed for
each row(Default False).
serialno_header : str
The header of the autogenerated serial number column. This value is
only used if serialno is True(Default SN).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
"""
def __init__(self, max_width=80,
default_alignment=enums.ALIGN_CENTER,
default_padding=1):
self.set_style(enums.STYLE_DEFAULT)
self.numeric_precision = 3
self.serialno = False
self.serialno_header = "SN"
self.detect_numerics = True
self._column_count = 0
self._sign_mode = enums.SM_MINUS
self._width_exceed_policy = enums.WEP_WRAP
self._column_pad = " "
self.default_alignment = default_alignment
self.default_padding = default_padding
self.max_table_width = max_width
self._initialize_table(0)
self._table = []
def __setattr__(self, name, value):
attrs = ('left_border_char', 'right_border_char', 'top_border_char',
'bottom_border_char', 'header_separator_char',
'column_separator_char', 'row_separator_char',
'intersect_top_left', 'intersect_top_mid',
'intersect_top_right', 'intersect_header_left',
'intersect_header_mid', 'intersect_header_right',
'intersect_row_left', 'intersect_row_mid',
'intersect_row_right', 'intersect_bottom_left',
'intersect_bottom_mid', 'intersect_bottom_right')
if to_unicode(name) in attrs and not isinstance(value, basestring):
value_type = type(value).__name__
raise TypeError(("Expected {attr} to be of type 'str', "
"got '{attr_type}'").format(attr=name,
attr_type=value_type))
super(BeautifulTable, self).__setattr__(name, value)
# ****************************Properties Begin Here****************************
@property
def column_count(self):
"""Get the number of columns in the table(read only)"""
return self._column_count
@property
def intersection_char(self): # pragma : no cover
"""Character used to draw intersection of perpendicular lines.
Disabling it just draws the horizontal line char in it's place.
This attribute is deprecated. Use specific intersect_*_* attribute.
"""
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attribute instead")
return self.intersect_top_left
@intersection_char.setter
def intersection_char(self, value): # pragma : no cover
deprecation("'intersection_char' is deprecated, Use specific "
"`intersect_*_*` attributes instead")
self.intersect_top_left = value
self.intersect_top_mid = value
self.intersect_top_right = value
self.intersect_header_left = value
self.intersect_header_mid = value
self.intersect_header_right = value
self.intersect_row_left = value
self.intersect_row_mid = value
self.intersect_row_right = value
self.intersect_bottom_left = value
self.intersect_bottom_mid = value
self.intersect_bottom_right = value
@property
def sign_mode(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
"""
return self._sign_mode
@sign_mode.setter
def sign_mode(self, value):
if not isinstance(value, enums.SignMode):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode)
error_msg = ("allowed values for sign_mode are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._sign_mode = value
@property
def width_exceed_policy(self):
"""Attribute to control how exceeding column width should be handled.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifulbable.WEP_WRAP An item is wrapped so every line fits
within it's column width.
beautifultable.WEP_STRIP An item is stripped to fit in it's
column.
beautifultable.WEP_ELLIPSIS An item is stripped to fit in it's
column and appended with ...(Ellipsis).
============================ =========================================
"""
return self._width_exceed_policy
@width_exceed_policy.setter
def width_exceed_policy(self, value):
if not isinstance(value, enums.WidthExceedPolicy):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.WidthExceedPolicy)
error_msg = ("allowed values for width_exceed_policy are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._width_exceed_policy = value
@property
def default_alignment(self):
"""Attribute to control the alignment of newly created columns.
It can be one of the following:
============================ =========================================
Option Meaning
============================ =========================================
beautifultable.ALIGN_LEFT New columns are left aligned.
beautifultable.ALIGN_CENTER New columns are center aligned.
beautifultable.ALIGN_RIGHT New columns are right aligned.
============================ =========================================
"""
return self._default_alignment
@default_alignment.setter
def default_alignment(self, value):
if not isinstance(value, enums.Alignment):
allowed = ("{}.{}".format(type(self).__name__, i.name)
for i in enums.Alignment)
error_msg = ("allowed values for default_alignment are: "
+ ', '.join(allowed))
raise ValueError(error_msg)
self._default_alignment = value
@property
def default_padding(self):
"""Initial value for Left and Right padding widths for new columns."""
return self._default_padding
@default_padding.setter
def default_padding(self, value):
if not isinstance(value, int):
raise TypeError("padding must be an integer")
elif value <= 0:
raise ValueError("padding must be more than 0")
else:
self._default_padding = value
@property
def column_widths(self):
"""get/set width for the columns of the table.
Width of the column specifies the max number of characters
a column can contain. Larger characters are handled according to
the value of `width_exceed_policy`.
"""
return self._column_widths
@column_widths.setter
def column_widths(self, value):
width = self._validate_row(value)
self._column_widths = PositiveIntegerMetaData(self, width)
@property
def column_headers(self):
"""get/set titles for the columns of the table.
It can be any iterable having all memebers an instance of `str`.
"""
return self._column_headers
@column_headers.setter
def column_headers(self, value):
header = self._validate_row(value)
for i in header:
if not isinstance(i, basestring):
raise TypeError(("Headers should be of type 'str', "
"not {}").format(type(i)))
self._column_headers = HeaderData(self, header)
@property
def column_alignments(self):
"""get/set alignment of the columns of the table.
It can be any iterable containing only the following:
* beautifultable.ALIGN_LEFT
* beautifultable.ALIGN_CENTER
* beautifultable.ALIGN_RIGHT
"""
return self._column_alignments
@column_alignments.setter
def column_alignments(self, value):
alignment = self._validate_row(value)
self._column_alignments = AlignmentMetaData(self, alignment)
@property
def left_padding_widths(self):
"""get/set width for left padding of the columns of the table.
Left Width of the padding specifies the number of characters
on the left of a column reserved for padding. By Default It is 1.
"""
return self._left_padding_widths
@left_padding_widths.setter
def left_padding_widths(self, value):
pad_width = self._validate_row(value)
self._left_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def right_padding_widths(self):
"""get/set width for right padding of the columns of the table.
Right Width of the padding specifies the number of characters
on the rigth of a column reserved for padding. By default It is 1.
"""
return self._right_padding_widths
@right_padding_widths.setter
def right_padding_widths(self, value):
pad_width = self._validate_row(value)
self._right_padding_widths = PositiveIntegerMetaData(self, pad_width)
@property
def max_table_width(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = ((self._column_count - 1)
* termwidth(self.column_separator_char))
offset += termwidth(self.left_border_char)
offset += termwidth(self.right_border_char)
self._max_table_width = max(self._max_table_width,
offset + self._column_count)
return self._max_table_width
@max_table_width.setter
def max_table_width(self, value):
self._max_table_width = value
# *****************************Properties End Here*****************************
def _initialize_table(self, column_count):
"""Sets the column count of the table.
This method is called to set the number of columns for the first time.
Parameters
----------
column_count : int
number of columns in the table
"""
header = [''] * column_count
alignment = [self.default_alignment] * column_count
width = [0] * column_count
padding = [self.default_padding] * column_count
self._column_count = column_count
self._column_headers = HeaderData(self, header)
self._column_alignments = AlignmentMetaData(self, alignment)
self._column_widths = PositiveIntegerMetaData(self, width)
self._left_padding_widths = PositiveIntegerMetaData(self, padding)
self._right_padding_widths = PositiveIntegerMetaData(self, padding)
def _validate_row(self, value, init_table_if_required=True):
    """Materialize `value` into a row list and check its length.

    A str is iterable but never a valid row, so it is rejected
    explicitly. When the table has no columns yet and
    `init_table_if_required` is True, the first row defines the
    column count.
    """
    # TODO: Rename this method
    if isinstance(value, basestring) or not isinstance(value, Iterable):
        raise TypeError("parameter must be an iterable")
    row = list(value)
    if init_table_if_required and self._column_count == 0:
        self._initialize_table(len(row))
    if len(row) != self._column_count:
        raise ValueError(("'Expected iterable of length {}, "
                          "got {}").format(self._column_count, len(row)))
    return row
def __getitem__(self, key):
    """Get a row, or a column, or a new table by slicing.

    Parameters
    ----------
    key : int, slice, str
        If key is an `int`, returns a row.
        If key is an `str`, returns iterator to a column with header `key`.
        If key is a slice object, returns a new table sliced according to
        rows.

    Raises
    ------
    TypeError
        If key is not of type int, slice or str.
    IndexError
        If `int` key is out of range.
    KeyError
        If `str` key is not found in headers.
    """
    if isinstance(key, slice):
        new_table = copy.copy(self)
        # Every child of BaseRow class needs to be reassigned so that
        # They contain reference of the new table rather than the old
        # This was a cause of a nasty bug once.
        new_table.column_headers = self.column_headers
        new_table.column_alignments = self.column_alignments
        new_table.column_widths = self.column_widths
        new_table.left_padding_widths = self.left_padding_widths
        # BUG FIX: the right paddings were previously copied from
        # `left_padding_widths`, silently dropping any asymmetric
        # padding on the sliced table.
        new_table.right_padding_widths = self.right_padding_widths
        new_table._table = []
        for row in self._table[key]:
            new_table.append_row(row)
        return new_table
    elif isinstance(key, int):
        return self._table[key]
    elif isinstance(key, basestring):
        return self.get_column(key)
    else:
        raise TypeError(("table indices must be integers, strings or "
                         "slices, not {}").format(type(key).__name__))
def __delitem__(self, key):
"""Delete a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, deletes a row.
If key is a slice object, deletes multiple rows.
If key is an `str`, delete the first column with heading `key`
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
KeyError
If `str` key is not found in headers.
"""
if isinstance(key, int) or isinstance(key, slice):
del self._table[key]
elif isinstance(key, basestring):
return self.pop_column(key)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __setitem__(self, key, value):
"""Update a row, or a column, or multiple rows by slicing.
Parameters
----------
key : int, slice, str
If key is an `int`, updates a row.
If key is an `str`, appends `column` to the list with header as
`key`.
If key is a slice object, updates multiple rows according to slice
rules.
Raises
------
TypeError
If key is not of type int, slice or str.
IndexError
If `int` key is out of range.
"""
if isinstance(key, (int, slice)):
self.update_row(key, value)
elif isinstance(key, basestring):
self.update_column(key, value)
else:
raise TypeError(("table indices must be integers, strings or "
"slices, not {}").format(type(key).__name__))
def __len__(self):
return len(self._table)
def __contains__(self, key):
if isinstance(key, basestring):
return key in self._column_headers
elif isinstance(key, Iterable):
return key in self._table
else:
raise TypeError(("'key' must be str or Iterable, "
"not {}").format(type(key).__name__))
def __iter__(self):
return iter(self._table)
def __next__(self):
return next(self._table)
def __repr__(self):
return repr(self._table)
def __str__(self):
return self.get_string()
def set_style(self, style):
    """Set the style of the table from a predefined set of styles.

    Parameters
    ----------
    style: Style
        It can be one of the following:

        * beautifulTable.STYLE_DEFAULT
        * beautifultable.STYLE_NONE
        * beautifulTable.STYLE_DOTTED
        * beautifulTable.STYLE_MYSQL
        * beautifulTable.STYLE_SEPARATED
        * beautifulTable.STYLE_COMPACT
        * beautifulTable.STYLE_MARKDOWN
        * beautifulTable.STYLE_RESTRUCTURED_TEXT
        * beautifultable.STYLE_BOX
        * beautifultable.STYLE_BOX_DOUBLED
        * beautifultable.STYLE_BOX_ROUNDED
        * beautifultable.STYLE_GRID
    """
    if not isinstance(style, enums.Style):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Style)
        error_msg = ("allowed values for style are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    style_template = style.value
    # Copy every drawing character from the style template. Going
    # through setattr keeps the str type validation in __setattr__.
    for attr in ('left_border_char', 'right_border_char',
                 'top_border_char', 'bottom_border_char',
                 'header_separator_char', 'column_separator_char',
                 'row_separator_char', 'intersect_top_left',
                 'intersect_top_mid', 'intersect_top_right',
                 'intersect_header_left', 'intersect_header_mid',
                 'intersect_header_right', 'intersect_row_left',
                 'intersect_row_mid', 'intersect_row_right',
                 'intersect_bottom_left', 'intersect_bottom_mid',
                 'intersect_bottom_right'):
        setattr(self, attr, getattr(style_template, attr))
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data.

    Columns whose natural (max cell/header) width fits within their
    fair share of ``max_table_width`` keep it; wider columns are
    shrunk proportionally, and any leftover space is then
    redistributed among the shrunk columns.
    """
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # `offset` is everything that is not cell content: borders,
    # separators and padding.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)

    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length

    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset

    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1

    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    shrinked_columns = {}

    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                # NOTE(review): if two columns shrink to the same
                # width, one entry overwrites the other here — TODO
                # confirm intended behaviour.
                shrinked_columns[new_width] = i

    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)

        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # BUG FIX: the extra width must go to the shrunk
                # column at position `index`, not to column `i` (the
                # loop counter), which may be a column that was never
                # shrunk at all.
                self.column_widths[index] += extra_width
                if i == (len(shrinked_columns) - 1):
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra

    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
def auto_calculate_width(self): # pragma : no cover
deprecation("'auto_calculate_width()' is deprecated")
self._calculate_column_widths()
def set_padding_widths(self, pad_width):
"""Set width for left and rigth padding of the columns of the table.
Parameters
----------
pad_width : array_like
pad widths for the columns.
"""
self.left_padding_widths = pad_width
self.right_padding_widths = pad_width
def sort(self, key, reverse=False):
    """Stable sort of the table *IN-PLACE* with respect to a column.

    Parameters
    ----------
    key: int, str
        index or header of the column. Normal list rules apply.
    reverse : bool
        If `True` then table is sorted as if each comparison was reversed.
    """
    if isinstance(key, basestring):
        column_pos = self.get_column_index(key)
    elif isinstance(key, int):
        column_pos = key
    else:
        raise TypeError("'key' must either be 'int' or 'str'")
    self._table.sort(key=operator.itemgetter(column_pos), reverse=reverse)
def copy(self):
"""Return a shallow copy of the table.
Returns
-------
BeautifulTable:
shallow copy of the BeautifulTable instance.
"""
return self[:]
def get_column_header(self, index):
"""Get header of a column from it's index.
Parameters
----------
index: int
Normal list rules apply.
"""
return self._column_headers[index]
def get_column_index(self, header):
    """Get index of a column from it's header.

    Parameters
    ----------
    header: str
        header of the column.

    Raises
    ------
    KeyError:
        If no column could be found corresponding to `header`.
        (The code wraps the internal ValueError from list.index in a
        KeyError via `raise_suppressed`; the original docstring
        incorrectly said ValueError.)
    """
    try:
        index = self._column_headers.index(header)
        return index
    except ValueError:
        raise_suppressed(KeyError(("'{}' is not a header for any "
                                   "column").format(header)))
def get_column(self, key):
    """Return an iterator to a column.

    Parameters
    ----------
    key : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If key is not of type `int`, or `str`.

    Returns
    -------
    iter:
        Iterator to the specified column.
    """
    if isinstance(key, basestring):
        position = self.get_column_index(key)
    elif isinstance(key, int):
        position = key
    else:
        raise TypeError(("key must be an int or str, "
                         "not {}").format(type(key).__name__))
    pick_cell = operator.itemgetter(position)
    return iter(map(pick_cell, self._table))
def reverse(self):
"""Reverse the table row-wise *IN PLACE*."""
self._table.reverse()
def pop_row(self, index=-1):
"""Remove and return row at index (default last).
Parameters
----------
index : int
index of the row. Normal list rules apply.
"""
row = self._table.pop(index)
return row
def pop_column(self, index=-1):
    """Remove the column at `index` (default: last column).

    Note that, unlike :meth:`pop_row`, nothing is returned.

    Parameters
    ----------
    index : int, str
        Position of the column, or the header of the column.
        Normal list rules apply for integer indices.

    Raises
    ------
    TypeError:
        If `index` is neither an `int` nor a `str`.

    IndexError:
        If the table has no columns.
    """
    if isinstance(index, int):
        col_pos = index
    elif isinstance(index, basestring):
        col_pos = self.get_column_index(index)
    else:
        raise TypeError(("column index must be an integer or a string, "
                         "not {}").format(type(index).__name__))
    if self._column_count == 0:
        raise IndexError("pop from empty table")
    if self._column_count == 1:
        # Removing the only column would leave behind empty rows,
        # so wipe the whole table instead.
        self.clear(clear_metadata=True)
        return
    # More than one column remains: drop the column from every
    # per-column property and from each row.
    self._column_count -= 1
    for prop in (self._column_alignments, self._column_widths,
                 self._left_padding_widths, self._right_padding_widths,
                 self._column_headers):
        prop._pop(col_pos)
    for row in self._table:
        row._pop(col_pos)
def insert_row(self, index, row):
    """Insert a row before `index` in the table.

    Parameters
    ----------
    index : int
        List index rules apply.

    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.

    ValueError:
        If size of `row` is inconsistent with the current number
        of columns.
    """
    # _validate_row is expected to enforce the TypeError/ValueError
    # contract documented above (defined elsewhere in this class).
    row = self._validate_row(row)
    # Wrap the validated values in a RowData bound to this table so
    # the row can be rendered with the table's settings.
    row_obj = RowData(self, row)
    self._table.insert(index, row_obj)
def append_row(self, row):
    """Add `row` at the bottom of the table.

    Parameters
    ----------
    row : iterable
        Any iterable of appropriate length.
    """
    # Appending is simply inserting at the current row count.
    position = len(self._table)
    self.insert_row(position, row)
def update_row(self, key, value):
    """Update one or more rows of the table.

    If `key` is an integer, `value` replaces the single row at that
    index. If `key` is a slice, `value` must be an iterable of rows
    which replaces the selected rows.

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.

    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If index specified is out of range.

    TypeError:
        If `value` is of incorrect type.

    ValueError:
        If length of row does not matches number of columns.
    """
    if isinstance(key, int):
        row = self._validate_row(value, init_table_if_required=False)
        row_obj = RowData(self, row)
        self._table[key] = row_obj
    elif isinstance(key, slice):
        row_obj_list = []
        # Validate every replacement row before assigning the slice.
        for row in value:
            row_ = self._validate_row(row, init_table_if_required=True)
            row_obj_list.append(RowData(self, row_))
        self._table[key] = row_obj_list
    else:
        raise TypeError("key must be an integer or a slice object")
def update_column(self, header, column):
    """Update the column titled `header` in the table.

    If length of `column` is smaller than number of rows, lets say
    `k`, only the first `k` values in the column are updated.

    Parameters
    ----------
    header : str
        Header of the column.

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    KeyError:
        If no column exists with title `header`.
    """
    # Bug fix: validate the header's type *before* using it. The
    # original looked the header up first, so a non-string header
    # raised KeyError from get_column_index and this TypeError
    # check was unreachable.
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    # zip truncates to the shorter of (rows, column), which gives
    # the documented "first k values" behaviour.
    for row, new_item in zip(self._table, column):
        row[index] = new_item
def insert_column(self, index, header, column):
    """Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say
    `k`, only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.

    header : str
        Title of the column.

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # Table has no columns yet: this column defines the table.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        column_length = 0
        # Push one value into each existing row; zip stops at the
        # shorter of (rows, column).
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        if column_length == len(self._table) - 1:
            # Every row received a value; register the new column's
            # metadata (header, alignment, width, paddings).
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))
def append_column(self, header, column):
    """Add a new column titled `header` at the right end of the table.

    Parameters
    ----------
    header : str
        Title of the column.

    column : iterable
        Any iterable of appropriate length.
    """
    # Appending is inserting after the current last column.
    last = self._column_count
    self.insert_column(last, header, column)
def clear(self, clear_metadata=False):
    """Remove all rows, and optionally all column-specific data.

    Parameters
    ----------
    clear_metadata : bool, optional
        If it is true(default False), all metadata of columns such as
        their alignment, padding, width, etc. are also cleared and
        number of columns is set to 0.
    """
    # Empty the row list in place; list.clear() is unavailable on
    # Python 2.7, which this codebase still supports.
    self._table[:] = []
    if clear_metadata:
        self._initialize_table(0)
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the
    table. Column width should be set prior to calling this method.
    This method detects intersection and handles it according to the
    values of `intersect_*_*` attributes.

    Parameters
    ----------
    char : str
        Character used to draw the line.

    intersect_left : str
        Junction drawn where the line meets the left border.

    intersect_mid : str
        Junction drawn where the line crosses a column separator.

    intersect_right : str
        Junction drawn where the line meets the right border.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    width = self.get_table_width()
    # Repeat `char` until it covers the full table width; a char of
    # zero terminal width (ZeroDivisionError) degrades to spaces.
    try:
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        line = [' '] * width
    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                # Overwrite the leading characters with the left junction.
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                # Overwrite the trailing characters with the right junction.
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk across the line and splice the middle junction in
                # at every column boundary.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)
    return ''.join(line)
def _get_top_border(self):
return self._get_horizontal_line(self.top_border_char,
self.intersect_top_left,
self.intersect_top_mid,
self.intersect_top_right)
def get_top_border(self): # pragma : no cover
    """Get the Top border of table.

    Column width should be set prior to calling this method.

    Deprecated public accessor: emits a deprecation warning, then
    forwards to the private implementation.

    Returns
    -------
    str
        String which will be printed as the Top border of the table.
    """
    deprecation("'get_top_border()' is deprecated")
    return self._get_top_border()
def _get_header_separator(self):
return self._get_horizontal_line(self.header_separator_char,
self.intersect_header_left,
self.intersect_header_mid,
self.intersect_header_right)
def get_header_separator(self): # pragma : no cover
    """Get the Header separator of table.

    Column width should be set prior to calling this method.

    Deprecated public accessor: emits a deprecation warning, then
    forwards to the private implementation.

    Returns
    -------
    str
        String which will be printed as Header separator of the table.
    """
    deprecation("'get_header_separator()' is deprecated")
    return self._get_header_separator()
def _get_row_separator(self):
return self._get_horizontal_line(self.row_separator_char,
self.intersect_row_left,
self.intersect_row_mid,
self.intersect_row_right)
def get_row_separator(self): # pragma : no cover
    """Get the Row separator of table.

    Column width should be set prior to calling this method.

    Deprecated public accessor: emits a deprecation warning, then
    forwards to the private implementation.

    Returns
    -------
    str
        String which will be printed as Row separator of the table.
    """
    deprecation("'get_row_separator()' is deprecated")
    return self._get_row_separator()
def _get_bottom_border(self):
return self._get_horizontal_line(self.bottom_border_char,
self.intersect_bottom_left,
self.intersect_bottom_mid,
self.intersect_bottom_right)
def get_bottom_border(self): # pragma : no cover
    """Get the Bottom border of table.

    Column width should be set prior to calling this method.

    Deprecated public accessor: emits a deprecation warning, then
    forwards to the private implementation.

    Returns
    -------
    str
        String which will be printed as Bottom border of the table.
    """
    deprecation("'get_bottom_border()' is deprecated")
    return self._get_bottom_border()
def get_table_width(self):
    """Return the rendered width of the table in characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    # Total = column contents + separators between columns + the two
    # outer borders, all measured in terminal columns.
    separators = ((self._column_count - 1)
                  * termwidth(self.column_separator_char))
    borders = (termwidth(self.left_border_char)
               + termwidth(self.right_border_char))
    return sum(self._column_widths) + separators + borders
|
pri22296/beautifultable | beautifultable/utils.py | _convert_to_numeric | python | def _convert_to_numeric(item):
if PY3:
num_types = (int, float)
else: # pragma: no cover
num_types = (int, long, float) # noqa: F821
# We don't wan't to perform any conversions if item is already a number
if isinstance(item, num_types):
return item
# First try for an int conversion so that strings like "5" are converted
# to 5 instead of 5.0 . This is safe as a direct int cast for a non integer
# string raises a ValueError.
try:
num = int(to_unicode(item))
except ValueError:
try:
num = float(to_unicode(item))
except ValueError:
return item
else:
return num
except TypeError:
return item
else:
return num | Helper method to convert a string to float or int if possible.
If the conversion is not possible, it simply returns the string. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/utils.py#L11-L40 | null | """Module containing some utility methods"""
import warnings
from .ansi import ANSIMultiByteString
from .compat import to_unicode, PY3
from .exceptions import BeautifulTableDeprecationWarning
def get_output_str(item, detect_numerics, precision, sign_value):
    """Returns the final string which should be displayed"""
    if detect_numerics:
        item = _convert_to_numeric(item)
        if isinstance(item, float):
            item = round(item, precision)
    try:
        # Apply the sign option ('+', '-' or ' ') via the format-spec
        # mini-language.
        formatted = '{:{sign}}'.format(item, sign=sign_value)
    except (ValueError, TypeError):
        # Types that reject a sign in their format spec (e.g. str)
        # fall back to the raw value.
        formatted = item
    return to_unicode(formatted)
def termwidth(item):
    """Returns the visible width of the string as shown on the terminal.

    Width is computed by ANSIMultiByteString, so it can differ from
    ``len(item)`` — presumably to account for ANSI escape sequences
    and multi-byte characters (see beautifultable.ansi).
    """
    obj = ANSIMultiByteString(to_unicode(item))
    return obj.termwidth()
def textwrap(item, width):
    """Wrap `item` to fit in `width` terminal columns.

    Delegates to ANSIMultiByteString.wrap; callers treat the result
    as a sequence of line strings.
    """
    obj = ANSIMultiByteString(to_unicode(item))
    return obj.wrap(width)
def raise_suppressed(exp):
    """Raise `exp` with its ``__cause__`` cleared.

    Used when re-raising a new exception from inside an ``except``
    block without advertising the original exception as its explicit
    cause.
    """
    exp.__cause__ = None
    raise exp
def deprecation(message):
    # Emit a library-specific warning category so users can filter
    # BeautifulTable deprecations independently of other warnings.
    warnings.warn(message, BeautifulTableDeprecationWarning)
|
pri22296/beautifultable | beautifultable/utils.py | get_output_str | python | def get_output_str(item, detect_numerics, precision, sign_value):
if detect_numerics:
item = _convert_to_numeric(item)
if isinstance(item, float):
item = round(item, precision)
try:
item = '{:{sign}}'.format(item, sign=sign_value)
except (ValueError, TypeError):
pass
return to_unicode(item) | Returns the final string which should be displayed | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/utils.py#L43-L53 | [
"def _convert_to_numeric(item):\n \"\"\"\n Helper method to convert a string to float or int if possible.\n\n If the conversion is not possible, it simply returns the string.\n \"\"\"\n if PY3:\n num_types = (int, float)\n else: # pragma: no cover\n num_types = (int, long, float) # noqa: F821\n # We don't wan't to perform any conversions if item is already a number\n if isinstance(item, num_types):\n return item\n\n # First try for an int conversion so that strings like \"5\" are converted\n # to 5 instead of 5.0 . This is safe as a direct int cast for a non integer\n # string raises a ValueError.\n try:\n num = int(to_unicode(item))\n except ValueError:\n try:\n num = float(to_unicode(item))\n except ValueError:\n return item\n else:\n return num\n except TypeError:\n return item\n else:\n return num\n"
] | """Module containing some utility methods"""
import warnings
from .ansi import ANSIMultiByteString
from .compat import to_unicode, PY3
from .exceptions import BeautifulTableDeprecationWarning
def _convert_to_numeric(item):
"""
Helper method to convert a string to float or int if possible.
If the conversion is not possible, it simply returns the string.
"""
if PY3:
num_types = (int, float)
else: # pragma: no cover
num_types = (int, long, float) # noqa: F821
# We don't wan't to perform any conversions if item is already a number
if isinstance(item, num_types):
return item
# First try for an int conversion so that strings like "5" are converted
# to 5 instead of 5.0 . This is safe as a direct int cast for a non integer
# string raises a ValueError.
try:
num = int(to_unicode(item))
except ValueError:
try:
num = float(to_unicode(item))
except ValueError:
return item
else:
return num
except TypeError:
return item
else:
return num
def termwidth(item):
"""Returns the visible width of the string as shown on the terminal"""
obj = ANSIMultiByteString(to_unicode(item))
return obj.termwidth()
def textwrap(item, width):
obj = ANSIMultiByteString(to_unicode(item))
return obj.wrap(width)
def raise_suppressed(exp):
exp.__cause__ = None
raise exp
def deprecation(message):
warnings.warn(message, BeautifulTableDeprecationWarning)
|
pri22296/beautifultable | beautifultable/rows.py | RowData._get_row_within_width | python | def _get_row_within_width(self, row):
table = self._table
lpw, rpw = table.left_padding_widths, table.right_padding_widths
wep = table.width_exceed_policy
list_of_rows = []
if (wep is WidthExceedPolicy.WEP_STRIP or
wep is WidthExceedPolicy.WEP_ELLIPSIS):
# Let's strip the row
delimiter = '' if wep is WidthExceedPolicy.WEP_STRIP else '...'
row_item_list = []
for index, row_item in enumerate(row):
left_pad = table._column_pad * lpw[index]
right_pad = table._column_pad * rpw[index]
clmp_str = (left_pad
+ self._clamp_string(row_item, index, delimiter)
+ right_pad)
row_item_list.append(clmp_str)
list_of_rows.append(row_item_list)
elif wep is WidthExceedPolicy.WEP_WRAP:
# Let's wrap the row
string_partition = []
for index, row_item in enumerate(row):
width = table.column_widths[index] - lpw[index] - rpw[index]
string_partition.append(textwrap(row_item, width))
for row_items in zip_longest(*string_partition, fillvalue=''):
row_item_list = []
for index, row_item in enumerate(row_items):
left_pad = table._column_pad * lpw[index]
right_pad = table._column_pad * rpw[index]
row_item_list.append(left_pad + row_item + right_pad)
list_of_rows.append(row_item_list)
if len(list_of_rows) == 0:
return [[''] * table.column_count]
else:
return list_of_rows | Process a row so that it is clamped by column_width.
Parameters
----------
row : array_like
A single row.
Returns
-------
list of list:
List representation of the `row` after it has been processed
according to width exceed policy. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/rows.py#L9-L63 | [
"def textwrap(item, width):\n obj = ANSIMultiByteString(to_unicode(item))\n return obj.wrap(width)\n",
"def _clamp_string(self, row_item, column_index, delimiter=''):\n \"\"\"Clamp `row_item` to fit in column referred by column_index.\n\n This method considers padding and appends the delimiter if `row_item`\n needs to be truncated.\n\n Parameters\n ----------\n row_item: str\n String which should be clamped.\n\n column_index: int\n Index of the column `row_item` belongs to.\n\n delimiter: str\n String which is to be appended to the clamped string.\n\n Returns\n -------\n str\n The modified string which fits in it's column.\n \"\"\"\n width = (self._table.column_widths[column_index]\n - self._table.left_padding_widths[column_index]\n - self._table.right_padding_widths[column_index])\n\n if termwidth(row_item) <= width:\n return row_item\n else:\n if width - len(delimiter) >= 0:\n clamped_string = (textwrap(row_item, width-len(delimiter))[0]\n + delimiter)\n else:\n clamped_string = delimiter[:width]\n return clamped_string\n"
] | class RowData(BaseRow):
def _clamp_string(self, row_item, column_index, delimiter=''):
"""Clamp `row_item` to fit in column referred by column_index.
This method considers padding and appends the delimiter if `row_item`
needs to be truncated.
Parameters
----------
row_item: str
String which should be clamped.
column_index: int
Index of the column `row_item` belongs to.
delimiter: str
String which is to be appended to the clamped string.
Returns
-------
str
The modified string which fits in it's column.
"""
width = (self._table.column_widths[column_index]
- self._table.left_padding_widths[column_index]
- self._table.right_padding_widths[column_index])
if termwidth(row_item) <= width:
return row_item
else:
if width - len(delimiter) >= 0:
clamped_string = (textwrap(row_item, width-len(delimiter))[0]
+ delimiter)
else:
clamped_string = delimiter[:width]
return clamped_string
def __str__(self):
"""Return a string representation of a row."""
rows = []
table = self._table
width = table.column_widths
align = table.column_alignments
sign = table.sign_mode
lpw = table.left_padding_widths
rpw = table.right_padding_widths
string = []
for i, item in enumerate(self._row):
if isinstance(item, type(table)):
# temporarily change the max width of the table
curr_max_width = item.max_table_width
item.max_table_width = width[i] - lpw[i] - rpw[i]
rows.append(to_unicode(item).split('\n'))
item.max_table_width = curr_max_width
else:
rows.append(to_unicode(item).split('\n'))
for row in map(list, zip_longest(*rows, fillvalue='')):
for i in range(len(row)):
row[i] = get_output_str(row[i], table.detect_numerics,
table.numeric_precision, sign.value)
list_of_rows = self._get_row_within_width(row)
for row_ in list_of_rows:
for i in range(table.column_count):
# str.format method doesn't work for multibyte strings
# hence, we need to manually align the texts instead
# of using the align property of the str.format method
pad_len = width[i] - termwidth(row_[i])
if align[i].value == '<':
right_pad = ' ' * pad_len
row_[i] = to_unicode(row_[i]) + right_pad
elif align[i].value == '>':
left_pad = ' ' * pad_len
row_[i] = left_pad + to_unicode(row_[i])
else:
left_pad = ' ' * (pad_len//2)
right_pad = ' ' * (pad_len - pad_len//2)
row_[i] = left_pad + to_unicode(row_[i]) + right_pad
content = table.column_separator_char.join(row_)
content = table.left_border_char + content
content += table.right_border_char
string.append(content)
return '\n'.join(string)
|
pri22296/beautifultable | beautifultable/rows.py | RowData._clamp_string | python | def _clamp_string(self, row_item, column_index, delimiter=''):
width = (self._table.column_widths[column_index]
- self._table.left_padding_widths[column_index]
- self._table.right_padding_widths[column_index])
if termwidth(row_item) <= width:
return row_item
else:
if width - len(delimiter) >= 0:
clamped_string = (textwrap(row_item, width-len(delimiter))[0]
+ delimiter)
else:
clamped_string = delimiter[:width]
return clamped_string | Clamp `row_item` to fit in column referred by column_index.
This method considers padding and appends the delimiter if `row_item`
needs to be truncated.
Parameters
----------
row_item: str
String which should be clamped.
column_index: int
Index of the column `row_item` belongs to.
delimiter: str
String which is to be appended to the clamped string.
Returns
-------
str
The modified string which fits in it's column. | train | https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/rows.py#L65-L99 | null | class RowData(BaseRow):
def _get_row_within_width(self, row):
"""Process a row so that it is clamped by column_width.
Parameters
----------
row : array_like
A single row.
Returns
-------
list of list:
List representation of the `row` after it has been processed
according to width exceed policy.
"""
table = self._table
lpw, rpw = table.left_padding_widths, table.right_padding_widths
wep = table.width_exceed_policy
list_of_rows = []
if (wep is WidthExceedPolicy.WEP_STRIP or
wep is WidthExceedPolicy.WEP_ELLIPSIS):
# Let's strip the row
delimiter = '' if wep is WidthExceedPolicy.WEP_STRIP else '...'
row_item_list = []
for index, row_item in enumerate(row):
left_pad = table._column_pad * lpw[index]
right_pad = table._column_pad * rpw[index]
clmp_str = (left_pad
+ self._clamp_string(row_item, index, delimiter)
+ right_pad)
row_item_list.append(clmp_str)
list_of_rows.append(row_item_list)
elif wep is WidthExceedPolicy.WEP_WRAP:
# Let's wrap the row
string_partition = []
for index, row_item in enumerate(row):
width = table.column_widths[index] - lpw[index] - rpw[index]
string_partition.append(textwrap(row_item, width))
for row_items in zip_longest(*string_partition, fillvalue=''):
row_item_list = []
for index, row_item in enumerate(row_items):
left_pad = table._column_pad * lpw[index]
right_pad = table._column_pad * rpw[index]
row_item_list.append(left_pad + row_item + right_pad)
list_of_rows.append(row_item_list)
if len(list_of_rows) == 0:
return [[''] * table.column_count]
else:
return list_of_rows
def __str__(self):
"""Return a string representation of a row."""
rows = []
table = self._table
width = table.column_widths
align = table.column_alignments
sign = table.sign_mode
lpw = table.left_padding_widths
rpw = table.right_padding_widths
string = []
for i, item in enumerate(self._row):
if isinstance(item, type(table)):
# temporarily change the max width of the table
curr_max_width = item.max_table_width
item.max_table_width = width[i] - lpw[i] - rpw[i]
rows.append(to_unicode(item).split('\n'))
item.max_table_width = curr_max_width
else:
rows.append(to_unicode(item).split('\n'))
for row in map(list, zip_longest(*rows, fillvalue='')):
for i in range(len(row)):
row[i] = get_output_str(row[i], table.detect_numerics,
table.numeric_precision, sign.value)
list_of_rows = self._get_row_within_width(row)
for row_ in list_of_rows:
for i in range(table.column_count):
# str.format method doesn't work for multibyte strings
# hence, we need to manually align the texts instead
# of using the align property of the str.format method
pad_len = width[i] - termwidth(row_[i])
if align[i].value == '<':
right_pad = ' ' * pad_len
row_[i] = to_unicode(row_[i]) + right_pad
elif align[i].value == '>':
left_pad = ' ' * pad_len
row_[i] = left_pad + to_unicode(row_[i])
else:
left_pad = ' ' * (pad_len//2)
right_pad = ' ' * (pad_len - pad_len//2)
row_[i] = left_pad + to_unicode(row_[i]) + right_pad
content = table.column_separator_char.join(row_)
content = table.left_border_char + content
content += table.right_border_char
string.append(content)
return '\n'.join(string)
|
ml31415/numpy-groupies | numpy_groupies/benchmarks/simple.py | aggregate_group_loop | python | def aggregate_group_loop(*args, **kwargs):
func = kwargs['func']
del kwargs['func']
return aggregate_np(*args, func=lambda x: func(x), **kwargs) | wraps func in lambda which prevents aggregate_numpy from
recognising and optimising it. Instead it groups and loops. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/benchmarks/simple.py#L14-L19 | [
"def dummy_no_impl(*args, **kwargs):\n raise NotImplementedError(\"You may need to install another package (numpy, \"\n \"weave, or numba) to access a working implementation.\")\n",
"def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',\n dtype=None, axis=None, **kwargs):\n return _aggregate_base(group_idx, a, size=size, fill_value=fill_value,\n order=order, dtype=dtype, func=func, axis=axis,\n _impl_dict=_impl_dict, _nansqueeze=True, **kwargs)\n"
] | #!/usr/bin/python -B
# -*- coding: utf-8 -*-
from __future__ import print_function
import timeit
import numpy as np
from numpy_groupies.utils import aliasing
from numpy_groupies import aggregate_py, aggregate_np, aggregate_ufunc
from numpy_groupies.aggregate_pandas import aggregate as aggregate_pd
print("TODO: use more extensive tests")
print("")
print("-----simple examples----------")
test_a = np.array([12.0, 3.2, -15, 88, 12.9])
test_group_idx = np.array([1, 0, 1, 4, 1 ])
print("test_a: ", test_a)
print("test_group_idx: ", test_group_idx)
print("aggregate(test_group_idx, test_a):")
print(aggregate_np(test_group_idx, test_a)) # group vals by idx and sum
# array([3.2, 9.9, 0., 0., 88.])
print("aggregate(test_group_idx, test_a, sz=8, func='min', fill_value=np.nan):")
print(aggregate_np(test_group_idx, test_a, size=8, func='min', fill_value=np.nan))
# array([3.2, -15., nan, 88., nan, nan, nan, nan])
print("aggregate(test_group_idx, test_a, sz=5, func=lambda x: ' + '.join(str(xx) for xx in x),fill_value='')")
print(aggregate_np(test_group_idx, test_a, size=5, func=lambda x: ' + '.join(str(xx) for xx in x), fill_value=''))
print("")
print("---------testing--------------")
print("compare against group-and-loop with numpy")
testable_funcs = {aliasing[f]: f for f in (np.sum, np.prod, np.any, np.all, np.min, np.max, np.std, np.var, np.mean)}
test_group_idx = np.random.randint(0, int(1e3), int(1e5))
test_a = np.random.rand(int(1e5)) * 100 - 50
test_a[test_a > 25] = 0 # for use with bool functions
for name, f in testable_funcs.items():
numpy_loop_group = aggregate_group_loop(test_group_idx, test_a, func=f)
for acc_func, acc_name in [(aggregate_np, 'np-optimised'),
(aggregate_ufunc, 'np-ufunc-at'),
(aggregate_py, 'purepy'),
(aggregate_pd, 'pandas')]:
try:
test_out = acc_func(test_group_idx, test_a, func=name)
test_out = np.asarray(test_out)
if not np.allclose(test_out, numpy_loop_group.astype(test_out.dtype)):
print(name, acc_name, "FAILED test, output: [" + acc_name + "; correct]...")
print(np.vstack((test_out, numpy_loop_group)))
else:
print(name, acc_name, "PASSED test")
except NotImplementedError:
print(name, acc_name, "NOT IMPLEMENTED")
print("")
print("----------benchmarking-------------")
print("Note that the actual observed speedup depends on a variety of properties of the input.")
print("Here we are using 100,000 indices uniformly picked from [0, 1000).")
print("Specifically, about 25% of the values are 0 (for use with bool operations),")
print("the remainder are uniformly distribuited on [-50,25).")
print("Times are scaled to 10 repetitions (actual number of reps used may not be 10).")
print(''.join(['function'.rjust(8), 'pure-py'.rjust(14), 'np-grouploop'.rjust(14),
'np-ufuncat'.rjust(14), 'np-optimised'.rjust(14), 'pandas'.rjust(14),
'ratio'.rjust(15)]))
for name, f in testable_funcs.items():
print(name.rjust(8), end='')
times = [None] * 5
for ii, acc_func in enumerate([aggregate_py, aggregate_group_loop,
aggregate_ufunc, aggregate_np,
aggregate_pd]):
try:
func = f if acc_func is aggregate_group_loop else name
reps = 3 if acc_func is aggregate_py else 20
times[ii] = timeit.Timer(lambda: acc_func(test_group_idx, test_a, func=func)).timeit(number=reps) / reps * 10
print(("%.1fms" % ((times[ii] * 1000))).rjust(13), end='')
except NotImplementedError:
print("no-impl".rjust(13), end='')
denom = min(t for t in times if t is not None)
ratios = [("-".center(4) if t is None else str(round(t / denom, 1))).center(5) for t in times]
print(" ", (":".join(ratios)))
|
ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | step_count | python | def step_count(group_idx):
cmp_pos = 0
steps = 1
if len(group_idx) < 1:
return 0
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
steps += 1
return steps | Return the amount of index changes within group_idx. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L445-L455 | null | from __future__ import division
import numba as nb
import numpy as np
from .utils import get_func, isstr, aggregate_common_doc, funcs_no_separate_nan
from .utils_numpy import aliasing, input_validation, check_dtype, check_fill_value
class AggregateOp(object):
"""
Every subclass of AggregateOp handles a different aggregation operation. There are
several private class methods that need to be overwritten by the subclasses
in order to implement different functionality.
On object instantiation, all necessary static methods are compiled together into
two jitted callables, one for scalar arguments, and one for arrays. Calling the
instantiated object picks the right cached callable, does some further preprocessing
and then executes the actual aggregation operation.
"""
forced_fill_value = None
counter_fill_value = 1
counter_dtype = bool
mean_fill_value = None
mean_dtype = np.float64
outer = False
reverse = False
nans = False
def __init__(self, func=None, **kwargs):
if func is None:
func = type(self).__name__.lower()
self.func = func
self.__dict__.update(kwargs)
# Cache the compiled functions, so they don't have to be recompiled on every call
self._jit_scalar = self.callable(self.nans, self.reverse, scalar=True)
self._jit_non_scalar = self.callable(self.nans, self.reverse, scalar=False)
def __call__(self, group_idx, a, size=None, fill_value=0, order='C',
dtype=None, axis=None, ddof=0):
iv = input_validation(group_idx, a, size=size, order=order, axis=axis, check_bounds=False)
group_idx, a, flat_size, ndim_idx, size = iv
# TODO: The typecheck should be done by the class itself, not by check_dtype
dtype = check_dtype(dtype, self.func, a, len(group_idx))
check_fill_value(fill_value, dtype)
input_dtype = type(a) if np.isscalar(a) else a.dtype
ret, counter, mean, outer = self._initialize(flat_size, fill_value, dtype, input_dtype, group_idx.size)
group_idx = np.ascontiguousarray(group_idx)
if not np.isscalar(a):
a = np.ascontiguousarray(a)
jitfunc = self._jit_non_scalar
else:
jitfunc = self._jit_scalar
jitfunc(group_idx, a, ret, counter, mean, outer, fill_value, ddof)
self._finalize(ret, counter, fill_value)
if self.outer:
return outer
# Deal with ndimensional indexing
if ndim_idx > 1:
ret = ret.reshape(size, order=order)
return ret
@classmethod
def _initialize(cls, flat_size, fill_value, dtype, input_dtype, input_size):
if cls.forced_fill_value is None:
ret = np.full(flat_size, fill_value, dtype=dtype)
else:
ret = np.full(flat_size, cls.forced_fill_value, dtype=dtype)
counter = mean = outer = None
if cls.counter_fill_value is not None:
counter = np.full_like(ret, cls.counter_fill_value, dtype=cls.counter_dtype)
if cls.mean_fill_value is not None:
dtype = cls.mean_dtype if cls.mean_dtype else input_dtype
mean = np.full_like(ret, cls.mean_fill_value, dtype=dtype)
if cls.outer:
outer = np.full(input_size, fill_value, dtype=dtype)
return ret, counter, mean, outer
@classmethod
def _finalize(cls, ret, counter, fill_value):
if cls.forced_fill_value is not None and fill_value != cls.forced_fill_value:
ret[counter] = fill_value
@classmethod
def callable(cls, nans=False, reverse=False, scalar=False):
""" Compile a jitted function doing the hard part of the job """
_valgetter = cls._valgetter_scalar if scalar else cls._valgetter
valgetter = nb.njit(_valgetter)
outersetter = nb.njit(cls._outersetter)
_cls_inner = nb.njit(cls._inner)
if nans:
def _inner(ri, val, ret, counter, mean):
if not np.isnan(val):
_cls_inner(ri, val, ret, counter, mean)
inner = nb.njit(_inner)
else:
inner = _cls_inner
def _loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
# fill_value and ddof need to be present for being exchangeable with loop_2pass
size = len(ret)
rng = range(len(group_idx) - 1, -1 , -1) if reverse else range(len(group_idx))
for i in rng:
ri = group_idx[i]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
val = valgetter(a, i)
inner(ri, val, ret, counter, mean)
outersetter(outer, i, ret[ri])
return nb.njit(_loop, nogil=True)
@staticmethod
def _valgetter(a, i):
return a[i]
@staticmethod
def _valgetter_scalar(a, i):
return a
@staticmethod
def _inner(ri, val, ret, counter, mean):
raise NotImplementedError("subclasses need to overwrite _inner")
@staticmethod
def _outersetter(outer, i, val):
pass
class Aggregate2pass(AggregateOp):
"""Base class for everything that needs to process the data twice like mean, var and std."""
@classmethod
def callable(cls, nans=False, reverse=False, scalar=False):
# Careful, cls needs to be passed, so that the overwritten methods remain available in
# AggregateOp.callable
loop = super(Aggregate2pass, cls).callable(nans=nans, reverse=reverse, scalar=scalar)
_2pass_inner = nb.njit(cls._2pass_inner)
def _loop2(ret, counter, mean, fill_value, ddof):
for ri in range(len(ret)):
if counter[ri]:
ret[ri] = _2pass_inner(ri, ret, counter, mean, ddof)
else:
ret[ri] = fill_value
loop2 = nb.njit(_loop2)
def _loop_2pass(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof)
loop2(ret, counter, mean, fill_value, ddof)
return nb.njit(_loop_2pass)
@staticmethod
def _2pass_inner(ri, ret, counter, mean, ddof):
raise NotImplementedError("subclasses need to overwrite _2pass_inner")
@classmethod
def _finalize(cls, ret, counter, fill_value):
"""Copying the fill value is already done in the 2nd pass"""
pass
class AggregateNtoN(AggregateOp):
"""Base class for cumulative functions, where the output size matches the input size."""
outer = True
@staticmethod
def _outersetter(outer, i, val):
outer[i] = val
class AggregateGeneric(AggregateOp):
"""Base class for jitting arbitrary functions."""
counter_fill_value = None
def __init__(self, func, **kwargs):
self.func = func
self.__dict__.update(kwargs)
self._jitfunc = self.callable(self.nans)
def __call__(self, group_idx, a, size=None, fill_value=0, order='C',
dtype=None, axis=None, ddof=0):
iv = input_validation(group_idx, a, size=size, order=order, axis=axis, check_bounds=False)
group_idx, a, flat_size, ndim_idx, size = iv
# TODO: The typecheck should be done by the class itself, not by check_dtype
dtype = check_dtype(dtype, self.func, a, len(group_idx))
check_fill_value(fill_value, dtype)
input_dtype = type(a) if np.isscalar(a) else a.dtype
ret, _, _, _= self._initialize(flat_size, fill_value, dtype, input_dtype, group_idx.size)
group_idx = np.ascontiguousarray(group_idx)
sortidx = np.argsort(group_idx, kind='mergesort')
self._jitfunc(sortidx, group_idx, a, ret)
# Deal with ndimensional indexing
if ndim_idx > 1:
ret = ret.reshape(size, order=order)
return ret
def callable(self, nans=False):
"""Compile a jitted function and loop it over the sorted data."""
jitfunc = nb.njit(self.func, nogil=True)
def _loop(sortidx, group_idx, a, ret):
size = len(ret)
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
indices = step_indices(group_idx_srt)
for i in range(len(indices) - 1):
start_idx, stop_idx = indices[i], indices[i + 1]
ri = group_idx_srt[start_idx]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
ret[ri] = jitfunc(a_srt[start_idx:stop_idx])
return nb.njit(_loop, nogil=True)
class Sum(AggregateOp):
forced_fill_value = 0
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] += val
class Prod(AggregateOp):
forced_fill_value = 1
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] *= val
class Len(AggregateOp):
forced_fill_value = 0
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] += 1
class All(AggregateOp):
forced_fill_value = 1
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] &= bool(val)
class Any(AggregateOp):
forced_fill_value = 0
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] |= bool(val)
class Last(AggregateOp):
counter_fill_value = None
@staticmethod
def _inner(ri, val, ret, counter, mean):
ret[ri] = val
class First(Last):
reverse = True
class AllNan(AggregateOp):
forced_fill_value = 1
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] &= val == val
class AnyNan(AggregateOp):
forced_fill_value = 0
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] |= val != val
class Max(AggregateOp):
@staticmethod
def _inner(ri, val, ret, counter, mean):
if counter[ri]:
ret[ri] = val
counter[ri] = 0
elif ret[ri] < val:
ret[ri] = val
class Min(AggregateOp):
@staticmethod
def _inner(ri, val, ret, counter, mean):
if counter[ri]:
ret[ri] = val
counter[ri] = 0
elif ret[ri] > val:
ret[ri] = val
class ArgMax(AggregateOp):
mean_fill_value = np.nan
@staticmethod
def _valgetter(a, i):
return a[i], i
@staticmethod
def _inner(ri, val, ret, counter, mean):
cmp_val, arg = val
if counter[ri]:
mean[ri] = cmp_val
ret[ri] = arg
counter[ri] = 0
elif mean[ri] < cmp_val:
mean[ri] = cmp_val
ret[ri] = arg
class ArgMin(ArgMax):
@staticmethod
def _inner(ri, val, ret, counter, mean):
cmp_val, arg = val
if counter[ri]:
mean[ri] = cmp_val
ret[ri] = arg
counter[ri] = 0
elif mean[ri] > cmp_val:
mean[ri] = cmp_val
ret[ri] = arg
class Mean(Aggregate2pass):
counter_fill_value = 0
counter_dtype = int
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] += 1
ret[ri] += val
@staticmethod
def _2pass_inner(ri, ret, counter, mean, ddof):
return ret[ri] / counter[ri]
class Std(Mean):
mean_fill_value = 0
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] += 1
mean[ri] += val
ret[ri] += val * val
@staticmethod
def _2pass_inner(ri, ret, counter, mean, ddof):
mean2 = mean[ri] * mean[ri]
return np.sqrt((ret[ri] - mean2 / counter[ri]) / (counter[ri] - ddof))
class Var(Std):
@staticmethod
def _2pass_inner(ri, ret, counter, mean, ddof):
mean2 = mean[ri] * mean[ri]
return (ret[ri] - mean2 / counter[ri]) / (counter[ri] - ddof)
class CumSum(AggregateNtoN, Sum):
pass
class CumProd(AggregateNtoN, Prod):
pass
class CumMax(AggregateNtoN, Max):
pass
class CumMin(AggregateNtoN, Min):
pass
def get_funcs():
funcs = dict()
for op in (Sum, Prod, Len, All, Any, Last, First, AllNan, AnyNan, Min, Max,
ArgMin, ArgMax, Mean, Std, Var,
CumSum, CumProd, CumMax, CumMin):
funcname = op.__name__.lower()
funcs[funcname] = op(funcname)
if funcname not in funcs_no_separate_nan:
funcname = 'nan' + funcname
funcs[funcname] = op(funcname, nans=True)
return funcs
_impl_dict = get_funcs()
_default_cache = {}
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
dtype=None, axis=None, cache=None, **kwargs):
func = get_func(func, aliasing, _impl_dict)
if not isstr(func):
if cache in (None, False):
aggregate_op = AggregateGeneric(func)
else:
if cache is True:
cache = _default_cache
aggregate_op = cache.setdefault(func, AggregateGeneric(func))
return aggregate_op(group_idx, a, size, fill_value, order, dtype, axis, **kwargs)
else:
func = _impl_dict[func]
return func(group_idx, a, size, fill_value, order, dtype, axis, **kwargs)
aggregate.__doc__ = """
This is the numba implementation of aggregate.
""" + aggregate_common_doc
@nb.njit(nogil=True, cache=True)
@nb.njit(nogil=True, cache=True)
def step_indices(group_idx):
"""Return the edges of areas within group_idx, which are filled with the same value."""
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, np.int64)
indices[0] = 0
indices[-1] = group_idx.size
cmp_pos = 0
ri = 1
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
indices[ri] = i
ri += 1
return indices
|
ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | step_indices | python | def step_indices(group_idx):
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, np.int64)
indices[0] = 0
indices[-1] = group_idx.size
cmp_pos = 0
ri = 1
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
indices[ri] = i
ri += 1
return indices | Return the edges of areas within group_idx, which are filled with the same value. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L459-L472 | null | from __future__ import division
import numba as nb
import numpy as np
from .utils import get_func, isstr, aggregate_common_doc, funcs_no_separate_nan
from .utils_numpy import aliasing, input_validation, check_dtype, check_fill_value
class AggregateOp(object):
    """
    Every subclass of AggregateOp handles a different aggregation operation. There are
    several private class methods that need to be overwritten by the subclasses
    in order to implement different functionality.
    On object instantiation, all necessary static methods are compiled together into
    two jitted callables, one for scalar arguments, and one for arrays. Calling the
    instantiated object picks the right cached callable, does some further preprocessing
    and then executes the actual aggregation operation.
    """

    # Class-level configuration, overridden by subclasses:
    forced_fill_value = None   # if set, ret starts at this value instead of fill_value
    counter_fill_value = 1     # initial value of the per-group "untouched" marker array
    counter_dtype = bool
    mean_fill_value = None     # if set, an auxiliary per-group array is allocated (Std, ArgMax, ...)
    mean_dtype = np.float64
    outer = False              # True for cumulative (NtoN) ops: return per-input results
    reverse = False            # True to scan the input back-to-front (used by First)
    nans = False               # True to skip NaN values inside the jitted loop

    def __init__(self, func=None, **kwargs):
        """Compile and cache the jitted loops for this operation.

        func defaults to the lowercased class name; kwargs may override any
        of the class-level configuration attributes above.
        """
        if func is None:
            func = type(self).__name__.lower()
        self.func = func
        self.__dict__.update(kwargs)
        # Cache the compiled functions, so they don't have to be recompiled on every call
        self._jit_scalar = self.callable(self.nans, self.reverse, scalar=True)
        self._jit_non_scalar = self.callable(self.nans, self.reverse, scalar=False)

    def __call__(self, group_idx, a, size=None, fill_value=0, order='C',
                 dtype=None, axis=None, ddof=0):
        """Validate inputs, run the cached jitted loop, and shape the result."""
        iv = input_validation(group_idx, a, size=size, order=order, axis=axis, check_bounds=False)
        group_idx, a, flat_size, ndim_idx, size = iv
        # TODO: The typecheck should be done by the class itself, not by check_dtype
        dtype = check_dtype(dtype, self.func, a, len(group_idx))
        check_fill_value(fill_value, dtype)
        input_dtype = type(a) if np.isscalar(a) else a.dtype
        ret, counter, mean, outer = self._initialize(flat_size, fill_value, dtype, input_dtype, group_idx.size)
        group_idx = np.ascontiguousarray(group_idx)
        if not np.isscalar(a):
            a = np.ascontiguousarray(a)
            jitfunc = self._jit_non_scalar
        else:
            jitfunc = self._jit_scalar
        jitfunc(group_idx, a, ret, counter, mean, outer, fill_value, ddof)
        self._finalize(ret, counter, fill_value)
        if self.outer:
            # Cumulative (NtoN) operations return one value per input element
            return outer
        # Deal with ndimensional indexing
        if ndim_idx > 1:
            ret = ret.reshape(size, order=order)
        return ret

    @classmethod
    def _initialize(cls, flat_size, fill_value, dtype, input_dtype, input_size):
        """Allocate the result array plus the optional counter/mean/outer scratch arrays."""
        if cls.forced_fill_value is None:
            ret = np.full(flat_size, fill_value, dtype=dtype)
        else:
            ret = np.full(flat_size, cls.forced_fill_value, dtype=dtype)
        counter = mean = outer = None
        if cls.counter_fill_value is not None:
            counter = np.full_like(ret, cls.counter_fill_value, dtype=cls.counter_dtype)
        if cls.mean_fill_value is not None:
            dtype = cls.mean_dtype if cls.mean_dtype else input_dtype
            mean = np.full_like(ret, cls.mean_fill_value, dtype=dtype)
        if cls.outer:
            outer = np.full(input_size, fill_value, dtype=dtype)
        return ret, counter, mean, outer

    @classmethod
    def _finalize(cls, ret, counter, fill_value):
        """Write fill_value into groups that received no data (counter still truthy)."""
        if cls.forced_fill_value is not None and fill_value != cls.forced_fill_value:
            ret[counter] = fill_value

    @classmethod
    def callable(cls, nans=False, reverse=False, scalar=False):
        """ Compile a jitted function doing the hard part of the job """
        _valgetter = cls._valgetter_scalar if scalar else cls._valgetter
        valgetter = nb.njit(_valgetter)
        outersetter = nb.njit(cls._outersetter)
        _cls_inner = nb.njit(cls._inner)
        if nans:
            # Wrap the inner kernel so NaN values are skipped entirely
            def _inner(ri, val, ret, counter, mean):
                if not np.isnan(val):
                    _cls_inner(ri, val, ret, counter, mean)
            inner = nb.njit(_inner)
        else:
            inner = _cls_inner

        def _loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
            # fill_value and ddof need to be present for being exchangeable with loop_2pass
            size = len(ret)
            rng = range(len(group_idx) - 1, -1 , -1) if reverse else range(len(group_idx))
            for i in rng:
                ri = group_idx[i]
                if ri < 0:
                    raise ValueError("negative indices not supported")
                if ri >= size:
                    raise ValueError("one or more indices in group_idx are too large")
                val = valgetter(a, i)
                inner(ri, val, ret, counter, mean)
                outersetter(outer, i, ret[ri])
        return nb.njit(_loop, nogil=True)

    @staticmethod
    def _valgetter(a, i):
        """Fetch the i-th value from an array input."""
        return a[i]

    @staticmethod
    def _valgetter_scalar(a, i):
        """Return the scalar input unchanged for every index."""
        return a

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        raise NotImplementedError("subclasses need to overwrite _inner")

    @staticmethod
    def _outersetter(outer, i, val):
        # No-op by default; AggregateNtoN overwrites this to record per-input results
        pass
class Aggregate2pass(AggregateOp):
    """Base class for everything that needs to process the data twice like mean, var and std."""

    @classmethod
    def callable(cls, nans=False, reverse=False, scalar=False):
        """Compile the first-pass loop plus a second pass applying _2pass_inner per group."""
        # Careful, cls needs to be passed, so that the overwritten methods remain available in
        # AggregateOp.callable
        loop = super(Aggregate2pass, cls).callable(nans=nans, reverse=reverse, scalar=scalar)
        _2pass_inner = nb.njit(cls._2pass_inner)

        def _loop2(ret, counter, mean, fill_value, ddof):
            # Second pass: finalize groups that received data, fill the rest
            for ri in range(len(ret)):
                if counter[ri]:
                    ret[ri] = _2pass_inner(ri, ret, counter, mean, ddof)
                else:
                    ret[ri] = fill_value
        loop2 = nb.njit(_loop2)

        def _loop_2pass(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
            loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof)
            loop2(ret, counter, mean, fill_value, ddof)
        return nb.njit(_loop_2pass)

    @staticmethod
    def _2pass_inner(ri, ret, counter, mean, ddof):
        raise NotImplementedError("subclasses need to overwrite _2pass_inner")

    @classmethod
    def _finalize(cls, ret, counter, fill_value):
        """Copying the fill value is already done in the 2nd pass"""
        pass
class AggregateNtoN(AggregateOp):
    """Base class for cumulative functions, where the output size matches the input size."""
    outer = True

    @staticmethod
    def _outersetter(outer, i, val):
        # Record the running per-group result for every input position
        outer[i] = val
class AggregateGeneric(AggregateOp):
    """Base class for jitting arbitrary functions."""
    counter_fill_value = None

    def __init__(self, func, **kwargs):
        """Jit-compile the user-supplied reduction function once, up front."""
        self.func = func
        self.__dict__.update(kwargs)
        self._jitfunc = self.callable(self.nans)

    def __call__(self, group_idx, a, size=None, fill_value=0, order='C',
                 dtype=None, axis=None, ddof=0):
        """Validate inputs, sort by group, and apply the jitted function per group."""
        iv = input_validation(group_idx, a, size=size, order=order, axis=axis, check_bounds=False)
        group_idx, a, flat_size, ndim_idx, size = iv
        # TODO: The typecheck should be done by the class itself, not by check_dtype
        dtype = check_dtype(dtype, self.func, a, len(group_idx))
        check_fill_value(fill_value, dtype)
        input_dtype = type(a) if np.isscalar(a) else a.dtype
        ret, _, _, _= self._initialize(flat_size, fill_value, dtype, input_dtype, group_idx.size)
        group_idx = np.ascontiguousarray(group_idx)
        # Stable sort, so the original order within each group is preserved
        sortidx = np.argsort(group_idx, kind='mergesort')
        self._jitfunc(sortidx, group_idx, a, ret)
        # Deal with ndimensional indexing
        if ndim_idx > 1:
            ret = ret.reshape(size, order=order)
        return ret

    def callable(self, nans=False):
        """Compile a jitted function and loop it over the sorted data."""
        jitfunc = nb.njit(self.func, nogil=True)

        def _loop(sortidx, group_idx, a, ret):
            size = len(ret)
            group_idx_srt = group_idx[sortidx]
            a_srt = a[sortidx]
            # step_indices yields the boundaries of equal-valued runs in the sorted index
            indices = step_indices(group_idx_srt)
            for i in range(len(indices) - 1):
                start_idx, stop_idx = indices[i], indices[i + 1]
                ri = group_idx_srt[start_idx]
                if ri < 0:
                    raise ValueError("negative indices not supported")
                if ri >= size:
                    raise ValueError("one or more indices in group_idx are too large")
                ret[ri] = jitfunc(a_srt[start_idx:stop_idx])
        return nb.njit(_loop, nogil=True)
class Sum(AggregateOp):
    """Group-wise sum."""
    forced_fill_value = 0

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        counter[ri] = 0  # mark group as non-empty
        ret[ri] += val
class Prod(AggregateOp):
    """Group-wise product."""
    forced_fill_value = 1

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        counter[ri] = 0  # mark group as non-empty
        ret[ri] *= val
class Len(AggregateOp):
    """Group-wise element count."""
    forced_fill_value = 0

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        counter[ri] = 0  # mark group as non-empty
        ret[ri] += 1
class All(AggregateOp):
    """True where every value in the group is truthy."""
    forced_fill_value = 1

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        counter[ri] = 0  # mark group as non-empty
        ret[ri] &= bool(val)
class Any(AggregateOp):
    """True where at least one value in the group is truthy."""
    forced_fill_value = 0

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        counter[ri] = 0  # mark group as non-empty
        ret[ri] |= bool(val)
class Last(AggregateOp):
    """Last value seen per group (each write simply overwrites the previous)."""
    counter_fill_value = None  # no untouched-group tracking needed

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        ret[ri] = val
class First(Last):
    """First value per group: Last applied over the reversed input."""
    reverse = True
class AllNan(AggregateOp):
    """True where every value in the group is NaN."""
    forced_fill_value = 1

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        counter[ri] = 0  # mark group as non-empty
        ret[ri] &= val == val  # val != val only for NaN, so this ANDs "is not NaN"... inverted below via &= of equality
class AnyNan(AggregateOp):
    """True where at least one value in the group is NaN."""
    forced_fill_value = 0

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        counter[ri] = 0  # mark group as non-empty
        ret[ri] |= val != val  # val != val is the NaN test
class Max(AggregateOp):
    """Group-wise maximum."""

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        if counter[ri]:
            # First value for this group: take it unconditionally
            ret[ri] = val
            counter[ri] = 0
        elif ret[ri] < val:
            ret[ri] = val
class Min(AggregateOp):
    """Group-wise minimum."""

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        if counter[ri]:
            # First value for this group: take it unconditionally
            ret[ri] = val
            counter[ri] = 0
        elif ret[ri] > val:
            ret[ri] = val
class ArgMax(AggregateOp):
    """Index of the group-wise maximum; mean holds the best value, ret its index."""
    mean_fill_value = np.nan

    @staticmethod
    def _valgetter(a, i):
        # Carry the index alongside the value so the inner kernel can record it
        return a[i], i

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        cmp_val, arg = val
        if counter[ri]:
            mean[ri] = cmp_val
            ret[ri] = arg
            counter[ri] = 0
        elif mean[ri] < cmp_val:
            mean[ri] = cmp_val
            ret[ri] = arg
class ArgMin(ArgMax):
    """Index of the group-wise minimum; same bookkeeping as ArgMax with the comparison flipped."""

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        cmp_val, arg = val
        if counter[ri]:
            mean[ri] = cmp_val
            ret[ri] = arg
            counter[ri] = 0
        elif mean[ri] > cmp_val:
            mean[ri] = cmp_val
            ret[ri] = arg
class Mean(Aggregate2pass):
    """Group-wise mean: first pass sums values and counts, second pass divides."""
    counter_fill_value = 0
    counter_dtype = int  # counter holds element counts, not just a touched flag

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        counter[ri] += 1
        ret[ri] += val

    @staticmethod
    def _2pass_inner(ri, ret, counter, mean, ddof):
        return ret[ri] / counter[ri]
class Std(Mean):
    """Group-wise standard deviation: ret accumulates sum of squares, mean the plain sum."""
    mean_fill_value = 0

    @staticmethod
    def _inner(ri, val, ret, counter, mean):
        counter[ri] += 1
        mean[ri] += val
        ret[ri] += val * val

    @staticmethod
    def _2pass_inner(ri, ret, counter, mean, ddof):
        # sqrt((sum_sq - sum^2/n) / (n - ddof))
        mean2 = mean[ri] * mean[ri]
        return np.sqrt((ret[ri] - mean2 / counter[ri]) / (counter[ri] - ddof))
class Var(Std):
    """Group-wise variance: same accumulation as Std, without the final sqrt."""

    @staticmethod
    def _2pass_inner(ri, ret, counter, mean, ddof):
        mean2 = mean[ri] * mean[ri]
        return (ret[ri] - mean2 / counter[ri]) / (counter[ri] - ddof)
class CumSum(AggregateNtoN, Sum):
    """Cumulative group-wise sum (one output per input element)."""
    pass
class CumProd(AggregateNtoN, Prod):
    """Cumulative group-wise product (one output per input element)."""
    pass
class CumMax(AggregateNtoN, Max):
    """Cumulative group-wise maximum (one output per input element)."""
    pass
class CumMin(AggregateNtoN, Min):
    """Cumulative group-wise minimum (one output per input element)."""
    pass
def get_funcs():
    """Instantiate one aggregation operation per supported function name.

    Returns a dict mapping canonical names (and their ``nan``-prefixed
    variants, where applicable) to ready-to-use operation instances.
    """
    ops = (Sum, Prod, Len, All, Any, Last, First, AllNan, AnyNan, Min, Max,
           ArgMin, ArgMax, Mean, Std, Var,
           CumSum, CumProd, CumMax, CumMin)
    table = {}
    for op in ops:
        base_name = op.__name__.lower()
        table[base_name] = op(base_name)
        # Functions without a separate nan-version do not get a nan-variant
        if base_name not in funcs_no_separate_nan:
            nan_name = 'nan' + base_name
            table[nan_name] = op(nan_name, nans=True)
    return table
_impl_dict = get_funcs()
_default_cache = {}
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
              dtype=None, axis=None, cache=None, **kwargs):
    # Resolve aliases; a known name comes back as a string, anything
    # else must be a user-supplied callable.
    func = get_func(func, aliasing, _impl_dict)
    if isstr(func):
        # Built-in operation: dispatch to the precompiled implementation
        op = _impl_dict[func]
        return op(group_idx, a, size, fill_value, order, dtype, axis, **kwargs)
    # Custom callable: jit it, optionally caching the compiled wrapper
    if cache in (None, False):
        op = AggregateGeneric(func)
    else:
        if cache is True:
            cache = _default_cache
        op = cache.setdefault(func, AggregateGeneric(func))
    return op(group_idx, a, size, fill_value, order, dtype, axis, **kwargs)
aggregate.__doc__ = """
This is the numba implementation of aggregate.
""" + aggregate_common_doc
@nb.njit(nogil=True, cache=True)
def step_count(group_idx):
    """Return the amount of index changes within group_idx."""
    # An empty array has zero runs; any non-empty array has at least one.
    if len(group_idx) < 1:
        return 0
    steps = 1
    run_start = 0
    for i in range(len(group_idx)):
        if group_idx[i] != group_idx[run_start]:
            run_start = i
            steps += 1
    return steps
@nb.njit(nogil=True, cache=True)
|
ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | AggregateOp.callable | python | def callable(cls, nans=False, reverse=False, scalar=False):
_valgetter = cls._valgetter_scalar if scalar else cls._valgetter
valgetter = nb.njit(_valgetter)
outersetter = nb.njit(cls._outersetter)
_cls_inner = nb.njit(cls._inner)
if nans:
def _inner(ri, val, ret, counter, mean):
if not np.isnan(val):
_cls_inner(ri, val, ret, counter, mean)
inner = nb.njit(_inner)
else:
inner = _cls_inner
def _loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
# fill_value and ddof need to be present for being exchangeable with loop_2pass
size = len(ret)
rng = range(len(group_idx) - 1, -1 , -1) if reverse else range(len(group_idx))
for i in rng:
ri = group_idx[i]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
val = valgetter(a, i)
inner(ri, val, ret, counter, mean)
outersetter(outer, i, ret[ri])
return nb.njit(_loop, nogil=True) | Compile a jitted function doing the hard part of the job | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L91-L119 | null | class AggregateOp(object):
"""
Every subclass of AggregateOp handles a different aggregation operation. There are
several private class methods that need to be overwritten by the subclasses
in order to implement different functionality.
On object instantiation, all necessary static methods are compiled together into
two jitted callables, one for scalar arguments, and one for arrays. Calling the
instantiated object picks the right cached callable, does some further preprocessing
and then executes the actual aggregation operation.
"""
forced_fill_value = None
counter_fill_value = 1
counter_dtype = bool
mean_fill_value = None
mean_dtype = np.float64
outer = False
reverse = False
nans = False
def __init__(self, func=None, **kwargs):
if func is None:
func = type(self).__name__.lower()
self.func = func
self.__dict__.update(kwargs)
# Cache the compiled functions, so they don't have to be recompiled on every call
self._jit_scalar = self.callable(self.nans, self.reverse, scalar=True)
self._jit_non_scalar = self.callable(self.nans, self.reverse, scalar=False)
def __call__(self, group_idx, a, size=None, fill_value=0, order='C',
dtype=None, axis=None, ddof=0):
iv = input_validation(group_idx, a, size=size, order=order, axis=axis, check_bounds=False)
group_idx, a, flat_size, ndim_idx, size = iv
# TODO: The typecheck should be done by the class itself, not by check_dtype
dtype = check_dtype(dtype, self.func, a, len(group_idx))
check_fill_value(fill_value, dtype)
input_dtype = type(a) if np.isscalar(a) else a.dtype
ret, counter, mean, outer = self._initialize(flat_size, fill_value, dtype, input_dtype, group_idx.size)
group_idx = np.ascontiguousarray(group_idx)
if not np.isscalar(a):
a = np.ascontiguousarray(a)
jitfunc = self._jit_non_scalar
else:
jitfunc = self._jit_scalar
jitfunc(group_idx, a, ret, counter, mean, outer, fill_value, ddof)
self._finalize(ret, counter, fill_value)
if self.outer:
return outer
# Deal with ndimensional indexing
if ndim_idx > 1:
ret = ret.reshape(size, order=order)
return ret
@classmethod
def _initialize(cls, flat_size, fill_value, dtype, input_dtype, input_size):
if cls.forced_fill_value is None:
ret = np.full(flat_size, fill_value, dtype=dtype)
else:
ret = np.full(flat_size, cls.forced_fill_value, dtype=dtype)
counter = mean = outer = None
if cls.counter_fill_value is not None:
counter = np.full_like(ret, cls.counter_fill_value, dtype=cls.counter_dtype)
if cls.mean_fill_value is not None:
dtype = cls.mean_dtype if cls.mean_dtype else input_dtype
mean = np.full_like(ret, cls.mean_fill_value, dtype=dtype)
if cls.outer:
outer = np.full(input_size, fill_value, dtype=dtype)
return ret, counter, mean, outer
@classmethod
def _finalize(cls, ret, counter, fill_value):
if cls.forced_fill_value is not None and fill_value != cls.forced_fill_value:
ret[counter] = fill_value
@classmethod
@staticmethod
def _valgetter(a, i):
return a[i]
@staticmethod
def _valgetter_scalar(a, i):
return a
@staticmethod
def _inner(ri, val, ret, counter, mean):
raise NotImplementedError("subclasses need to overwrite _inner")
@staticmethod
def _outersetter(outer, i, val):
pass
|
ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | AggregateGeneric.callable | python | def callable(self, nans=False):
jitfunc = nb.njit(self.func, nogil=True)
def _loop(sortidx, group_idx, a, ret):
size = len(ret)
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
indices = step_indices(group_idx_srt)
for i in range(len(indices) - 1):
start_idx, stop_idx = indices[i], indices[i + 1]
ri = group_idx_srt[start_idx]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
ret[ri] = jitfunc(a_srt[start_idx:stop_idx])
return nb.njit(_loop, nogil=True) | Compile a jitted function and loop it over the sorted data. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L208-L226 | null | class AggregateGeneric(AggregateOp):
"""Base class for jitting arbitrary functions."""
counter_fill_value = None
def __init__(self, func, **kwargs):
self.func = func
self.__dict__.update(kwargs)
self._jitfunc = self.callable(self.nans)
def __call__(self, group_idx, a, size=None, fill_value=0, order='C',
dtype=None, axis=None, ddof=0):
iv = input_validation(group_idx, a, size=size, order=order, axis=axis, check_bounds=False)
group_idx, a, flat_size, ndim_idx, size = iv
# TODO: The typecheck should be done by the class itself, not by check_dtype
dtype = check_dtype(dtype, self.func, a, len(group_idx))
check_fill_value(fill_value, dtype)
input_dtype = type(a) if np.isscalar(a) else a.dtype
ret, _, _, _= self._initialize(flat_size, fill_value, dtype, input_dtype, group_idx.size)
group_idx = np.ascontiguousarray(group_idx)
sortidx = np.argsort(group_idx, kind='mergesort')
self._jitfunc(sortidx, group_idx, a, ret)
# Deal with ndimensional indexing
if ndim_idx > 1:
ret = ret.reshape(size, order=order)
return ret
|
ml31415/numpy-groupies | numpy_groupies/utils.py | get_aliasing | python | def get_aliasing(*extra):
alias = dict((k, k) for k in funcs_common)
alias.update(_alias_str)
alias.update((fn, fn) for fn in _alias_builtin.values())
alias.update(_alias_builtin)
for d in extra:
alias.update(d)
alias.update((k, k) for k in set(alias.values()))
# Treat nan-functions as firstclass member and add them directly
for key in set(alias.values()):
if key not in funcs_no_separate_nan:
key = 'nan' + key
alias[key] = key
return alias | The assembles the dict mapping strings and functions to the list of
supported function names:
e.g. alias['add'] = 'sum' and alias[sorted] = 'sort'
This funciton should only be called during import. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils.py#L95-L113 | null | """Common helpers without certain dependencies."""
aggregate_common_doc = """
See readme file at https://github.com/ml31415/numpy-groupies for a full
description. Below we reproduce the "Full description of inputs"
section from that readme, note that the text below makes references to
other portions of the readme that are not shown here.
group_idx:
this is an array of non-negative integers, to be used as the "labels"
with which to group the values in ``a``. Although we have so far
assumed that ``group_idx`` is one-dimesnaional, and the same length as
``a``, it can in fact be two-dimensional (or some form of nested
sequences that can be converted to 2D). When ``group_idx`` is 2D, the
size of the 0th dimension corresponds to the number of dimesnions in
the output, i.e. ``group_idx[i,j]`` gives the index into the ith
dimension in the output
for ``a[j]``. Note that ``a`` should still be 1D (or scalar), with
length matching ``group_idx.shape[1]``.
a:
this is the array of values to be aggregated. See above for a
simple demonstration of what this means. ``a`` will normally be a
one-dimensional array, however it can also be a scalar in some cases.
func: default='sum'
the function to use for aggregation. See the section above for
details. Note that the simplest way to specify the function is using a
string (e.g. ``func='max'``) however a number of aliases are also
defined (e.g. you can use the ``func=np.max``, or even ``func=max``,
where ``max`` is the
builtin function). To check the available aliases see ``utils.py``.
size: default=None
the shape of the output array. If ``None``, the maximum value in
``group_idx`` will set the size of the output. Note that for
multidimensional output you need to list the size of each dimension
here, or give ``None``.
fill_value: default=0
in the example above, group 2 does not have any data, so requires some
kind of filling value - in this case the default of ``0`` is used. If
you had set ``fill_value=nan`` or something else, that value would
appear instead of ``0`` for the 2 element in the output. Note that
there are some subtle interactions between what is permitted for
``fill_value`` and the input/output ``dtype`` - exceptions should be
raised in most cases to alert the programmer if issue arrise.
order: default='C'
this is relevant only for multimensional output. It controls the
layout of the output array in memory, can be ``'F'`` for fortran-style.
dtype: default=None
the ``dtype`` of the output. By default something sensible is chosen
based on the input, aggregation function, and ``fill_value``.
ddof: default=0
passed through into calculations of variance and standard deviation
(see above).
"""
funcs_common = 'first last len mean var std allnan anynan max min argmax argmin cumsum cumprod cummax cummin'.split()
funcs_no_separate_nan = frozenset(['sort', 'rsort', 'array', 'allnan', 'anynan'])
_alias_str = {
'or': 'any',
'and': 'all',
'add': 'sum',
'count': 'len',
'plus': 'sum',
'multiply': 'prod',
'product': 'prod',
'times': 'prod',
'amax': 'max',
'maximum': 'max',
'amin': 'min',
'minimum': 'min',
'split': 'array',
'splice': 'array',
'sorted': 'sort',
'asort': 'sort',
'asorted': 'sort',
'rsorted': 'sort',
'dsort': 'sort',
'dsorted': 'rsort',
}
_alias_builtin = {
all: 'all',
any: 'any',
len: 'len',
max: 'max',
min: 'min',
sum: 'sum',
sorted: 'sort',
slice: 'array',
list: 'array',
}
aliasing = get_aliasing()
def get_func(func, aliasing, implementations):
    """Return the key of a found implementation or the func itself.

    Parameters
    ----------
    func : str or callable
        A function name (or alias, or aliased builtin) or a custom callable.
    aliasing : dict
        Maps names and callables to canonical function-name strings.
    implementations : dict
        Available implementations, keyed by canonical name.

    Raises
    ------
    ValueError
        If a nan-variant is requested for a function that has none, or if
        *func* is neither a known name nor a callable.
    NotImplementedError
        If the canonical name has no implementation.
    """
    try:
        func_str = aliasing[func]
    except KeyError:
        if callable(func):
            return func
    else:
        if func_str in implementations:
            return func_str
        if func_str.startswith('nan') and \
                func_str[3:] in funcs_no_separate_nan:
            # Bugfix: the original mixed a printf-style "%s" placeholder with
            # str.format, so the function name was never substituted.
            raise ValueError("{} does not have a nan-version".format(func_str[3:]))
        else:
            raise NotImplementedError("No such function available")
    raise ValueError("func {} is neither a valid function string nor a "
                     "callable object".format(func))
def check_boolean(x):
    """Raise ValueError unless x compares equal to 0 or 1."""
    if x != 0 and x != 1:
        raise ValueError("Value not boolean")
try:
    basestring  # Attempt to evaluate basestring
    def isstr(s):
        """Return True if s is a string (Python 2: str or unicode)."""
        return isinstance(s, basestring)
except NameError:
    # Probably Python 3.x
    def isstr(s):
        """Return True if s is a string."""
        return isinstance(s, str)
|
ml31415/numpy-groupies | numpy_groupies/utils.py | get_func | python | def get_func(func, aliasing, implementations):
try:
func_str = aliasing[func]
except KeyError:
if callable(func):
return func
else:
if func_str in implementations:
return func_str
if func_str.startswith('nan') and \
func_str[3:] in funcs_no_separate_nan:
raise ValueError("%s does not have a nan-version".format(func_str[3:]))
else:
raise NotImplementedError("No such function available")
raise ValueError("func %s is neither a valid function string nor a "
"callable object".format(func)) | Return the key of a found implementation or the func itself | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils.py#L118-L134 | null | """Common helpers without certain dependencies."""
aggregate_common_doc = """
See readme file at https://github.com/ml31415/numpy-groupies for a full
description. Below we reproduce the "Full description of inputs"
section from that readme, note that the text below makes references to
other portions of the readme that are not shown here.
group_idx:
this is an array of non-negative integers, to be used as the "labels"
with which to group the values in ``a``. Although we have so far
assumed that ``group_idx`` is one-dimesnaional, and the same length as
``a``, it can in fact be two-dimensional (or some form of nested
sequences that can be converted to 2D). When ``group_idx`` is 2D, the
size of the 0th dimension corresponds to the number of dimesnions in
the output, i.e. ``group_idx[i,j]`` gives the index into the ith
dimension in the output
for ``a[j]``. Note that ``a`` should still be 1D (or scalar), with
length matching ``group_idx.shape[1]``.
a:
this is the array of values to be aggregated. See above for a
simple demonstration of what this means. ``a`` will normally be a
one-dimensional array, however it can also be a scalar in some cases.
func: default='sum'
the function to use for aggregation. See the section above for
details. Note that the simplest way to specify the function is using a
string (e.g. ``func='max'``) however a number of aliases are also
defined (e.g. you can use the ``func=np.max``, or even ``func=max``,
where ``max`` is the
builtin function). To check the available aliases see ``utils.py``.
size: default=None
the shape of the output array. If ``None``, the maximum value in
``group_idx`` will set the size of the output. Note that for
multidimensional output you need to list the size of each dimension
here, or give ``None``.
fill_value: default=0
in the example above, group 2 does not have any data, so requires some
kind of filling value - in this case the default of ``0`` is used. If
you had set ``fill_value=nan`` or something else, that value would
appear instead of ``0`` for the 2 element in the output. Note that
there are some subtle interactions between what is permitted for
``fill_value`` and the input/output ``dtype`` - exceptions should be
raised in most cases to alert the programmer if issue arrise.
order: default='C'
this is relevant only for multimensional output. It controls the
layout of the output array in memory, can be ``'F'`` for fortran-style.
dtype: default=None
the ``dtype`` of the output. By default something sensible is chosen
based on the input, aggregation function, and ``fill_value``.
ddof: default=0
passed through into calculations of variance and standard deviation
(see above).
"""
funcs_common = 'first last len mean var std allnan anynan max min argmax argmin cumsum cumprod cummax cummin'.split()
funcs_no_separate_nan = frozenset(['sort', 'rsort', 'array', 'allnan', 'anynan'])
_alias_str = {
'or': 'any',
'and': 'all',
'add': 'sum',
'count': 'len',
'plus': 'sum',
'multiply': 'prod',
'product': 'prod',
'times': 'prod',
'amax': 'max',
'maximum': 'max',
'amin': 'min',
'minimum': 'min',
'split': 'array',
'splice': 'array',
'sorted': 'sort',
'asort': 'sort',
'asorted': 'sort',
'rsorted': 'sort',
'dsort': 'sort',
'dsorted': 'rsort',
}
_alias_builtin = {
all: 'all',
any: 'any',
len: 'len',
max: 'max',
min: 'min',
sum: 'sum',
sorted: 'sort',
slice: 'array',
list: 'array',
}
def get_aliasing(*extra):
    """Assemble the dict mapping strings and callables to canonical
    function names, e.g. ``alias['add'] == 'sum'`` and
    ``alias[sorted] == 'sort'``.

    Any extra alias dicts passed in are merged on top. This function is
    intended to be called only once, at import time.
    """
    alias = {name: name for name in funcs_common}
    alias.update(_alias_str)
    alias.update({fn: fn for fn in _alias_builtin.values()})
    alias.update(_alias_builtin)
    for extra_map in extra:
        alias.update(extra_map)
    # Every canonical name should also map to itself.
    alias.update({name: name for name in set(alias.values())})
    # Treat nan-variants as first-class members and register them directly.
    for name in set(alias.values()):
        if name not in funcs_no_separate_nan:
            nan_name = 'nan' + name
            alias[nan_name] = nan_name
    return alias
aliasing = get_aliasing()
def check_boolean(x):
if x not in (0, 1):
raise ValueError("Value not boolean")
try:
basestring # Attempt to evaluate basestring
def isstr(s):
return isinstance(s, basestring)
except NameError:
# Probably Python 3.x
def isstr(s):
return isinstance(s, str)
|
ml31415/numpy-groupies | numpy_groupies/utils_numpy.py | minimum_dtype | python | def minimum_dtype(x, dtype=np.bool_):
def check_type(x, dtype):
try:
converted = dtype.type(x)
except (ValueError, OverflowError):
return False
# False if some overflow has happened
return converted == x or np.isnan(x)
def type_loop(x, dtype, dtype_dict, default=None):
while True:
try:
dtype = np.dtype(dtype_dict[dtype.name])
if check_type(x, dtype):
return np.dtype(dtype)
except KeyError:
if default is not None:
return np.dtype(default)
raise ValueError("Can not determine dtype of %r" % x)
dtype = np.dtype(dtype)
if check_type(x, dtype):
return dtype
if np.issubdtype(dtype, np.inexact):
return type_loop(x, dtype, _next_float_dtype)
else:
return type_loop(x, dtype, _next_int_dtype, default=np.float32) | returns the "most basic" dtype which represents `x` properly, which
provides at least the same value range as the specified dtype. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L60-L90 | [
"def check_type(x, dtype):\n try:\n converted = dtype.type(x)\n except (ValueError, OverflowError):\n return False\n # False if some overflow has happened\n return converted == x or np.isnan(x)\n",
"def type_loop(x, dtype, dtype_dict, default=None):\n while True:\n try:\n dtype = np.dtype(dtype_dict[dtype.name])\n if check_type(x, dtype):\n return np.dtype(dtype)\n except KeyError:\n if default is not None:\n return np.dtype(default)\n raise ValueError(\"Can not determine dtype of %r\" % x)\n"
] | """Common helper functions for typing and general numpy tools."""
import numpy as np
from .utils import get_aliasing
_alias_numpy = {
np.add: 'sum',
np.sum: 'sum',
np.any: 'any',
np.all: 'all',
np.multiply: 'prod',
np.prod: 'prod',
np.amin: 'min',
np.min: 'min',
np.minimum: 'min',
np.amax: 'max',
np.max: 'max',
np.maximum: 'max',
np.argmax: 'argmax',
np.argmin: 'argmin',
np.mean: 'mean',
np.std: 'std',
np.var: 'var',
np.array: 'array',
np.asarray: 'array',
np.sort: 'sort',
np.nansum: 'nansum',
np.nanprod: 'nanprod',
np.nanmean: 'nanmean',
np.nanvar: 'nanvar',
np.nanmax: 'nanmax',
np.nanmin: 'nanmin',
np.nanstd: 'nanstd',
np.nanargmax: 'nanargmax',
np.nanargmin: 'nanargmin',
np.cumsum: 'cumsum',
np.cumprod: 'cumprod',
}
aliasing = get_aliasing(_alias_numpy)
_next_int_dtype = dict(
bool=np.int8,
uint8=np.int16,
int8=np.int16,
uint16=np.int32,
int16=np.int32,
uint32=np.int64,
int32=np.int64
)
_next_float_dtype = dict(
float16=np.float32,
float32=np.float64,
float64=np.complex64,
complex64=np.complex128
)
def minimum_dtype_scalar(x, dtype, a):
    # When no dtype was requested, infer one from the sample value ``a``:
    # builtin int/float map to their numpy equivalents, anything else is
    # assumed to be array-like and keeps its own dtype. Then widen so that
    # ``x`` is representable (see ``minimum_dtype``).
    if dtype is None:
        dtype = np.dtype(type(a)) if isinstance(a, (int, float))\
            else a.dtype
    return minimum_dtype(x, dtype)
# Aggregations whose output dtype is fixed, regardless of the input dtype.
# NOTE: the builtin ``object`` replaces the deprecated alias ``np.object``
# (deprecated in NumPy 1.20, removed in 1.24); ``np.dtype(object)`` is the
# identical dtype.
_forced_types = {
    'array': object,
    'all': np.bool_,
    'any': np.bool_,
    'nanall': np.bool_,
    'nanany': np.bool_,
    'len': np.int64,
    'nanlen': np.int64,
    'allnan': np.bool_,
    'anynan': np.bool_,
    'argmax': np.int64,
    'argmin': np.int64,
}
_forced_float_types = {'mean', 'var', 'std', 'nanmean', 'nanvar', 'nanstd'}
_forced_same_type = {'min', 'max', 'first', 'last', 'nanmin', 'nanmax',
'nanfirst', 'nanlast'}
def check_dtype(dtype, func_str, a, n):
    """Determine the output dtype for running aggregation ``func_str`` on ``a``.

    ``dtype`` is the user-requested dtype (or ``None`` for automatic
    selection), ``a`` the input values (array or scalar) and ``n`` the number
    of input elements, used to size integer sum accumulators.
    Raises ``ValueError``/``TypeError`` for invalid scalar inputs or
    incompatible dtype requests.
    """
    if np.isscalar(a) or not a.shape:
        # Scalar input is only meaningful for a few aggregations.
        if func_str not in ("sum", "prod", "len"):
            raise ValueError("scalar inputs are supported only for 'sum', "
                             "'prod' and 'len'")
        a_dtype = np.dtype(type(a))
    else:
        a_dtype = a.dtype
    if dtype is not None:
        # dtype set by the user
        # Careful here: np.bool != np.bool_ !
        if np.issubdtype(dtype, np.bool_) and \
                not('all' in func_str or 'any' in func_str):
            raise TypeError("function %s requires a more complex datatype "
                            "than bool" % func_str)
        if not np.issubdtype(dtype, np.integer) and func_str in ('len', 'nanlen'):
            raise TypeError("function %s requires an integer datatype" % func_str)
        # TODO: Maybe have some more checks here
        return np.dtype(dtype)
    else:
        # Automatic selection: fixed output dtypes first (e.g. bool for
        # any/all, int64 for len/arg*), then heuristics per function.
        try:
            return np.dtype(_forced_types[func_str])
        except KeyError:
            if func_str in _forced_float_types:
                # mean/var/std and their nan-variants need floating output.
                if np.issubdtype(a_dtype, np.floating):
                    return a_dtype
                else:
                    return np.dtype(np.float64)
            else:
                if func_str == 'sum':
                    # Try to guess the minimally required int size
                    if np.issubdtype(a_dtype, np.int64):
                        # It's not getting bigger anymore
                        # TODO: strictly speaking it might need float
                        return np.dtype(np.int64)
                    elif np.issubdtype(a_dtype, np.integer):
                        # Worst case: every one of the n elements is at the
                        # dtype's maximum.
                        maxval = np.iinfo(a_dtype).max * n
                        return minimum_dtype(maxval, a_dtype)
                    elif np.issubdtype(a_dtype, np.bool_):
                        return minimum_dtype(n, a_dtype)
                    else:
                        # floating, inexact, whatever
                        return a_dtype
                elif func_str in _forced_same_type:
                    # min/max/first/last etc. preserve the input dtype.
                    return a_dtype
                else:
                    if isinstance(a_dtype, np.integer):
                        return np.dtype(np.int64)
                    else:
                        return a_dtype
def check_fill_value(fill_value, dtype):
    """Cast *fill_value* to *dtype*, raising a descriptive error on failure."""
    try:
        converted = dtype.type(fill_value)
    except ValueError:
        raise ValueError("fill_value must be convertible into %s"
                         % dtype.type.__name__)
    return converted
def check_group_idx(group_idx, a=None, check_min=True):
    """Validate *group_idx*: same size as *a* (if given), integer dtype,
    and — when *check_min* — free of negative indices."""
    if a is not None:
        if group_idx.size != a.size:
            raise ValueError("The size of group_idx must be the same as "
                             "a.size")
    if not issubclass(group_idx.dtype.type, np.integer):
        raise TypeError("group_idx must be of integer type")
    if check_min:
        if np.min(group_idx) < 0:
            raise ValueError("group_idx contains negative indices")
def input_validation(group_idx, a, size=None, order='C', axis=None,
                     ravel_group_idx=True, check_bounds=True):
    """ Do some fairly extensive checking of group_idx and a, trying to
    give the user as much help as possible with what is wrong. Also,
    convert ndim-indexing to 1d indexing.

    Returns the tuple ``(group_idx, a, flat_size, ndim_idx, size)`` where
    ``group_idx`` is (usually) raveled to 1d, ``flat_size`` is the total
    number of output elements and ``ndim_idx`` is the dimensionality of the
    output implied by the indexing.
    """
    if not isinstance(a, (int, float, complex)):
        a = np.asanyarray(a)
    group_idx = np.asanyarray(group_idx)
    if not np.issubdtype(group_idx.dtype, np.integer):
        raise TypeError("group_idx must be of integer type")
    # This check works for multidimensional indexing as well
    if check_bounds and np.any(group_idx < 0):
        raise ValueError("negative indices not supported")
    ndim_idx = np.ndim(group_idx)
    ndim_a = np.ndim(a)
    # Deal with the axis arg: if present, then turn 1d indexing into
    # multi-dimensional indexing along the specified axis.
    if axis is None:
        if ndim_a > 1:
            raise ValueError("a must be scalar or 1 dimensional, use .ravel to"
                             " flatten. Alternatively specify axis.")
    elif axis >= ndim_a or axis < -ndim_a:
        raise ValueError("axis arg too large for np.ndim(a)")
    else:
        axis = axis if axis >= 0 else ndim_a + axis  # negative indexing
        if ndim_idx > 1:
            # TODO: we could support a sequence of axis values for multiple
            # dimensions of group_idx.
            raise NotImplementedError("only 1d indexing currently"
                                      "supported with axis arg.")
        elif a.shape[axis] != len(group_idx):
            raise ValueError("a.shape[axis] doesn't match length of group_idx.")
        elif size is not None and not np.isscalar(size):
            raise NotImplementedError("when using axis arg, size must be"
                                      "None or scalar.")
        else:
            # Create the broadcast-ready multidimensional indexing.
            # Note the user could do this themselves, so this is
            # very much just a convenience.
            size_in = np.max(group_idx) + 1 if size is None else size
            group_idx_in = group_idx
            group_idx = []
            size = []
            for ii, s in enumerate(a.shape):
                # Along `axis` use the user's index; along every other
                # dimension use a plain arange, each reshaped so they
                # broadcast against one another.
                ii_idx = group_idx_in if ii == axis else np.arange(s)
                ii_shape = [1] * ndim_a
                ii_shape[ii] = s
                group_idx.append(ii_idx.reshape(ii_shape))
                size.append(size_in if ii == axis else s)
            # Use the indexing, and return. It's a bit simpler than
            # using trying to keep all the logic below happy
            group_idx = np.ravel_multi_index(group_idx, size, order=order,
                                             mode='raise')
            flat_size = np.prod(size)
            ndim_idx = ndim_a
            return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size
    if ndim_idx == 1:
        if size is None:
            size = np.max(group_idx) + 1
        else:
            if not np.isscalar(size):
                raise ValueError("output size must be scalar or None")
            if check_bounds and np.any(group_idx > size - 1):
                raise ValueError("one or more indices are too large for "
                                 "size %d" % size)
        flat_size = size
    else:
        # 2D group_idx: row i indexes output dimension i.
        if size is None:
            size = np.max(group_idx, axis=1) + 1
        elif np.isscalar(size):
            raise ValueError("output size must be of length %d"
                             % len(group_idx))
        elif len(size) != len(group_idx):
            raise ValueError("%d sizes given, but %d output dimensions "
                             "specified in index" % (len(size),
                                                     len(group_idx)))
        if ravel_group_idx:
            group_idx = np.ravel_multi_index(group_idx, size, order=order,
                                             mode='raise')
        flat_size = np.prod(size)
    if not (np.ndim(a) == 0 or len(a) == group_idx.size):
        raise ValueError("group_idx and a must be of the same length, or a"
                         " can be scalar")
    return group_idx, a, flat_size, ndim_idx, size
### General tools ###
def unpack(group_idx, ret):
    """Expand the aggregated array *ret* back to the size of *group_idx*.

    Equivalent to ``ret[group_idx]``.
    """
    return ret[group_idx]
def allnan(x):
    """True iff every element of *x* is NaN."""
    return np.isnan(x).all()
def anynan(x):
    """True iff *x* contains at least one NaN."""
    return np.isnan(x).any()
def nanfirst(x):
    """First non-NaN element of *x* (IndexError if there is none)."""
    valid = ~np.isnan(x)
    return x[valid][0]
def nanlast(x):
    """Last non-NaN element of *x* (IndexError if there is none)."""
    valid = ~np.isnan(x)
    return x[valid][-1]
def multi_arange(n):
    """Concatenate ``arange(n_i)`` for every count in *n*.

    By example:

        # 0  1  2  3  4  5  6  7  8
        n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
        res = [0, 1, 2, 0, 1, 0, 1, 0]

    i.e. equivalent to ``hstack(arange(n_i) for n_i in n)``, but computed
    with a single cumsum trick, which is considerably faster for many small
    counts.

    Parameters
    ----------
    n : 1d array-like of non-negative int
        Repeat counts (array-likes are accepted; previously only ndarrays
        worked).

    Returns
    -------
    np.ndarray of int, length ``sum(n)``

    Raises
    ------
    ValueError
        If *n* is not one-dimensional.
    """
    n = np.asanyarray(n)
    if n.ndim != 1:
        raise ValueError("n is supposed to be 1d array.")
    if n.size == 0:
        # BUG FIX: empty input used to crash with IndexError on n_cumsum[-1].
        return np.empty(0, dtype=int)
    n_mask = n.astype(bool)
    n_cumsum = np.cumsum(n)
    # Start from all-ones steps; at each block boundary subtract the block
    # length so the running cumsum resets to zero.
    ret = np.ones(n_cumsum[-1] + 1, dtype=int)
    ret[n_cumsum[n_mask]] -= n[n_mask]
    ret[0] -= 1
    return np.cumsum(ret)[:-1]
def label_contiguous_1d(X):
    """
    WARNING: API for this function is not liable to change!!!

    Label each contiguous run of truthy values in a 1d array.

        X      = [F T T F F T F F F T T T]
        result = [0 1 1 0 0 2 0 0 0 3 3 3]

        X      = [0 3 3 0 0 5 5 5 1 1 0 2]
        result = [0 1 1 0 0 2 2 2 3 3 0 4]

    Falsy (``0``/``False``) elements are labelled ``0``. For boolean input
    every contiguous block of ``True`` gets one label; otherwise every
    contiguous block of identical non-zero values gets one label. Labels
    are consecutive integers starting at 1, with no gaps.
    """
    if X.ndim != 1:
        raise ValueError("this is for 1d masks only.")

    run_start = np.empty(len(X), dtype=bool)
    run_start[0] = X[0]  # a truthy first element opens a run
    if X.dtype.kind == 'b':
        mask = X
        run_start[1:] = X[1:] & ~X[:-1]
    else:
        mask = X.astype(bool)
        # A new run starts wherever the value changes...
        run_start[1:] = X[1:] != X[:-1]
        # ...unless the new value is zero.
        run_start[~mask] = False
    labels = np.cumsum(run_start)
    labels[~mask] = 0
    return labels
def relabel_groups_unique(group_idx):
    """Compress the labels in *group_idx* so the used groups become 1..k.

    See also ``relabel_groups_masked``.

        group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
        ret:       [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4]

    Unique groups in the input were ``1,2,3,5`` (``4`` was missing), so
    group 5 is relabelled to ``4``. Relabelling preserves order, merely
    "compressing" higher numbers down to fill the gaps.
    """
    used = np.zeros(np.max(group_idx) + 1, dtype=bool)
    used[0] = True
    used[group_idx] = True
    return relabel_groups_masked(group_idx, used)
def relabel_groups_masked(group_idx, keep_group):
    """Drop the groups flagged ``False`` in *keep_group* and close the gaps.

        group_idx:  [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
                     0 1 0 1 1 1      <- keep_group, indexed 0..5
        ret:        [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]

    In words: group 2 is removed, and groups 3, 4 and 5 are renumbered to
    2, 3 and 4 to fill the gap (group 4 was unused in the input, but the
    mask keeps it, so 5 only moves up one place). ``keep_group[0]`` is
    effectively ignored: the zero group can never be removed.
    ``keep_group`` should be boolean, ``group_idx`` integer; values in
    ``group_idx`` may appear in any order.
    """
    keep = keep_group.astype(bool, copy=False)
    if not keep[0]:
        # Copy before forcing keep[0] True so the caller's mask is untouched.
        keep = keep.copy()
        keep[0] = True
    relabel = np.zeros(keep.size, dtype=group_idx.dtype)
    relabel[keep] = np.arange(np.count_nonzero(keep))
    return relabel[group_idx]
|
ml31415/numpy-groupies | numpy_groupies/utils_numpy.py | input_validation | python | def input_validation(group_idx, a, size=None, order='C', axis=None,
ravel_group_idx=True, check_bounds=True):
if not isinstance(a, (int, float, complex)):
a = np.asanyarray(a)
group_idx = np.asanyarray(group_idx)
if not np.issubdtype(group_idx.dtype, np.integer):
raise TypeError("group_idx must be of integer type")
# This check works for multidimensional indexing as well
if check_bounds and np.any(group_idx < 0):
raise ValueError("negative indices not supported")
ndim_idx = np.ndim(group_idx)
ndim_a = np.ndim(a)
# Deal with the axis arg: if present, then turn 1d indexing into
# multi-dimensional indexing along the specified axis.
if axis is None:
if ndim_a > 1:
raise ValueError("a must be scalar or 1 dimensional, use .ravel to"
" flatten. Alternatively specify axis.")
elif axis >= ndim_a or axis < -ndim_a:
raise ValueError("axis arg too large for np.ndim(a)")
else:
axis = axis if axis >= 0 else ndim_a + axis # negative indexing
if ndim_idx > 1:
# TODO: we could support a sequence of axis values for multiple
# dimensions of group_idx.
raise NotImplementedError("only 1d indexing currently"
"supported with axis arg.")
elif a.shape[axis] != len(group_idx):
raise ValueError("a.shape[axis] doesn't match length of group_idx.")
elif size is not None and not np.isscalar(size):
raise NotImplementedError("when using axis arg, size must be"
"None or scalar.")
else:
# Create the broadcast-ready multidimensional indexing.
# Note the user could do this themselves, so this is
# very much just a convenience.
size_in = np.max(group_idx) + 1 if size is None else size
group_idx_in = group_idx
group_idx = []
size = []
for ii, s in enumerate(a.shape):
ii_idx = group_idx_in if ii == axis else np.arange(s)
ii_shape = [1] * ndim_a
ii_shape[ii] = s
group_idx.append(ii_idx.reshape(ii_shape))
size.append(size_in if ii == axis else s)
# Use the indexing, and return. It's a bit simpler than
# using trying to keep all the logic below happy
group_idx = np.ravel_multi_index(group_idx, size, order=order,
mode='raise')
flat_size = np.prod(size)
ndim_idx = ndim_a
return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size
if ndim_idx == 1:
if size is None:
size = np.max(group_idx) + 1
else:
if not np.isscalar(size):
raise ValueError("output size must be scalar or None")
if check_bounds and np.any(group_idx > size - 1):
raise ValueError("one or more indices are too large for "
"size %d" % size)
flat_size = size
else:
if size is None:
size = np.max(group_idx, axis=1) + 1
elif np.isscalar(size):
raise ValueError("output size must be of length %d"
% len(group_idx))
elif len(size) != len(group_idx):
raise ValueError("%d sizes given, but %d output dimensions "
"specified in index" % (len(size),
len(group_idx)))
if ravel_group_idx:
group_idx = np.ravel_multi_index(group_idx, size, order=order,
mode='raise')
flat_size = np.prod(size)
if not (np.ndim(a) == 0 or len(a) == group_idx.size):
raise ValueError("group_idx and a must be of the same length, or a"
" can be scalar")
return group_idx, a, flat_size, ndim_idx, size | Do some fairly extensive checking of group_idx and a, trying to
give the user as much help as possible with what is wrong. Also,
convert ndim-indexing to 1d indexing. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L189-L280 | null | """Common helper functions for typing and general numpy tools."""
import numpy as np
from .utils import get_aliasing
_alias_numpy = {
np.add: 'sum',
np.sum: 'sum',
np.any: 'any',
np.all: 'all',
np.multiply: 'prod',
np.prod: 'prod',
np.amin: 'min',
np.min: 'min',
np.minimum: 'min',
np.amax: 'max',
np.max: 'max',
np.maximum: 'max',
np.argmax: 'argmax',
np.argmin: 'argmin',
np.mean: 'mean',
np.std: 'std',
np.var: 'var',
np.array: 'array',
np.asarray: 'array',
np.sort: 'sort',
np.nansum: 'nansum',
np.nanprod: 'nanprod',
np.nanmean: 'nanmean',
np.nanvar: 'nanvar',
np.nanmax: 'nanmax',
np.nanmin: 'nanmin',
np.nanstd: 'nanstd',
np.nanargmax: 'nanargmax',
np.nanargmin: 'nanargmin',
np.cumsum: 'cumsum',
np.cumprod: 'cumprod',
}
aliasing = get_aliasing(_alias_numpy)
_next_int_dtype = dict(
bool=np.int8,
uint8=np.int16,
int8=np.int16,
uint16=np.int32,
int16=np.int32,
uint32=np.int64,
int32=np.int64
)
_next_float_dtype = dict(
float16=np.float32,
float32=np.float64,
float64=np.complex64,
complex64=np.complex128
)
def minimum_dtype(x, dtype=np.bool_):
    """Return the "most basic" dtype that represents *x* exactly, starting
    the search at *dtype* and widening within the same kind (ints widen
    towards int64 with a float32 fallback; floats widen towards complex)."""

    def _fits(value, candidate):
        # Round-trip test: conversion must neither raise nor change the value.
        try:
            converted = candidate.type(value)
        except (ValueError, OverflowError):
            return False
        # Overflow may wrap silently instead of raising, hence the == check.
        return converted == value or np.isnan(value)

    def _widen(value, candidate, successors, fallback=None):
        while True:
            try:
                candidate = np.dtype(successors[candidate.name])
            except KeyError:
                # Chain exhausted.
                if fallback is not None:
                    return np.dtype(fallback)
                raise ValueError("Can not determine dtype of %r" % value)
            if _fits(value, candidate):
                return np.dtype(candidate)

    dtype = np.dtype(dtype)
    if _fits(x, dtype):
        return dtype
    if np.issubdtype(dtype, np.inexact):
        return _widen(x, dtype, _next_float_dtype)
    return _widen(x, dtype, _next_int_dtype, fallback=np.float32)
def minimum_dtype_scalar(x, dtype, a):
if dtype is None:
dtype = np.dtype(type(a)) if isinstance(a, (int, float))\
else a.dtype
return minimum_dtype(x, dtype)
_forced_types = {
'array': np.object,
'all': np.bool_,
'any': np.bool_,
'nanall': np.bool_,
'nanany': np.bool_,
'len': np.int64,
'nanlen': np.int64,
'allnan': np.bool_,
'anynan': np.bool_,
'argmax': np.int64,
'argmin': np.int64,
}
_forced_float_types = {'mean', 'var', 'std', 'nanmean', 'nanvar', 'nanstd'}
_forced_same_type = {'min', 'max', 'first', 'last', 'nanmin', 'nanmax',
'nanfirst', 'nanlast'}
def check_dtype(dtype, func_str, a, n):
if np.isscalar(a) or not a.shape:
if func_str not in ("sum", "prod", "len"):
raise ValueError("scalar inputs are supported only for 'sum', "
"'prod' and 'len'")
a_dtype = np.dtype(type(a))
else:
a_dtype = a.dtype
if dtype is not None:
# dtype set by the user
# Careful here: np.bool != np.bool_ !
if np.issubdtype(dtype, np.bool_) and \
not('all' in func_str or 'any' in func_str):
raise TypeError("function %s requires a more complex datatype "
"than bool" % func_str)
if not np.issubdtype(dtype, np.integer) and func_str in ('len', 'nanlen'):
raise TypeError("function %s requires an integer datatype" % func_str)
# TODO: Maybe have some more checks here
return np.dtype(dtype)
else:
try:
return np.dtype(_forced_types[func_str])
except KeyError:
if func_str in _forced_float_types:
if np.issubdtype(a_dtype, np.floating):
return a_dtype
else:
return np.dtype(np.float64)
else:
if func_str == 'sum':
# Try to guess the minimally required int size
if np.issubdtype(a_dtype, np.int64):
# It's not getting bigger anymore
# TODO: strictly speaking it might need float
return np.dtype(np.int64)
elif np.issubdtype(a_dtype, np.integer):
maxval = np.iinfo(a_dtype).max * n
return minimum_dtype(maxval, a_dtype)
elif np.issubdtype(a_dtype, np.bool_):
return minimum_dtype(n, a_dtype)
else:
# floating, inexact, whatever
return a_dtype
elif func_str in _forced_same_type:
return a_dtype
else:
if isinstance(a_dtype, np.integer):
return np.dtype(np.int64)
else:
return a_dtype
def check_fill_value(fill_value, dtype):
try:
return dtype.type(fill_value)
except ValueError:
raise ValueError("fill_value must be convertible into %s"
% dtype.type.__name__)
def check_group_idx(group_idx, a=None, check_min=True):
if a is not None and group_idx.size != a.size:
raise ValueError("The size of group_idx must be the same as "
"a.size")
if not issubclass(group_idx.dtype.type, np.integer):
raise TypeError("group_idx must be of integer type")
if check_min and np.min(group_idx) < 0:
raise ValueError("group_idx contains negative indices")
### General tools ###
def unpack(group_idx, ret):
""" Take an aggregate packed array and uncompress it to the size of group_idx.
This is equivalent to ret[group_idx].
"""
return ret[group_idx]
def allnan(x):
return np.all(np.isnan(x))
def anynan(x):
return np.any(np.isnan(x))
def nanfirst(x):
return x[~np.isnan(x)][0]
def nanlast(x):
return x[~np.isnan(x)][-1]
def multi_arange(n):
"""By example:
# 0 1 2 3 4 5 6 7 8
n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
res = [0, 1, 2, 0, 1, 0, 1, 0]
That is it is equivalent to something like this :
hstack((arange(n_i) for n_i in n))
This version seems quite a bit faster, at least for some
possible inputs, and at any rate it encapsulates a task
in a function.
"""
if n.ndim != 1:
raise ValueError("n is supposed to be 1d array.")
n_mask = n.astype(bool)
n_cumsum = np.cumsum(n)
ret = np.ones(n_cumsum[-1] + 1, dtype=int)
ret[n_cumsum[n_mask]] -= n[n_mask]
ret[0] -= 1
return np.cumsum(ret)[:-1]
def label_contiguous_1d(X):
"""
WARNING: API for this function is not liable to change!!!
By example:
X = [F T T F F T F F F T T T]
result = [0 1 1 0 0 2 0 0 0 3 3 3]
Or:
X = [0 3 3 0 0 5 5 5 1 1 0 2]
result = [0 1 1 0 0 2 2 2 3 3 0 4]
The ``0`` or ``False`` elements of ``X`` are labeled as ``0`` in the output. If ``X``
is a boolean array, each contiguous block of ``True`` is given an integer
label, if ``X`` is not boolean, then each contiguous block of identical values
is given an integer label. Integer labels are 1, 2, 3,..... (i.e. start a 1
and increase by 1 for each block with no skipped numbers.)
"""
if X.ndim != 1:
raise ValueError("this is for 1d masks only.")
is_start = np.empty(len(X), dtype=bool)
is_start[0] = X[0] # True if X[0] is True or non-zero
if X.dtype.kind == 'b':
is_start[1:] = ~X[:-1] & X[1:]
M = X
else:
M = X.astype(bool)
is_start[1:] = X[:-1] != X[1:]
is_start[~M] = False
L = np.cumsum(is_start)
L[~M] = 0
return L
def relabel_groups_unique(group_idx):
"""
See also ``relabel_groups_masked``.
keep_group: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
ret: [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4]
Description of above: unique groups in input was ``1,2,3,5``, i.e.
``4`` was missing, so group 5 was relabled to be ``4``.
Relabeling maintains order, just "compressing" the higher numbers
to fill gaps.
"""
keep_group = np.zeros(np.max(group_idx) + 1, dtype=bool)
keep_group[0] = True
keep_group[group_idx] = True
return relabel_groups_masked(group_idx, keep_group)
def relabel_groups_masked(group_idx, keep_group):
"""
group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
0 1 2 3 4 5
keep_group: [0 1 0 1 1 1]
ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]
Description of above in words: remove group 2, and relabel group 3,4, and 5
to be 2, 3 and 4 respecitvely, in order to fill the gap. Note that group 4 was never used
in the input group_idx, but the user supplied mask said to keep group 4, so group
5 is only moved up by one place to fill the gap created by removing group 2.
That is, the mask describes which groups to remove,
the remaining groups are relabled to remove the gaps created by the falsy
elements in ``keep_group``. Note that ``keep_group[0]`` has no particular meaning because it refers
to the zero group which cannot be "removed".
``keep_group`` should be bool and ``group_idx`` int.
Values in ``group_idx`` can be any order, and
"""
keep_group = keep_group.astype(bool, copy=not keep_group[0])
if not keep_group[0]: # ensuring keep_group[0] is True makes life easier
keep_group[0] = True
relabel = np.zeros(keep_group.size, dtype=group_idx.dtype)
relabel[keep_group] = np.arange(np.count_nonzero(keep_group))
return relabel[group_idx]
|
ml31415/numpy-groupies | numpy_groupies/utils_numpy.py | multi_arange | python | def multi_arange(n):
if n.ndim != 1:
raise ValueError("n is supposed to be 1d array.")
n_mask = n.astype(bool)
n_cumsum = np.cumsum(n)
ret = np.ones(n_cumsum[-1] + 1, dtype=int)
ret[n_cumsum[n_mask]] -= n[n_mask]
ret[0] -= 1
return np.cumsum(ret)[:-1] | By example:
# 0 1 2 3 4 5 6 7 8
n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
res = [0, 1, 2, 0, 1, 0, 1, 0]
That is it is equivalent to something like this :
hstack((arange(n_i) for n_i in n))
This version seems quite a bit faster, at least for some
possible inputs, and at any rate it encapsulates a task
in a function. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L309-L332 | null | """Common helper functions for typing and general numpy tools."""
import numpy as np
from .utils import get_aliasing
_alias_numpy = {
np.add: 'sum',
np.sum: 'sum',
np.any: 'any',
np.all: 'all',
np.multiply: 'prod',
np.prod: 'prod',
np.amin: 'min',
np.min: 'min',
np.minimum: 'min',
np.amax: 'max',
np.max: 'max',
np.maximum: 'max',
np.argmax: 'argmax',
np.argmin: 'argmin',
np.mean: 'mean',
np.std: 'std',
np.var: 'var',
np.array: 'array',
np.asarray: 'array',
np.sort: 'sort',
np.nansum: 'nansum',
np.nanprod: 'nanprod',
np.nanmean: 'nanmean',
np.nanvar: 'nanvar',
np.nanmax: 'nanmax',
np.nanmin: 'nanmin',
np.nanstd: 'nanstd',
np.nanargmax: 'nanargmax',
np.nanargmin: 'nanargmin',
np.cumsum: 'cumsum',
np.cumprod: 'cumprod',
}
aliasing = get_aliasing(_alias_numpy)
_next_int_dtype = dict(
bool=np.int8,
uint8=np.int16,
int8=np.int16,
uint16=np.int32,
int16=np.int32,
uint32=np.int64,
int32=np.int64
)
_next_float_dtype = dict(
float16=np.float32,
float32=np.float64,
float64=np.complex64,
complex64=np.complex128
)
def minimum_dtype(x, dtype=np.bool_):
"""returns the "most basic" dtype which represents `x` properly, which
provides at least the same value range as the specified dtype."""
def check_type(x, dtype):
try:
converted = dtype.type(x)
except (ValueError, OverflowError):
return False
# False if some overflow has happened
return converted == x or np.isnan(x)
def type_loop(x, dtype, dtype_dict, default=None):
while True:
try:
dtype = np.dtype(dtype_dict[dtype.name])
if check_type(x, dtype):
return np.dtype(dtype)
except KeyError:
if default is not None:
return np.dtype(default)
raise ValueError("Can not determine dtype of %r" % x)
dtype = np.dtype(dtype)
if check_type(x, dtype):
return dtype
if np.issubdtype(dtype, np.inexact):
return type_loop(x, dtype, _next_float_dtype)
else:
return type_loop(x, dtype, _next_int_dtype, default=np.float32)
def minimum_dtype_scalar(x, dtype, a):
if dtype is None:
dtype = np.dtype(type(a)) if isinstance(a, (int, float))\
else a.dtype
return minimum_dtype(x, dtype)
_forced_types = {
'array': np.object,
'all': np.bool_,
'any': np.bool_,
'nanall': np.bool_,
'nanany': np.bool_,
'len': np.int64,
'nanlen': np.int64,
'allnan': np.bool_,
'anynan': np.bool_,
'argmax': np.int64,
'argmin': np.int64,
}
_forced_float_types = {'mean', 'var', 'std', 'nanmean', 'nanvar', 'nanstd'}
_forced_same_type = {'min', 'max', 'first', 'last', 'nanmin', 'nanmax',
'nanfirst', 'nanlast'}
def check_dtype(dtype, func_str, a, n):
if np.isscalar(a) or not a.shape:
if func_str not in ("sum", "prod", "len"):
raise ValueError("scalar inputs are supported only for 'sum', "
"'prod' and 'len'")
a_dtype = np.dtype(type(a))
else:
a_dtype = a.dtype
if dtype is not None:
# dtype set by the user
# Careful here: np.bool != np.bool_ !
if np.issubdtype(dtype, np.bool_) and \
not('all' in func_str or 'any' in func_str):
raise TypeError("function %s requires a more complex datatype "
"than bool" % func_str)
if not np.issubdtype(dtype, np.integer) and func_str in ('len', 'nanlen'):
raise TypeError("function %s requires an integer datatype" % func_str)
# TODO: Maybe have some more checks here
return np.dtype(dtype)
else:
try:
return np.dtype(_forced_types[func_str])
except KeyError:
if func_str in _forced_float_types:
if np.issubdtype(a_dtype, np.floating):
return a_dtype
else:
return np.dtype(np.float64)
else:
if func_str == 'sum':
# Try to guess the minimally required int size
if np.issubdtype(a_dtype, np.int64):
# It's not getting bigger anymore
# TODO: strictly speaking it might need float
return np.dtype(np.int64)
elif np.issubdtype(a_dtype, np.integer):
maxval = np.iinfo(a_dtype).max * n
return minimum_dtype(maxval, a_dtype)
elif np.issubdtype(a_dtype, np.bool_):
return minimum_dtype(n, a_dtype)
else:
# floating, inexact, whatever
return a_dtype
elif func_str in _forced_same_type:
return a_dtype
else:
if isinstance(a_dtype, np.integer):
return np.dtype(np.int64)
else:
return a_dtype
def check_fill_value(fill_value, dtype):
try:
return dtype.type(fill_value)
except ValueError:
raise ValueError("fill_value must be convertible into %s"
% dtype.type.__name__)
def check_group_idx(group_idx, a=None, check_min=True):
if a is not None and group_idx.size != a.size:
raise ValueError("The size of group_idx must be the same as "
"a.size")
if not issubclass(group_idx.dtype.type, np.integer):
raise TypeError("group_idx must be of integer type")
if check_min and np.min(group_idx) < 0:
raise ValueError("group_idx contains negative indices")
def input_validation(group_idx, a, size=None, order='C', axis=None,
ravel_group_idx=True, check_bounds=True):
""" Do some fairly extensive checking of group_idx and a, trying to
give the user as much help as possible with what is wrong. Also,
convert ndim-indexing to 1d indexing.
"""
if not isinstance(a, (int, float, complex)):
a = np.asanyarray(a)
group_idx = np.asanyarray(group_idx)
if not np.issubdtype(group_idx.dtype, np.integer):
raise TypeError("group_idx must be of integer type")
# This check works for multidimensional indexing as well
if check_bounds and np.any(group_idx < 0):
raise ValueError("negative indices not supported")
ndim_idx = np.ndim(group_idx)
ndim_a = np.ndim(a)
# Deal with the axis arg: if present, then turn 1d indexing into
# multi-dimensional indexing along the specified axis.
if axis is None:
if ndim_a > 1:
raise ValueError("a must be scalar or 1 dimensional, use .ravel to"
" flatten. Alternatively specify axis.")
elif axis >= ndim_a or axis < -ndim_a:
raise ValueError("axis arg too large for np.ndim(a)")
else:
axis = axis if axis >= 0 else ndim_a + axis # negative indexing
if ndim_idx > 1:
# TODO: we could support a sequence of axis values for multiple
# dimensions of group_idx.
raise NotImplementedError("only 1d indexing currently"
"supported with axis arg.")
elif a.shape[axis] != len(group_idx):
raise ValueError("a.shape[axis] doesn't match length of group_idx.")
elif size is not None and not np.isscalar(size):
raise NotImplementedError("when using axis arg, size must be"
"None or scalar.")
else:
# Create the broadcast-ready multidimensional indexing.
# Note the user could do this themselves, so this is
# very much just a convenience.
size_in = np.max(group_idx) + 1 if size is None else size
group_idx_in = group_idx
group_idx = []
size = []
for ii, s in enumerate(a.shape):
ii_idx = group_idx_in if ii == axis else np.arange(s)
ii_shape = [1] * ndim_a
ii_shape[ii] = s
group_idx.append(ii_idx.reshape(ii_shape))
size.append(size_in if ii == axis else s)
# Use the indexing, and return. It's a bit simpler than
# using trying to keep all the logic below happy
group_idx = np.ravel_multi_index(group_idx, size, order=order,
mode='raise')
flat_size = np.prod(size)
ndim_idx = ndim_a
return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size
if ndim_idx == 1:
if size is None:
size = np.max(group_idx) + 1
else:
if not np.isscalar(size):
raise ValueError("output size must be scalar or None")
if check_bounds and np.any(group_idx > size - 1):
raise ValueError("one or more indices are too large for "
"size %d" % size)
flat_size = size
else:
if size is None:
size = np.max(group_idx, axis=1) + 1
elif np.isscalar(size):
raise ValueError("output size must be of length %d"
% len(group_idx))
elif len(size) != len(group_idx):
raise ValueError("%d sizes given, but %d output dimensions "
"specified in index" % (len(size),
len(group_idx)))
if ravel_group_idx:
group_idx = np.ravel_multi_index(group_idx, size, order=order,
mode='raise')
flat_size = np.prod(size)
if not (np.ndim(a) == 0 or len(a) == group_idx.size):
raise ValueError("group_idx and a must be of the same length, or a"
" can be scalar")
return group_idx, a, flat_size, ndim_idx, size
### General tools ###
def unpack(group_idx, ret):
""" Take an aggregate packed array and uncompress it to the size of group_idx.
This is equivalent to ret[group_idx].
"""
return ret[group_idx]
def allnan(x):
return np.all(np.isnan(x))
def anynan(x):
return np.any(np.isnan(x))
def nanfirst(x):
return x[~np.isnan(x)][0]
def nanlast(x):
return x[~np.isnan(x)][-1]
def multi_arange(n):
"""By example:
# 0 1 2 3 4 5 6 7 8
n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
res = [0, 1, 2, 0, 1, 0, 1, 0]
That is it is equivalent to something like this :
hstack((arange(n_i) for n_i in n))
This version seems quite a bit faster, at least for some
possible inputs, and at any rate it encapsulates a task
in a function.
"""
if n.ndim != 1:
raise ValueError("n is supposed to be 1d array.")
n_mask = n.astype(bool)
n_cumsum = np.cumsum(n)
ret = np.ones(n_cumsum[-1] + 1, dtype=int)
ret[n_cumsum[n_mask]] -= n[n_mask]
ret[0] -= 1
return np.cumsum(ret)[:-1]
def label_contiguous_1d(X):
"""
WARNING: API for this function is not liable to change!!!
By example:
X = [F T T F F T F F F T T T]
result = [0 1 1 0 0 2 0 0 0 3 3 3]
Or:
X = [0 3 3 0 0 5 5 5 1 1 0 2]
result = [0 1 1 0 0 2 2 2 3 3 0 4]
The ``0`` or ``False`` elements of ``X`` are labeled as ``0`` in the output. If ``X``
is a boolean array, each contiguous block of ``True`` is given an integer
label, if ``X`` is not boolean, then each contiguous block of identical values
is given an integer label. Integer labels are 1, 2, 3,..... (i.e. start a 1
and increase by 1 for each block with no skipped numbers.)
"""
if X.ndim != 1:
raise ValueError("this is for 1d masks only.")
is_start = np.empty(len(X), dtype=bool)
is_start[0] = X[0] # True if X[0] is True or non-zero
if X.dtype.kind == 'b':
is_start[1:] = ~X[:-1] & X[1:]
M = X
else:
M = X.astype(bool)
is_start[1:] = X[:-1] != X[1:]
is_start[~M] = False
L = np.cumsum(is_start)
L[~M] = 0
return L
def relabel_groups_unique(group_idx):
"""
See also ``relabel_groups_masked``.
keep_group: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
ret: [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4]
Description of above: unique groups in input was ``1,2,3,5``, i.e.
``4`` was missing, so group 5 was relabled to be ``4``.
Relabeling maintains order, just "compressing" the higher numbers
to fill gaps.
"""
keep_group = np.zeros(np.max(group_idx) + 1, dtype=bool)
keep_group[0] = True
keep_group[group_idx] = True
return relabel_groups_masked(group_idx, keep_group)
def relabel_groups_masked(group_idx, keep_group):
"""
group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
0 1 2 3 4 5
keep_group: [0 1 0 1 1 1]
ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]
Description of above in words: remove group 2, and relabel group 3,4, and 5
to be 2, 3 and 4 respecitvely, in order to fill the gap. Note that group 4 was never used
in the input group_idx, but the user supplied mask said to keep group 4, so group
5 is only moved up by one place to fill the gap created by removing group 2.
That is, the mask describes which groups to remove,
the remaining groups are relabled to remove the gaps created by the falsy
elements in ``keep_group``. Note that ``keep_group[0]`` has no particular meaning because it refers
to the zero group which cannot be "removed".
``keep_group`` should be bool and ``group_idx`` int.
Values in ``group_idx`` can be any order, and
"""
keep_group = keep_group.astype(bool, copy=not keep_group[0])
if not keep_group[0]: # ensuring keep_group[0] is True makes life easier
keep_group[0] = True
relabel = np.zeros(keep_group.size, dtype=group_idx.dtype)
relabel[keep_group] = np.arange(np.count_nonzero(keep_group))
return relabel[group_idx]
|
ml31415/numpy-groupies | numpy_groupies/utils_numpy.py | label_contiguous_1d | python | def label_contiguous_1d(X):
if X.ndim != 1:
raise ValueError("this is for 1d masks only.")
is_start = np.empty(len(X), dtype=bool)
is_start[0] = X[0] # True if X[0] is True or non-zero
if X.dtype.kind == 'b':
is_start[1:] = ~X[:-1] & X[1:]
M = X
else:
M = X.astype(bool)
is_start[1:] = X[:-1] != X[1:]
is_start[~M] = False
L = np.cumsum(is_start)
L[~M] = 0
return L | WARNING: API for this function is not liable to change!!!
By example:
X = [F T T F F T F F F T T T]
result = [0 1 1 0 0 2 0 0 0 3 3 3]
Or:
X = [0 3 3 0 0 5 5 5 1 1 0 2]
result = [0 1 1 0 0 2 2 2 3 3 0 4]
The ``0`` or ``False`` elements of ``X`` are labeled as ``0`` in the output. If ``X``
is a boolean array, each contiguous block of ``True`` is given an integer
label, if ``X`` is not boolean, then each contiguous block of identical values
is given an integer label. Integer labels are 1, 2, 3,..... (i.e. start a 1
and increase by 1 for each block with no skipped numbers.) | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L335-L372 | null | """Common helper functions for typing and general numpy tools."""
import numpy as np
from .utils import get_aliasing
_alias_numpy = {
np.add: 'sum',
np.sum: 'sum',
np.any: 'any',
np.all: 'all',
np.multiply: 'prod',
np.prod: 'prod',
np.amin: 'min',
np.min: 'min',
np.minimum: 'min',
np.amax: 'max',
np.max: 'max',
np.maximum: 'max',
np.argmax: 'argmax',
np.argmin: 'argmin',
np.mean: 'mean',
np.std: 'std',
np.var: 'var',
np.array: 'array',
np.asarray: 'array',
np.sort: 'sort',
np.nansum: 'nansum',
np.nanprod: 'nanprod',
np.nanmean: 'nanmean',
np.nanvar: 'nanvar',
np.nanmax: 'nanmax',
np.nanmin: 'nanmin',
np.nanstd: 'nanstd',
np.nanargmax: 'nanargmax',
np.nanargmin: 'nanargmin',
np.cumsum: 'cumsum',
np.cumprod: 'cumprod',
}
aliasing = get_aliasing(_alias_numpy)
_next_int_dtype = dict(
bool=np.int8,
uint8=np.int16,
int8=np.int16,
uint16=np.int32,
int16=np.int32,
uint32=np.int64,
int32=np.int64
)
_next_float_dtype = dict(
float16=np.float32,
float32=np.float64,
float64=np.complex64,
complex64=np.complex128
)
def minimum_dtype(x, dtype=np.bool_):
"""returns the "most basic" dtype which represents `x` properly, which
provides at least the same value range as the specified dtype."""
def check_type(x, dtype):
try:
converted = dtype.type(x)
except (ValueError, OverflowError):
return False
# False if some overflow has happened
return converted == x or np.isnan(x)
def type_loop(x, dtype, dtype_dict, default=None):
while True:
try:
dtype = np.dtype(dtype_dict[dtype.name])
if check_type(x, dtype):
return np.dtype(dtype)
except KeyError:
if default is not None:
return np.dtype(default)
raise ValueError("Can not determine dtype of %r" % x)
dtype = np.dtype(dtype)
if check_type(x, dtype):
return dtype
if np.issubdtype(dtype, np.inexact):
return type_loop(x, dtype, _next_float_dtype)
else:
return type_loop(x, dtype, _next_int_dtype, default=np.float32)
def minimum_dtype_scalar(x, dtype, a):
if dtype is None:
dtype = np.dtype(type(a)) if isinstance(a, (int, float))\
else a.dtype
return minimum_dtype(x, dtype)
_forced_types = {
'array': np.object,
'all': np.bool_,
'any': np.bool_,
'nanall': np.bool_,
'nanany': np.bool_,
'len': np.int64,
'nanlen': np.int64,
'allnan': np.bool_,
'anynan': np.bool_,
'argmax': np.int64,
'argmin': np.int64,
}
_forced_float_types = {'mean', 'var', 'std', 'nanmean', 'nanvar', 'nanstd'}
_forced_same_type = {'min', 'max', 'first', 'last', 'nanmin', 'nanmax',
'nanfirst', 'nanlast'}
def check_dtype(dtype, func_str, a, n):
if np.isscalar(a) or not a.shape:
if func_str not in ("sum", "prod", "len"):
raise ValueError("scalar inputs are supported only for 'sum', "
"'prod' and 'len'")
a_dtype = np.dtype(type(a))
else:
a_dtype = a.dtype
if dtype is not None:
# dtype set by the user
# Careful here: np.bool != np.bool_ !
if np.issubdtype(dtype, np.bool_) and \
not('all' in func_str or 'any' in func_str):
raise TypeError("function %s requires a more complex datatype "
"than bool" % func_str)
if not np.issubdtype(dtype, np.integer) and func_str in ('len', 'nanlen'):
raise TypeError("function %s requires an integer datatype" % func_str)
# TODO: Maybe have some more checks here
return np.dtype(dtype)
else:
try:
return np.dtype(_forced_types[func_str])
except KeyError:
if func_str in _forced_float_types:
if np.issubdtype(a_dtype, np.floating):
return a_dtype
else:
return np.dtype(np.float64)
else:
if func_str == 'sum':
# Try to guess the minimally required int size
if np.issubdtype(a_dtype, np.int64):
# It's not getting bigger anymore
# TODO: strictly speaking it might need float
return np.dtype(np.int64)
elif np.issubdtype(a_dtype, np.integer):
maxval = np.iinfo(a_dtype).max * n
return minimum_dtype(maxval, a_dtype)
elif np.issubdtype(a_dtype, np.bool_):
return minimum_dtype(n, a_dtype)
else:
# floating, inexact, whatever
return a_dtype
elif func_str in _forced_same_type:
return a_dtype
else:
if isinstance(a_dtype, np.integer):
return np.dtype(np.int64)
else:
return a_dtype
def check_fill_value(fill_value, dtype):
try:
return dtype.type(fill_value)
except ValueError:
raise ValueError("fill_value must be convertible into %s"
% dtype.type.__name__)
def check_group_idx(group_idx, a=None, check_min=True):
if a is not None and group_idx.size != a.size:
raise ValueError("The size of group_idx must be the same as "
"a.size")
if not issubclass(group_idx.dtype.type, np.integer):
raise TypeError("group_idx must be of integer type")
if check_min and np.min(group_idx) < 0:
raise ValueError("group_idx contains negative indices")
def input_validation(group_idx, a, size=None, order='C', axis=None,
ravel_group_idx=True, check_bounds=True):
""" Do some fairly extensive checking of group_idx and a, trying to
give the user as much help as possible with what is wrong. Also,
convert ndim-indexing to 1d indexing.
"""
if not isinstance(a, (int, float, complex)):
a = np.asanyarray(a)
group_idx = np.asanyarray(group_idx)
if not np.issubdtype(group_idx.dtype, np.integer):
raise TypeError("group_idx must be of integer type")
# This check works for multidimensional indexing as well
if check_bounds and np.any(group_idx < 0):
raise ValueError("negative indices not supported")
ndim_idx = np.ndim(group_idx)
ndim_a = np.ndim(a)
# Deal with the axis arg: if present, then turn 1d indexing into
# multi-dimensional indexing along the specified axis.
if axis is None:
if ndim_a > 1:
raise ValueError("a must be scalar or 1 dimensional, use .ravel to"
" flatten. Alternatively specify axis.")
elif axis >= ndim_a or axis < -ndim_a:
raise ValueError("axis arg too large for np.ndim(a)")
else:
axis = axis if axis >= 0 else ndim_a + axis # negative indexing
if ndim_idx > 1:
# TODO: we could support a sequence of axis values for multiple
# dimensions of group_idx.
raise NotImplementedError("only 1d indexing currently"
"supported with axis arg.")
elif a.shape[axis] != len(group_idx):
raise ValueError("a.shape[axis] doesn't match length of group_idx.")
elif size is not None and not np.isscalar(size):
raise NotImplementedError("when using axis arg, size must be"
"None or scalar.")
else:
# Create the broadcast-ready multidimensional indexing.
# Note the user could do this themselves, so this is
# very much just a convenience.
size_in = np.max(group_idx) + 1 if size is None else size
group_idx_in = group_idx
group_idx = []
size = []
for ii, s in enumerate(a.shape):
ii_idx = group_idx_in if ii == axis else np.arange(s)
ii_shape = [1] * ndim_a
ii_shape[ii] = s
group_idx.append(ii_idx.reshape(ii_shape))
size.append(size_in if ii == axis else s)
# Use the indexing, and return. It's a bit simpler than
# using trying to keep all the logic below happy
group_idx = np.ravel_multi_index(group_idx, size, order=order,
mode='raise')
flat_size = np.prod(size)
ndim_idx = ndim_a
return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size
if ndim_idx == 1:
if size is None:
size = np.max(group_idx) + 1
else:
if not np.isscalar(size):
raise ValueError("output size must be scalar or None")
if check_bounds and np.any(group_idx > size - 1):
raise ValueError("one or more indices are too large for "
"size %d" % size)
flat_size = size
else:
if size is None:
size = np.max(group_idx, axis=1) + 1
elif np.isscalar(size):
raise ValueError("output size must be of length %d"
% len(group_idx))
elif len(size) != len(group_idx):
raise ValueError("%d sizes given, but %d output dimensions "
"specified in index" % (len(size),
len(group_idx)))
if ravel_group_idx:
group_idx = np.ravel_multi_index(group_idx, size, order=order,
mode='raise')
flat_size = np.prod(size)
if not (np.ndim(a) == 0 or len(a) == group_idx.size):
raise ValueError("group_idx and a must be of the same length, or a"
" can be scalar")
return group_idx, a, flat_size, ndim_idx, size
### General tools ###
def unpack(group_idx, ret):
""" Take an aggregate packed array and uncompress it to the size of group_idx.
This is equivalent to ret[group_idx].
"""
return ret[group_idx]
def allnan(x):
return np.all(np.isnan(x))
def anynan(x):
return np.any(np.isnan(x))
def nanfirst(x):
return x[~np.isnan(x)][0]
def nanlast(x):
return x[~np.isnan(x)][-1]
def multi_arange(n):
"""By example:
# 0 1 2 3 4 5 6 7 8
n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
res = [0, 1, 2, 0, 1, 0, 1, 0]
That is it is equivalent to something like this :
hstack((arange(n_i) for n_i in n))
This version seems quite a bit faster, at least for some
possible inputs, and at any rate it encapsulates a task
in a function.
"""
if n.ndim != 1:
raise ValueError("n is supposed to be 1d array.")
n_mask = n.astype(bool)
n_cumsum = np.cumsum(n)
ret = np.ones(n_cumsum[-1] + 1, dtype=int)
ret[n_cumsum[n_mask]] -= n[n_mask]
ret[0] -= 1
return np.cumsum(ret)[:-1]
def label_contiguous_1d(X):
"""
WARNING: API for this function is not liable to change!!!
By example:
X = [F T T F F T F F F T T T]
result = [0 1 1 0 0 2 0 0 0 3 3 3]
Or:
X = [0 3 3 0 0 5 5 5 1 1 0 2]
result = [0 1 1 0 0 2 2 2 3 3 0 4]
The ``0`` or ``False`` elements of ``X`` are labeled as ``0`` in the output. If ``X``
is a boolean array, each contiguous block of ``True`` is given an integer
label, if ``X`` is not boolean, then each contiguous block of identical values
is given an integer label. Integer labels are 1, 2, 3,..... (i.e. start a 1
and increase by 1 for each block with no skipped numbers.)
"""
if X.ndim != 1:
raise ValueError("this is for 1d masks only.")
is_start = np.empty(len(X), dtype=bool)
is_start[0] = X[0] # True if X[0] is True or non-zero
if X.dtype.kind == 'b':
is_start[1:] = ~X[:-1] & X[1:]
M = X
else:
M = X.astype(bool)
is_start[1:] = X[:-1] != X[1:]
is_start[~M] = False
L = np.cumsum(is_start)
L[~M] = 0
return L
def relabel_groups_unique(group_idx):
"""
See also ``relabel_groups_masked``.
keep_group: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
ret: [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4]
Description of above: unique groups in input was ``1,2,3,5``, i.e.
``4`` was missing, so group 5 was relabled to be ``4``.
Relabeling maintains order, just "compressing" the higher numbers
to fill gaps.
"""
keep_group = np.zeros(np.max(group_idx) + 1, dtype=bool)
keep_group[0] = True
keep_group[group_idx] = True
return relabel_groups_masked(group_idx, keep_group)
def relabel_groups_masked(group_idx, keep_group):
"""
group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
0 1 2 3 4 5
keep_group: [0 1 0 1 1 1]
ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]
Description of above in words: remove group 2, and relabel group 3,4, and 5
to be 2, 3 and 4 respecitvely, in order to fill the gap. Note that group 4 was never used
in the input group_idx, but the user supplied mask said to keep group 4, so group
5 is only moved up by one place to fill the gap created by removing group 2.
That is, the mask describes which groups to remove,
the remaining groups are relabled to remove the gaps created by the falsy
elements in ``keep_group``. Note that ``keep_group[0]`` has no particular meaning because it refers
to the zero group which cannot be "removed".
``keep_group`` should be bool and ``group_idx`` int.
Values in ``group_idx`` can be any order, and
"""
keep_group = keep_group.astype(bool, copy=not keep_group[0])
if not keep_group[0]: # ensuring keep_group[0] is True makes life easier
keep_group[0] = True
relabel = np.zeros(keep_group.size, dtype=group_idx.dtype)
relabel[keep_group] = np.arange(np.count_nonzero(keep_group))
return relabel[group_idx]
|
ml31415/numpy-groupies | numpy_groupies/utils_numpy.py | relabel_groups_unique | python | def relabel_groups_unique(group_idx):
keep_group = np.zeros(np.max(group_idx) + 1, dtype=bool)
keep_group[0] = True
keep_group[group_idx] = True
return relabel_groups_masked(group_idx, keep_group) | See also ``relabel_groups_masked``.
keep_group: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
ret: [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4]
Description of above: unique groups in input was ``1,2,3,5``, i.e.
``4`` was missing, so group 5 was relabled to be ``4``.
Relabeling maintains order, just "compressing" the higher numbers
to fill gaps. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L375-L391 | [
"def relabel_groups_masked(group_idx, keep_group):\n \"\"\"\n group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]\n\n 0 1 2 3 4 5\n keep_group: [0 1 0 1 1 1]\n\n ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]\n\n Description of above in words: remove group 2, and relabel group 3,4, and 5\n to be 2, 3 and 4 respecitvely, in order to fill the gap. Note that group 4 was never used\n in the input group_idx, but the user supplied mask said to keep group 4, so group\n 5 is only moved up by one place to fill the gap created by removing group 2.\n\n That is, the mask describes which groups to remove,\n the remaining groups are relabled to remove the gaps created by the falsy\n elements in ``keep_group``. Note that ``keep_group[0]`` has no particular meaning because it refers\n to the zero group which cannot be \"removed\".\n\n ``keep_group`` should be bool and ``group_idx`` int.\n Values in ``group_idx`` can be any order, and \n \"\"\"\n\n keep_group = keep_group.astype(bool, copy=not keep_group[0])\n if not keep_group[0]: # ensuring keep_group[0] is True makes life easier\n keep_group[0] = True\n\n relabel = np.zeros(keep_group.size, dtype=group_idx.dtype)\n relabel[keep_group] = np.arange(np.count_nonzero(keep_group))\n return relabel[group_idx]\n"
] | """Common helper functions for typing and general numpy tools."""
import numpy as np
from .utils import get_aliasing
_alias_numpy = {
np.add: 'sum',
np.sum: 'sum',
np.any: 'any',
np.all: 'all',
np.multiply: 'prod',
np.prod: 'prod',
np.amin: 'min',
np.min: 'min',
np.minimum: 'min',
np.amax: 'max',
np.max: 'max',
np.maximum: 'max',
np.argmax: 'argmax',
np.argmin: 'argmin',
np.mean: 'mean',
np.std: 'std',
np.var: 'var',
np.array: 'array',
np.asarray: 'array',
np.sort: 'sort',
np.nansum: 'nansum',
np.nanprod: 'nanprod',
np.nanmean: 'nanmean',
np.nanvar: 'nanvar',
np.nanmax: 'nanmax',
np.nanmin: 'nanmin',
np.nanstd: 'nanstd',
np.nanargmax: 'nanargmax',
np.nanargmin: 'nanargmin',
np.cumsum: 'cumsum',
np.cumprod: 'cumprod',
}
aliasing = get_aliasing(_alias_numpy)
_next_int_dtype = dict(
bool=np.int8,
uint8=np.int16,
int8=np.int16,
uint16=np.int32,
int16=np.int32,
uint32=np.int64,
int32=np.int64
)
_next_float_dtype = dict(
float16=np.float32,
float32=np.float64,
float64=np.complex64,
complex64=np.complex128
)
def minimum_dtype(x, dtype=np.bool_):
"""returns the "most basic" dtype which represents `x` properly, which
provides at least the same value range as the specified dtype."""
def check_type(x, dtype):
try:
converted = dtype.type(x)
except (ValueError, OverflowError):
return False
# False if some overflow has happened
return converted == x or np.isnan(x)
def type_loop(x, dtype, dtype_dict, default=None):
while True:
try:
dtype = np.dtype(dtype_dict[dtype.name])
if check_type(x, dtype):
return np.dtype(dtype)
except KeyError:
if default is not None:
return np.dtype(default)
raise ValueError("Can not determine dtype of %r" % x)
dtype = np.dtype(dtype)
if check_type(x, dtype):
return dtype
if np.issubdtype(dtype, np.inexact):
return type_loop(x, dtype, _next_float_dtype)
else:
return type_loop(x, dtype, _next_int_dtype, default=np.float32)
def minimum_dtype_scalar(x, dtype, a):
if dtype is None:
dtype = np.dtype(type(a)) if isinstance(a, (int, float))\
else a.dtype
return minimum_dtype(x, dtype)
_forced_types = {
'array': np.object,
'all': np.bool_,
'any': np.bool_,
'nanall': np.bool_,
'nanany': np.bool_,
'len': np.int64,
'nanlen': np.int64,
'allnan': np.bool_,
'anynan': np.bool_,
'argmax': np.int64,
'argmin': np.int64,
}
_forced_float_types = {'mean', 'var', 'std', 'nanmean', 'nanvar', 'nanstd'}
_forced_same_type = {'min', 'max', 'first', 'last', 'nanmin', 'nanmax',
'nanfirst', 'nanlast'}
def check_dtype(dtype, func_str, a, n):
if np.isscalar(a) or not a.shape:
if func_str not in ("sum", "prod", "len"):
raise ValueError("scalar inputs are supported only for 'sum', "
"'prod' and 'len'")
a_dtype = np.dtype(type(a))
else:
a_dtype = a.dtype
if dtype is not None:
# dtype set by the user
# Careful here: np.bool != np.bool_ !
if np.issubdtype(dtype, np.bool_) and \
not('all' in func_str or 'any' in func_str):
raise TypeError("function %s requires a more complex datatype "
"than bool" % func_str)
if not np.issubdtype(dtype, np.integer) and func_str in ('len', 'nanlen'):
raise TypeError("function %s requires an integer datatype" % func_str)
# TODO: Maybe have some more checks here
return np.dtype(dtype)
else:
try:
return np.dtype(_forced_types[func_str])
except KeyError:
if func_str in _forced_float_types:
if np.issubdtype(a_dtype, np.floating):
return a_dtype
else:
return np.dtype(np.float64)
else:
if func_str == 'sum':
# Try to guess the minimally required int size
if np.issubdtype(a_dtype, np.int64):
# It's not getting bigger anymore
# TODO: strictly speaking it might need float
return np.dtype(np.int64)
elif np.issubdtype(a_dtype, np.integer):
maxval = np.iinfo(a_dtype).max * n
return minimum_dtype(maxval, a_dtype)
elif np.issubdtype(a_dtype, np.bool_):
return minimum_dtype(n, a_dtype)
else:
# floating, inexact, whatever
return a_dtype
elif func_str in _forced_same_type:
return a_dtype
else:
if isinstance(a_dtype, np.integer):
return np.dtype(np.int64)
else:
return a_dtype
def check_fill_value(fill_value, dtype):
try:
return dtype.type(fill_value)
except ValueError:
raise ValueError("fill_value must be convertible into %s"
% dtype.type.__name__)
def check_group_idx(group_idx, a=None, check_min=True):
if a is not None and group_idx.size != a.size:
raise ValueError("The size of group_idx must be the same as "
"a.size")
if not issubclass(group_idx.dtype.type, np.integer):
raise TypeError("group_idx must be of integer type")
if check_min and np.min(group_idx) < 0:
raise ValueError("group_idx contains negative indices")
def input_validation(group_idx, a, size=None, order='C', axis=None,
ravel_group_idx=True, check_bounds=True):
""" Do some fairly extensive checking of group_idx and a, trying to
give the user as much help as possible with what is wrong. Also,
convert ndim-indexing to 1d indexing.
"""
if not isinstance(a, (int, float, complex)):
a = np.asanyarray(a)
group_idx = np.asanyarray(group_idx)
if not np.issubdtype(group_idx.dtype, np.integer):
raise TypeError("group_idx must be of integer type")
# This check works for multidimensional indexing as well
if check_bounds and np.any(group_idx < 0):
raise ValueError("negative indices not supported")
ndim_idx = np.ndim(group_idx)
ndim_a = np.ndim(a)
# Deal with the axis arg: if present, then turn 1d indexing into
# multi-dimensional indexing along the specified axis.
if axis is None:
if ndim_a > 1:
raise ValueError("a must be scalar or 1 dimensional, use .ravel to"
" flatten. Alternatively specify axis.")
elif axis >= ndim_a or axis < -ndim_a:
raise ValueError("axis arg too large for np.ndim(a)")
else:
axis = axis if axis >= 0 else ndim_a + axis # negative indexing
if ndim_idx > 1:
# TODO: we could support a sequence of axis values for multiple
# dimensions of group_idx.
raise NotImplementedError("only 1d indexing currently"
"supported with axis arg.")
elif a.shape[axis] != len(group_idx):
raise ValueError("a.shape[axis] doesn't match length of group_idx.")
elif size is not None and not np.isscalar(size):
raise NotImplementedError("when using axis arg, size must be"
"None or scalar.")
else:
# Create the broadcast-ready multidimensional indexing.
# Note the user could do this themselves, so this is
# very much just a convenience.
size_in = np.max(group_idx) + 1 if size is None else size
group_idx_in = group_idx
group_idx = []
size = []
for ii, s in enumerate(a.shape):
ii_idx = group_idx_in if ii == axis else np.arange(s)
ii_shape = [1] * ndim_a
ii_shape[ii] = s
group_idx.append(ii_idx.reshape(ii_shape))
size.append(size_in if ii == axis else s)
# Use the indexing, and return. It's a bit simpler than
# using trying to keep all the logic below happy
group_idx = np.ravel_multi_index(group_idx, size, order=order,
mode='raise')
flat_size = np.prod(size)
ndim_idx = ndim_a
return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size
if ndim_idx == 1:
if size is None:
size = np.max(group_idx) + 1
else:
if not np.isscalar(size):
raise ValueError("output size must be scalar or None")
if check_bounds and np.any(group_idx > size - 1):
raise ValueError("one or more indices are too large for "
"size %d" % size)
flat_size = size
else:
if size is None:
size = np.max(group_idx, axis=1) + 1
elif np.isscalar(size):
raise ValueError("output size must be of length %d"
% len(group_idx))
elif len(size) != len(group_idx):
raise ValueError("%d sizes given, but %d output dimensions "
"specified in index" % (len(size),
len(group_idx)))
if ravel_group_idx:
group_idx = np.ravel_multi_index(group_idx, size, order=order,
mode='raise')
flat_size = np.prod(size)
if not (np.ndim(a) == 0 or len(a) == group_idx.size):
raise ValueError("group_idx and a must be of the same length, or a"
" can be scalar")
return group_idx, a, flat_size, ndim_idx, size
### General tools ###
def unpack(group_idx, ret):
""" Take an aggregate packed array and uncompress it to the size of group_idx.
This is equivalent to ret[group_idx].
"""
return ret[group_idx]
def allnan(x):
return np.all(np.isnan(x))
def anynan(x):
return np.any(np.isnan(x))
def nanfirst(x):
return x[~np.isnan(x)][0]
def nanlast(x):
return x[~np.isnan(x)][-1]
def multi_arange(n):
"""By example:
# 0 1 2 3 4 5 6 7 8
n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
res = [0, 1, 2, 0, 1, 0, 1, 0]
That is it is equivalent to something like this :
hstack((arange(n_i) for n_i in n))
This version seems quite a bit faster, at least for some
possible inputs, and at any rate it encapsulates a task
in a function.
"""
if n.ndim != 1:
raise ValueError("n is supposed to be 1d array.")
n_mask = n.astype(bool)
n_cumsum = np.cumsum(n)
ret = np.ones(n_cumsum[-1] + 1, dtype=int)
ret[n_cumsum[n_mask]] -= n[n_mask]
ret[0] -= 1
return np.cumsum(ret)[:-1]
def label_contiguous_1d(X):
    """
    WARNING: API for this function is not liable to change!!!

    Label contiguous runs of a 1d array with consecutive integers.

    By example::

        X      = [F T T F F T F F F T T T]
        result = [0 1 1 0 0 2 0 0 0 3 3 3]

    Or::

        X      = [0 3 3 0 0 5 5 5 1 1 0 2]
        result = [0 1 1 0 0 2 2 2 3 3 0 4]

    Falsy elements of ``X`` are labeled ``0``.  For boolean ``X`` each
    contiguous block of ``True`` gets one label; otherwise each contiguous
    block of identical values gets one label.  Labels start at 1 and
    increase by 1 per block with no skipped numbers.
    """
    if X.ndim != 1:
        raise ValueError("this is for 1d masks only.")
    starts = np.empty(len(X), dtype=bool)
    starts[0] = X[0]  # True if X[0] is truthy
    if X.dtype.kind == 'b':
        mask = X
        starts[1:] = X[1:] & ~X[:-1]
    else:
        mask = X.astype(bool)
        starts[1:] = X[1:] != X[:-1]
        starts[~mask] = False  # value changes into zero are not block starts
    labels = np.cumsum(starts)
    labels[~mask] = 0
    return labels
def relabel_groups_unique(group_idx):
    """Compress group labels so no label numbers are skipped.

    See also ``relabel_groups_masked``.

        group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
        ret:       [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4]

    Unique groups in the input above were ``1,2,3,5``, i.e. ``4`` was
    missing, so group 5 is relabeled to ``4``.  Relabeling maintains
    order, just "compressing" the higher numbers to fill the gaps.
    """
    present = np.zeros(np.max(group_idx) + 1, dtype=bool)
    present[0] = True  # the zero group always counts as present
    present[group_idx] = True
    return relabel_groups_masked(group_idx, present)
def relabel_groups_masked(group_idx, keep_group):
    """Drop masked-out groups and relabel the survivors without gaps.

        group_idx:  [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]

                     0 1 2 3 4 5
        keep_group: [0 1 0 1 1 1]

        ret:        [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]

    In words: group 2 is removed and groups 3, 4 and 5 are relabeled to
    2, 3 and 4 respectively to fill the gap.  Group 4 never occurred in
    ``group_idx``, but the mask says to keep it, so group 5 moves up by
    only one place.  ``keep_group[0]`` has no real meaning because the
    zero group cannot be removed.  ``keep_group`` should be bool and
    ``group_idx`` int; values in ``group_idx`` may appear in any order.
    """
    # only copy when we are about to mutate element 0
    keep = keep_group.astype(bool, copy=not keep_group[0])
    if not keep[0]:  # ensuring the zero group is kept makes life easier
        keep[0] = True
    mapping = np.zeros(keep.size, dtype=group_idx.dtype)
    mapping[keep] = np.arange(np.count_nonzero(keep))
    return mapping[group_idx]
|
ml31415/numpy-groupies | numpy_groupies/utils_numpy.py | relabel_groups_masked | python | def relabel_groups_masked(group_idx, keep_group):
keep_group = keep_group.astype(bool, copy=not keep_group[0])
if not keep_group[0]: # ensuring keep_group[0] is True makes life easier
keep_group[0] = True
relabel = np.zeros(keep_group.size, dtype=group_idx.dtype)
relabel[keep_group] = np.arange(np.count_nonzero(keep_group))
return relabel[group_idx] | group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
0 1 2 3 4 5
keep_group: [0 1 0 1 1 1]
ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]
Description of above in words: remove group 2, and relabel group 3,4, and 5
to be 2, 3 and 4 respecitvely, in order to fill the gap. Note that group 4 was never used
in the input group_idx, but the user supplied mask said to keep group 4, so group
5 is only moved up by one place to fill the gap created by removing group 2.
That is, the mask describes which groups to remove,
the remaining groups are relabled to remove the gaps created by the falsy
elements in ``keep_group``. Note that ``keep_group[0]`` has no particular meaning because it refers
to the zero group which cannot be "removed".
``keep_group`` should be bool and ``group_idx`` int.
Values in ``group_idx`` can be any order, and | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L394-L423 | null | """Common helper functions for typing and general numpy tools."""
import numpy as np
from .utils import get_aliasing
_alias_numpy = {
np.add: 'sum',
np.sum: 'sum',
np.any: 'any',
np.all: 'all',
np.multiply: 'prod',
np.prod: 'prod',
np.amin: 'min',
np.min: 'min',
np.minimum: 'min',
np.amax: 'max',
np.max: 'max',
np.maximum: 'max',
np.argmax: 'argmax',
np.argmin: 'argmin',
np.mean: 'mean',
np.std: 'std',
np.var: 'var',
np.array: 'array',
np.asarray: 'array',
np.sort: 'sort',
np.nansum: 'nansum',
np.nanprod: 'nanprod',
np.nanmean: 'nanmean',
np.nanvar: 'nanvar',
np.nanmax: 'nanmax',
np.nanmin: 'nanmin',
np.nanstd: 'nanstd',
np.nanargmax: 'nanargmax',
np.nanargmin: 'nanargmin',
np.cumsum: 'cumsum',
np.cumprod: 'cumprod',
}
aliasing = get_aliasing(_alias_numpy)
_next_int_dtype = dict(
bool=np.int8,
uint8=np.int16,
int8=np.int16,
uint16=np.int32,
int16=np.int32,
uint32=np.int64,
int32=np.int64
)
_next_float_dtype = dict(
float16=np.float32,
float32=np.float64,
float64=np.complex64,
complex64=np.complex128
)
def minimum_dtype(x, dtype=np.bool_):
    """returns the "most basic" dtype which represents `x` properly, which
    provides at least the same value range as the specified dtype."""

    def _fits(value, dt):
        # can `value` round-trip through `dt` without overflow?
        try:
            cast = dt.type(value)
        except (ValueError, OverflowError):
            return False
        return cast == value or np.isnan(value)

    def _promote(value, dt, ladder, default=None):
        # climb the dtype promotion ladder until `value` fits
        while True:
            try:
                dt = np.dtype(ladder[dt.name])
                if _fits(value, dt):
                    return np.dtype(dt)
            except KeyError:
                if default is not None:
                    return np.dtype(default)
                raise ValueError("Can not determine dtype of %r" % value)

    dt = np.dtype(dtype)
    if _fits(x, dt):
        return dt
    if np.issubdtype(dt, np.inexact):
        return _promote(x, dt, _next_float_dtype)
    return _promote(x, dt, _next_int_dtype, default=np.float32)
def minimum_dtype_scalar(x, dtype, a):
    """Like ``minimum_dtype``, defaulting ``dtype`` from ``a`` when unset."""
    if dtype is None:
        if isinstance(a, (int, float)):
            dtype = np.dtype(type(a))
        else:
            dtype = a.dtype
    return minimum_dtype(x, dtype)
_forced_types = {
'array': np.object,
'all': np.bool_,
'any': np.bool_,
'nanall': np.bool_,
'nanany': np.bool_,
'len': np.int64,
'nanlen': np.int64,
'allnan': np.bool_,
'anynan': np.bool_,
'argmax': np.int64,
'argmin': np.int64,
}
_forced_float_types = {'mean', 'var', 'std', 'nanmean', 'nanvar', 'nanstd'}
_forced_same_type = {'min', 'max', 'first', 'last', 'nanmin', 'nanmax',
'nanfirst', 'nanlast'}
def check_dtype(dtype, func_str, a, n):
    """Validate a user-supplied output ``dtype`` or derive a suitable one.

    Parameters
    ----------
    dtype : dtype-like or None
        Requested output dtype; ``None`` means "choose automatically".
    func_str : str
        Canonical aggregation name (e.g. ``'sum'``, ``'nanmean'``).
    a : scalar or ndarray
        Values to aggregate; their dtype guides the automatic choice.
    n : int
        Element count, used to bound the magnitude of integer sums.

    Returns
    -------
    np.dtype
    """
    if np.isscalar(a) or not a.shape:
        if func_str not in ("sum", "prod", "len"):
            raise ValueError("scalar inputs are supported only for 'sum', "
                             "'prod' and 'len'")
        a_dtype = np.dtype(type(a))
    else:
        a_dtype = a.dtype
    if dtype is not None:
        # dtype set by the user
        # Careful here: np.bool != np.bool_ !
        if np.issubdtype(dtype, np.bool_) and \
                not('all' in func_str or 'any' in func_str):
            raise TypeError("function %s requires a more complex datatype "
                            "than bool" % func_str)
        if not np.issubdtype(dtype, np.integer) and func_str in ('len', 'nanlen'):
            raise TypeError("function %s requires an integer datatype" % func_str)
        # TODO: Maybe have some more checks here
        return np.dtype(dtype)
    else:
        try:
            return np.dtype(_forced_types[func_str])
        except KeyError:
            if func_str in _forced_float_types:
                if np.issubdtype(a_dtype, np.floating):
                    return a_dtype
                else:
                    return np.dtype(np.float64)
            else:
                if func_str == 'sum':
                    # Try to guess the minimally required int size
                    if np.issubdtype(a_dtype, np.int64):
                        # It's not getting bigger anymore
                        # TODO: strictly speaking it might need float
                        return np.dtype(np.int64)
                    elif np.issubdtype(a_dtype, np.integer):
                        maxval = np.iinfo(a_dtype).max * n
                        return minimum_dtype(maxval, a_dtype)
                    elif np.issubdtype(a_dtype, np.bool_):
                        return minimum_dtype(n, a_dtype)
                    else:
                        # floating, inexact, whatever
                        return a_dtype
                elif func_str in _forced_same_type:
                    return a_dtype
                else:
                    # BUG FIX: a_dtype is an np.dtype instance, so the
                    # original `isinstance(a_dtype, np.integer)` was always
                    # False and integer inputs never widened to int64.
                    if np.issubdtype(a_dtype, np.integer):
                        return np.dtype(np.int64)
                    else:
                        return a_dtype
def check_fill_value(fill_value, dtype):
    """Cast ``fill_value`` to ``dtype``, raising a clear error on failure."""
    try:
        converted = dtype.type(fill_value)
    except ValueError:
        raise ValueError("fill_value must be convertible into %s"
                         % dtype.type.__name__)
    return converted
def check_group_idx(group_idx, a=None, check_min=True):
    """Sanity-check a group index array: size match, dtype, non-negativity."""
    if a is not None and group_idx.size != a.size:
        raise ValueError("The size of group_idx must be the same as "
                         "a.size")
    if not np.issubdtype(group_idx.dtype, np.integer):
        raise TypeError("group_idx must be of integer type")
    if check_min and np.min(group_idx) < 0:
        raise ValueError("group_idx contains negative indices")
def input_validation(group_idx, a, size=None, order='C', axis=None,
                     ravel_group_idx=True, check_bounds=True):
    """ Do some fairly extensive checking of group_idx and a, trying to
    give the user as much help as possible with what is wrong. Also,
    convert ndim-indexing to 1d indexing.

    Returns ``(group_idx, a, flat_size, ndim_idx, size)`` where
    ``group_idx`` and ``a`` are flattened/ravelled as needed, ``flat_size``
    is the total 1d output size, ``ndim_idx`` is the original index
    dimensionality, and ``size`` is the (possibly derived) output shape.
    """
    if not isinstance(a, (int, float, complex)):
        a = np.asanyarray(a)
    group_idx = np.asanyarray(group_idx)
    if not np.issubdtype(group_idx.dtype, np.integer):
        raise TypeError("group_idx must be of integer type")
    # This check works for multidimensional indexing as well
    if check_bounds and np.any(group_idx < 0):
        raise ValueError("negative indices not supported")
    ndim_idx = np.ndim(group_idx)
    ndim_a = np.ndim(a)
    # Deal with the axis arg: if present, then turn 1d indexing into
    # multi-dimensional indexing along the specified axis.
    if axis is None:
        if ndim_a > 1:
            raise ValueError("a must be scalar or 1 dimensional, use .ravel to"
                             " flatten. Alternatively specify axis.")
    elif axis >= ndim_a or axis < -ndim_a:
        raise ValueError("axis arg too large for np.ndim(a)")
    else:
        axis = axis if axis >= 0 else ndim_a + axis  # negative indexing
        if ndim_idx > 1:
            # TODO: we could support a sequence of axis values for multiple
            # dimensions of group_idx.
            raise NotImplementedError("only 1d indexing currently"
                                      "supported with axis arg.")
        elif a.shape[axis] != len(group_idx):
            raise ValueError("a.shape[axis] doesn't match length of group_idx.")
        elif size is not None and not np.isscalar(size):
            raise NotImplementedError("when using axis arg, size must be"
                                      "None or scalar.")
        else:
            # Create the broadcast-ready multidimensional indexing.
            # Note the user could do this themselves, so this is
            # very much just a convenience.
            size_in = np.max(group_idx) + 1 if size is None else size
            group_idx_in = group_idx
            group_idx = []
            size = []
            # build one broadcastable index array per axis of `a`; only
            # the aggregation axis uses the user-supplied group_idx
            for ii, s in enumerate(a.shape):
                ii_idx = group_idx_in if ii == axis else np.arange(s)
                ii_shape = [1] * ndim_a
                ii_shape[ii] = s
                group_idx.append(ii_idx.reshape(ii_shape))
                size.append(size_in if ii == axis else s)
            # Use the indexing, and return. It's a bit simpler than
            # using trying to keep all the logic below happy
            group_idx = np.ravel_multi_index(group_idx, size, order=order,
                                             mode='raise')
            flat_size = np.prod(size)
            ndim_idx = ndim_a
            return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size
    if ndim_idx == 1:
        if size is None:
            size = np.max(group_idx) + 1
        else:
            if not np.isscalar(size):
                raise ValueError("output size must be scalar or None")
            if check_bounds and np.any(group_idx > size - 1):
                raise ValueError("one or more indices are too large for "
                                 "size %d" % size)
        flat_size = size
    else:
        # multidimensional indexing: one row of group_idx per output dim
        if size is None:
            size = np.max(group_idx, axis=1) + 1
        elif np.isscalar(size):
            raise ValueError("output size must be of length %d"
                             % len(group_idx))
        elif len(size) != len(group_idx):
            raise ValueError("%d sizes given, but %d output dimensions "
                             "specified in index" % (len(size),
                                                     len(group_idx)))
        if ravel_group_idx:
            group_idx = np.ravel_multi_index(group_idx, size, order=order,
                                             mode='raise')
        flat_size = np.prod(size)
    if not (np.ndim(a) == 0 or len(a) == group_idx.size):
        raise ValueError("group_idx and a must be of the same length, or a"
                         " can be scalar")
    return group_idx, a, flat_size, ndim_idx, size
### General tools ###
def unpack(group_idx, ret):
    """Expand a packed per-group result back to one value per element.

    Equivalent to the fancy indexing ``ret[group_idx]``.
    """
    return ret[group_idx]
def allnan(x):
    """Return True when every element of x is NaN."""
    return np.all(np.isnan(x))
def anynan(x):
    """Return True when any element of x is NaN."""
    return np.any(np.isnan(x))
def nanfirst(x):
    """Return the first non-NaN element of x (IndexError if all-NaN)."""
    return x[~np.isnan(x)][0]
def nanlast(x):
    """Return the last non-NaN element of x (IndexError if all-NaN)."""
    return x[~np.isnan(x)][-1]
def multi_arange(n):
    """By example:

        #    0  1  2  3  4  5  6  7  8
        n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
        res = [0, 1, 2, 0, 1, 0, 1, 0]

    That is it is equivalent to something like this :

        hstack((arange(n_i) for n_i in n))

    This version seems quite a bit faster, at least for some
    possible inputs, and at any rate it encapsulates a task
    in a function.
    """
    if n.ndim != 1:
        raise ValueError("n is supposed to be 1d array.")
    n_mask = n.astype(bool)
    n_cumsum = np.cumsum(n)
    # steps of +1 everywhere, with a negative jump at each group boundary
    # so the running count restarts at zero after cumsum
    ret = np.ones(n_cumsum[-1] + 1, dtype=int)
    ret[n_cumsum[n_mask]] -= n[n_mask]
    ret[0] -= 1
    return np.cumsum(ret)[:-1]
def label_contiguous_1d(X):
    """
    WARNING: API for this function is not liable to change!!!

    By example:

        X = [F T T F F T F F F T T T]
        result = [0 1 1 0 0 2 0 0 0 3 3 3]

    Or:

        X = [0 3 3 0 0 5 5 5 1 1 0 2]
        result = [0 1 1 0 0 2 2 2 3 3 0 4]

    The ``0`` or ``False`` elements of ``X`` are labeled as ``0`` in the output. If ``X``
    is a boolean array, each contiguous block of ``True`` is given an integer
    label, if ``X`` is not boolean, then each contiguous block of identical values
    is given an integer label. Integer labels are 1, 2, 3,..... (i.e. start a 1
    and increase by 1 for each block with no skipped numbers.)
    """
    if X.ndim != 1:
        raise ValueError("this is for 1d masks only.")
    is_start = np.empty(len(X), dtype=bool)
    is_start[0] = X[0]  # True if X[0] is True or non-zero
    if X.dtype.kind == 'b':
        is_start[1:] = ~X[:-1] & X[1:]
        M = X
    else:
        M = X.astype(bool)
        is_start[1:] = X[:-1] != X[1:]
    # value changes into a zero element must not count as block starts
    is_start[~M] = False
    # cumulative count of starts yields the block labels; zero out falsy spots
    L = np.cumsum(is_start)
    L[~M] = 0
    return L
def relabel_groups_unique(group_idx):
    """Compress group labels so no label numbers are skipped.

    See also ``relabel_groups_masked``.

        group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
        ret:       [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4]

    Description of above: unique groups in input was ``1,2,3,5``, i.e.
    ``4`` was missing, so group 5 was relabled to be ``4``.
    Relabeling maintains order, just "compressing" the higher numbers
    to fill gaps.
    """
    keep_group = np.zeros(np.max(group_idx) + 1, dtype=bool)
    keep_group[0] = True  # the zero group always counts as present
    keep_group[group_idx] = True
    return relabel_groups_masked(group_idx, keep_group)
def relabel_groups_masked(group_idx, keep_group):
    """Drop masked-out groups and relabel the survivors without gaps.

        group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]

                    0 1 2 3 4 5
        keep_group: [0 1 0 1 1 1]

        ret:       [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]

    Description of above in words: remove group 2, and relabel group 3,4, and 5
    to be 2, 3 and 4 respecitvely, in order to fill the gap.  Note that group 4 was never used
    in the input group_idx, but the user supplied mask said to keep group 4, so group
    5 is only moved up by one place to fill the gap created by removing group 2.

    That is, the mask describes which groups to remove,
    the remaining groups are relabled to remove the gaps created by the falsy
    elements in ``keep_group``.  Note that ``keep_group[0]`` has no particular meaning because it refers
    to the zero group which cannot be "removed".

    ``keep_group`` should be bool and ``group_idx`` int.
    Values in ``group_idx`` can be in any order and may repeat.
    """
    # only copy the mask when we are about to mutate element 0
    keep_group = keep_group.astype(bool, copy=not keep_group[0])
    if not keep_group[0]:  # ensuring keep_group[0] is True makes life easier
        keep_group[0] = True
    relabel = np.zeros(keep_group.size, dtype=group_idx.dtype)
    relabel[keep_group] = np.arange(np.count_nonzero(keep_group))
    return relabel[group_idx]
|
ml31415/numpy-groupies | numpy_groupies/aggregate_numpy.py | _array | python | def _array(group_idx, a, size, fill_value, dtype=None):
if fill_value is not None and not (np.isscalar(fill_value) or
len(fill_value) == 0):
raise ValueError("fill_value must be None, a scalar or an empty "
"sequence")
order_group_idx = np.argsort(group_idx, kind='mergesort')
counts = np.bincount(group_idx, minlength=size)
ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
ret = np.asanyarray(ret)
if fill_value is None or np.isscalar(fill_value):
_fill_untouched(group_idx, ret, fill_value)
return ret | groups a into separate arrays, keeping the order intact. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L188-L200 | null | import numpy as np
from .utils import check_boolean, funcs_no_separate_nan, get_func, aggregate_common_doc, isstr
from .utils_numpy import (aliasing, minimum_dtype, input_validation,
check_dtype, minimum_dtype_scalar)
def _sum(group_idx, a, size, fill_value, dtype=None):
    """Per-group sum of ``a`` via ``np.bincount``; empty groups get ``fill_value``."""
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    if np.ndim(a) == 0:
        # scalar a: the sum of each group is just its count times a
        ret = np.bincount(group_idx, minlength=size).astype(dtype)
        if a != 1:
            ret *= a
    else:
        if np.iscomplexobj(a):
            # bincount has no complex support; sum real/imag parts separately
            ret = np.empty(size, dtype=dtype)
            ret.real = np.bincount(group_idx, weights=a.real,
                                   minlength=size)
            ret.imag = np.bincount(group_idx, weights=a.imag,
                                   minlength=size)
        else:
            ret = np.bincount(group_idx, weights=a,
                              minlength=size).astype(dtype)
    if fill_value != 0:
        # bincount leaves empty groups at 0; overwrite with fill_value
        _fill_untouched(group_idx, ret, fill_value)
    return ret
def _prod(group_idx, a, size, fill_value, dtype=None):
    """Per-group product of ``a``; untouched groups hold ``fill_value``."""
    out_dtype = minimum_dtype_scalar(fill_value, dtype, a)
    out = np.full(size, fill_value, dtype=out_dtype)
    if fill_value != 1:
        # seed touched groups with the multiplicative identity
        out[group_idx] = 1
    np.multiply.at(out, group_idx, a)
    return out
def _len(group_idx, a, size, fill_value, dtype=None):
    """Per-group element count (ignores the values in ``a``)."""
    return _sum(group_idx, 1, size, fill_value, dtype=int)
def _last(group_idx, a, size, fill_value, dtype=None):
    """Per-group last value.

    Repeated fancy-index assignment keeps the final write for each index —
    see the phrase "leaving behind the last value" on
    http://wiki.scipy.org/Tentative_NumPy_Tutorial
    """
    dtype = minimum_dtype(fill_value, dtype or a.dtype)
    out = np.full(size, fill_value, dtype=dtype)
    out[group_idx] = a
    return out
def _first(group_idx, a, size, fill_value, dtype=None):
    """Per-group first value: same trick as ``_last``, but in reverse."""
    dtype = minimum_dtype(fill_value, dtype or a.dtype)
    out = np.full(size, fill_value, dtype=dtype)
    out[group_idx[::-1]] = a[::-1]
    return out
def _all(group_idx, a, size, fill_value, dtype=None):
    """Per-group logical AND over the truthiness of ``a``."""
    check_boolean(fill_value)
    out = np.full(size, fill_value, dtype=bool)
    if not fill_value:
        out[group_idx] = True  # touched groups start True
    falsy_groups = group_idx.compress(np.logical_not(a))
    out[falsy_groups] = False
    return out
def _any(group_idx, a, size, fill_value, dtype=None):
    """Per-group logical OR over the truthiness of ``a``."""
    check_boolean(fill_value)
    out = np.full(size, fill_value, dtype=bool)
    if fill_value:
        out[group_idx] = False  # touched groups start False
    truthy_groups = group_idx.compress(a)
    out[truthy_groups] = True
    return out
def _min(group_idx, a, size, fill_value, dtype=None):
    """Per-group minimum via the unbuffered ufunc ``np.minimum.at``."""
    dtype = minimum_dtype(fill_value, dtype or a.dtype)
    if issubclass(a.dtype.type, np.integer):
        dmax = np.iinfo(a.dtype).max
    else:
        dmax = np.finfo(a.dtype).max
    out = np.full(size, fill_value, dtype=dtype)
    if fill_value != dmax:
        out[group_idx] = dmax  # min starts from maximum
    np.minimum.at(out, group_idx, a)
    return out
def _max(group_idx, a, size, fill_value, dtype=None):
    """Per-group maximum via the unbuffered ufunc ``np.maximum.at``."""
    dtype = minimum_dtype(fill_value, dtype or a.dtype)
    if issubclass(a.dtype.type, np.integer):
        dmin = np.iinfo(a.dtype).min
    else:
        dmin = np.finfo(a.dtype).min
    out = np.full(size, fill_value, dtype=dtype)
    if fill_value != dmin:
        out[group_idx] = dmin  # max starts from minimum
    np.maximum.at(out, group_idx, a)
    return out
def _argmax(group_idx, a, size, fill_value, dtype=None):
    """Per-group index (into ``a``) of the maximum value.

    Ties are resolved in favour of the first occurrence within each group.
    """
    dtype = minimum_dtype(fill_value, dtype or int)
    dmin = np.iinfo(a.dtype).min if issubclass(a.dtype.type, np.integer)\
        else np.finfo(a.dtype).min
    group_max = _max(group_idx, a, size, dmin)
    # mark every element equal to its group's maximum
    is_max = a == group_max[group_idx]
    ret = np.full(size, fill_value, dtype=dtype)
    group_idx_max = group_idx[is_max]
    argmax, = is_max.nonzero()
    ret[group_idx_max[::-1]] = argmax[::-1]  # reverse to ensure first value for each group wins
    return ret
def _argmin(group_idx, a, size, fill_value, dtype=None):
    """Per-group index (into ``a``) of the minimum value.

    Ties are resolved in favour of the first occurrence within each group.
    """
    dtype = minimum_dtype(fill_value, dtype or int)
    dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
        else np.finfo(a.dtype).max
    group_min = _min(group_idx, a, size, dmax)
    # mark every element equal to its group's minimum
    is_min = a == group_min[group_idx]
    ret = np.full(size, fill_value, dtype=dtype)
    group_idx_min = group_idx[is_min]
    argmin, = is_min.nonzero()
    ret[group_idx_min[::-1]] = argmin[::-1]  # reverse to ensure first value for each group wins
    return ret
def _mean(group_idx, a, size, fill_value, dtype=np.dtype(np.float64)):
if np.ndim(a) == 0:
raise ValueError("cannot take mean with scalar a")
counts = np.bincount(group_idx, minlength=size)
if np.iscomplexobj(a):
dtype = a.dtype # TODO: this is a bit clumsy
sums = np.empty(size, dtype=dtype)
sums.real = np.bincount(group_idx, weights=a.real,
minlength=size)
sums.imag = np.bincount(group_idx, weights=a.imag,
minlength=size)
else:
sums = np.bincount(group_idx, weights=a,
minlength=size).astype(dtype)
with np.errstate(divide='ignore', invalid='ignore'):
ret = sums.astype(dtype) / counts
if not np.isnan(fill_value):
ret[counts == 0] = fill_value
return ret
def _var(group_idx, a, size, fill_value, dtype=np.dtype(np.float64),
sqrt=False, ddof=0):
if np.ndim(a) == 0:
raise ValueError("cannot take variance with scalar a")
counts = np.bincount(group_idx, minlength=size)
sums = np.bincount(group_idx, weights=a, minlength=size)
with np.errstate(divide='ignore'):
means = sums.astype(dtype) / counts
ret = np.bincount(group_idx, (a - means[group_idx]) ** 2,
minlength=size) / (counts - ddof)
if sqrt:
ret = np.sqrt(ret) # this is now std not var
if not np.isnan(fill_value):
ret[counts == 0] = fill_value
return ret
def _std(group_idx, a, size, fill_value, dtype=np.dtype(np.float64), ddof=0):
    """Per-group standard deviation; delegates to ``_var`` with ``sqrt=True``."""
    return _var(group_idx, a, size, fill_value, dtype=dtype, sqrt=True,
                ddof=ddof)
def _allnan(group_idx, a, size, fill_value, dtype=bool):
    """True per group when every element of the group is NaN."""
    return _all(group_idx, np.isnan(a), size, fill_value=fill_value,
                dtype=dtype)
def _anynan(group_idx, a, size, fill_value, dtype=bool):
    """True per group when any element of the group is NaN."""
    return _any(group_idx, np.isnan(a), size, fill_value=fill_value,
                dtype=dtype)
def _sort(group_idx, a, size=None, fill_value=None, dtype=None, reverse=False):
sortidx = np.lexsort((-a if reverse else a, group_idx))
# Reverse sorting back to into grouped order, but preserving groupwise sorting
revidx = np.argsort(np.argsort(group_idx, kind='mergesort'), kind='mergesort')
return a[sortidx][revidx]
def _generic_callable(group_idx, a, size, fill_value, dtype=None,
                      func=lambda g: g, **kwargs):
    """Group ``a`` by ``group_idx``, apply ``func`` to each group in turn,
    and place the results in an array of length ``size``."""
    groups = _array(group_idx, a, size, ())
    ret = np.full(size, fill_value, dtype=dtype or np.float64)
    for i, grp in enumerate(groups):
        # only call func for groups that actually received elements
        if np.ndim(grp) == 1 and len(grp) > 0:
            ret[i] = func(grp)
    return ret
def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
    """
    N to N aggregate operation of cumsum. Perform cumulative sum for each group.

        group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
        a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
        _cumsum(group_idx, a, np.max(group_idx) + 1)
        >>> array([ 3,  4,  5,  6, 15,  9, 15, 22,  7,  0, 15, 17,  6, 14, 31, 39])
    """
    # stable sort by group so each group's elements become contiguous
    sortidx = np.argsort(group_idx, kind='mergesort')
    invsortidx = np.argsort(sortidx, kind='mergesort')
    group_idx_srt = group_idx[sortidx]
    a_srt = a[sortidx]
    a_srt_cumsum = np.cumsum(a_srt, dtype=dtype)
    increasing = np.arange(len(a), dtype=int)
    # position (in sorted order) where each element's group starts
    group_starts = _min(group_idx_srt, increasing, size, fill_value=0)[group_idx_srt]
    # subtract the running total accumulated before each group began
    a_srt_cumsum += -a_srt_cumsum[group_starts] + a_srt[group_starts]
    return a_srt_cumsum[invsortidx]
def _nancumsum(group_idx, a, size, fill_value=None, dtype=None):
    """Per-group cumulative sum treating NaNs in ``a`` as zero."""
    a_nonans = np.where(np.isnan(a), 0, a)
    # NOTE(review): group_idx is integer-typed by the time it gets here, so
    # np.isnan(group_idx) looks like it is always False — confirm whether
    # this nan-group redirection is reachable.
    group_idx_nonans = np.where(np.isnan(group_idx), np.nanmax(group_idx) + 1, group_idx)
    return _cumsum(group_idx_nonans, a_nonans, size, fill_value=fill_value, dtype=dtype)
# Map canonical function names to their numpy implementations; nan-variants
# reuse the plain implementation (NaN stripping happens in _aggregate_base).
_impl_dict = dict(min=_min, max=_max, sum=_sum, prod=_prod, last=_last,
                  first=_first, all=_all, any=_any, mean=_mean, std=_std,
                  var=_var, anynan=_anynan, allnan=_allnan, sort=_sort,
                  array=_array, argmax=_argmax, argmin=_argmin, len=_len,
                  cumsum=_cumsum, generic=_generic_callable)
_impl_dict.update(('nan' + k, v) for k, v in list(_impl_dict.items())
                  if k not in funcs_no_separate_nan)
def _aggregate_base(group_idx, a, func='sum', size=None, fill_value=0,
                    order='C', dtype=None, axis=None, _impl_dict=_impl_dict,
                    _nansqueeze=False, cache=None, **kwargs):
    """Shared driver: validate inputs, resolve ``func``, dispatch, reshape.

    ``_impl_dict`` maps canonical names to implementations; ``_nansqueeze``
    makes nan-variants strip NaN elements before dispatching.
    """
    group_idx, a, flat_size, ndim_idx, size = input_validation(group_idx, a,
                                                               size=size, order=order, axis=axis)
    func = get_func(func, aliasing, _impl_dict)
    if not isstr(func):
        # do simple grouping and execute function in loop
        ret = _impl_dict.get('generic', _generic_callable)(group_idx, a, flat_size, fill_value, func=func,
                                                           dtype=dtype, **kwargs)
    else:
        # deal with nans and find the function
        if func.startswith('nan'):
            if np.ndim(a) == 0:
                raise ValueError("nan-version not supported for scalar input.")
            if _nansqueeze:
                # drop NaN elements (and their group indices) up front
                good = ~np.isnan(a)
                a = a[good]
                group_idx = group_idx[good]
        dtype = check_dtype(dtype, func, a, flat_size)
        func = _impl_dict[func]
        ret = func(group_idx, a, flat_size, fill_value=fill_value, dtype=dtype,
                   **kwargs)
    # deal with ndimensional indexing
    if ndim_idx > 1:
        ret = ret.reshape(size, order=order)
    return ret
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
              dtype=None, axis=None, **kwargs):
    # Thin public entry point; its __doc__ is attached separately from
    # aggregate_common_doc.  _nansqueeze=True: nan-variants strip NaNs first.
    return _aggregate_base(group_idx, a, size=size, fill_value=fill_value,
                           order=order, dtype=dtype, func=func, axis=axis,
                           _impl_dict=_impl_dict, _nansqueeze=True, **kwargs)
aggregate.__doc__ = """
This is the pure numpy implementation of aggregate.
""" + aggregate_common_doc
def _fill_untouched(idx, ret, fill_value):
"""any elements of ret not indexed by idx are set to fill_value."""
untouched = np.ones_like(ret, dtype=bool)
untouched[idx] = False
ret[untouched] = fill_value
|
ml31415/numpy-groupies | numpy_groupies/aggregate_numpy.py | _generic_callable | python | def _generic_callable(group_idx, a, size, fill_value, dtype=None,
func=lambda g: g, **kwargs):
groups = _array(group_idx, a, size, ())
ret = np.full(size, fill_value, dtype=dtype or np.float64)
for i, grp in enumerate(groups):
if np.ndim(grp) == 1 and len(grp) > 0:
ret[i] = func(grp)
return ret | groups a by inds, and then applies foo to each group in turn, placing
the results in an array. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L203-L213 | null | import numpy as np
from .utils import check_boolean, funcs_no_separate_nan, get_func, aggregate_common_doc, isstr
from .utils_numpy import (aliasing, minimum_dtype, input_validation,
check_dtype, minimum_dtype_scalar)
def _sum(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype_scalar(fill_value, dtype, a)
if np.ndim(a) == 0:
ret = np.bincount(group_idx, minlength=size).astype(dtype)
if a != 1:
ret *= a
else:
if np.iscomplexobj(a):
ret = np.empty(size, dtype=dtype)
ret.real = np.bincount(group_idx, weights=a.real,
minlength=size)
ret.imag = np.bincount(group_idx, weights=a.imag,
minlength=size)
else:
ret = np.bincount(group_idx, weights=a,
minlength=size).astype(dtype)
if fill_value != 0:
_fill_untouched(group_idx, ret, fill_value)
return ret
def _prod(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype_scalar(fill_value, dtype, a)
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != 1:
ret[group_idx] = 1 # product starts from 1
np.multiply.at(ret, group_idx, a)
return ret
def _len(group_idx, a, size, fill_value, dtype=None):
return _sum(group_idx, 1, size, fill_value, dtype=int)
def _last(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
ret = np.full(size, fill_value, dtype=dtype)
# repeated indexing gives last value, see:
# the phrase "leaving behind the last value" on this page:
# http://wiki.scipy.org/Tentative_NumPy_Tutorial
ret[group_idx] = a
return ret
def _first(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
ret = np.full(size, fill_value, dtype=dtype)
ret[group_idx[::-1]] = a[::-1] # same trick as _last, but in reverse
return ret
def _all(group_idx, a, size, fill_value, dtype=None):
check_boolean(fill_value)
ret = np.full(size, fill_value, dtype=bool)
if not fill_value:
ret[group_idx] = True
ret[group_idx.compress(np.logical_not(a))] = False
return ret
def _any(group_idx, a, size, fill_value, dtype=None):
check_boolean(fill_value)
ret = np.full(size, fill_value, dtype=bool)
if fill_value:
ret[group_idx] = False
ret[group_idx.compress(a)] = True
return ret
def _min(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).max
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != dmax:
ret[group_idx] = dmax # min starts from maximum
np.minimum.at(ret, group_idx, a)
return ret
def _max(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
dmin = np.iinfo(a.dtype).min if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).min
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != dmin:
ret[group_idx] = dmin # max starts from minimum
np.maximum.at(ret, group_idx, a)
return ret
def _argmax(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or int)
dmin = np.iinfo(a.dtype).min if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).min
group_max = _max(group_idx, a, size, dmin)
is_max = a == group_max[group_idx]
ret = np.full(size, fill_value, dtype=dtype)
group_idx_max = group_idx[is_max]
argmax, = is_max.nonzero()
ret[group_idx_max[::-1]] = argmax[::-1] # reverse to ensure first value for each group wins
return ret
def _argmin(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or int)
dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).max
group_min = _min(group_idx, a, size, dmax)
is_min = a == group_min[group_idx]
ret = np.full(size, fill_value, dtype=dtype)
group_idx_min = group_idx[is_min]
argmin, = is_min.nonzero()
ret[group_idx_min[::-1]] = argmin[::-1] # reverse to ensure first value for each group wins
return ret
def _mean(group_idx, a, size, fill_value, dtype=np.dtype(np.float64)):
if np.ndim(a) == 0:
raise ValueError("cannot take mean with scalar a")
counts = np.bincount(group_idx, minlength=size)
if np.iscomplexobj(a):
dtype = a.dtype # TODO: this is a bit clumsy
sums = np.empty(size, dtype=dtype)
sums.real = np.bincount(group_idx, weights=a.real,
minlength=size)
sums.imag = np.bincount(group_idx, weights=a.imag,
minlength=size)
else:
sums = np.bincount(group_idx, weights=a,
minlength=size).astype(dtype)
with np.errstate(divide='ignore', invalid='ignore'):
ret = sums.astype(dtype) / counts
if not np.isnan(fill_value):
ret[counts == 0] = fill_value
return ret
def _var(group_idx, a, size, fill_value, dtype=np.dtype(np.float64),
sqrt=False, ddof=0):
if np.ndim(a) == 0:
raise ValueError("cannot take variance with scalar a")
counts = np.bincount(group_idx, minlength=size)
sums = np.bincount(group_idx, weights=a, minlength=size)
with np.errstate(divide='ignore'):
means = sums.astype(dtype) / counts
ret = np.bincount(group_idx, (a - means[group_idx]) ** 2,
minlength=size) / (counts - ddof)
if sqrt:
ret = np.sqrt(ret) # this is now std not var
if not np.isnan(fill_value):
ret[counts == 0] = fill_value
return ret
def _std(group_idx, a, size, fill_value, dtype=np.dtype(np.float64), ddof=0):
return _var(group_idx, a, size, fill_value, dtype=dtype, sqrt=True,
ddof=ddof)
def _allnan(group_idx, a, size, fill_value, dtype=bool):
return _all(group_idx, np.isnan(a), size, fill_value=fill_value,
dtype=dtype)
def _anynan(group_idx, a, size, fill_value, dtype=bool):
return _any(group_idx, np.isnan(a), size, fill_value=fill_value,
dtype=dtype)
def _sort(group_idx, a, size=None, fill_value=None, dtype=None, reverse=False):
sortidx = np.lexsort((-a if reverse else a, group_idx))
# Reverse sorting back to into grouped order, but preserving groupwise sorting
revidx = np.argsort(np.argsort(group_idx, kind='mergesort'), kind='mergesort')
return a[sortidx][revidx]
def _array(group_idx, a, size, fill_value, dtype=None):
"""groups a into separate arrays, keeping the order intact."""
if fill_value is not None and not (np.isscalar(fill_value) or
len(fill_value) == 0):
raise ValueError("fill_value must be None, a scalar or an empty "
"sequence")
order_group_idx = np.argsort(group_idx, kind='mergesort')
counts = np.bincount(group_idx, minlength=size)
ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
ret = np.asanyarray(ret)
if fill_value is None or np.isscalar(fill_value):
_fill_untouched(group_idx, ret, fill_value)
return ret
def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
"""
N to N aggregate operation of cumsum. Perform cumulative sum for each group.
group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
_cumsum(group_idx, a, np.max(group_idx) + 1)
>>> array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39])
"""
sortidx = np.argsort(group_idx, kind='mergesort')
invsortidx = np.argsort(sortidx, kind='mergesort')
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
a_srt_cumsum = np.cumsum(a_srt, dtype=dtype)
increasing = np.arange(len(a), dtype=int)
group_starts = _min(group_idx_srt, increasing, size, fill_value=0)[group_idx_srt]
a_srt_cumsum += -a_srt_cumsum[group_starts] + a_srt[group_starts]
return a_srt_cumsum[invsortidx]
def _nancumsum(group_idx, a, size, fill_value=None, dtype=None):
a_nonans = np.where(np.isnan(a), 0, a)
group_idx_nonans = np.where(np.isnan(group_idx), np.nanmax(group_idx) + 1, group_idx)
return _cumsum(group_idx_nonans, a_nonans, size, fill_value=fill_value, dtype=dtype)
_impl_dict = dict(min=_min, max=_max, sum=_sum, prod=_prod, last=_last,
first=_first, all=_all, any=_any, mean=_mean, std=_std,
var=_var, anynan=_anynan, allnan=_allnan, sort=_sort,
array=_array, argmax=_argmax, argmin=_argmin, len=_len,
cumsum=_cumsum, generic=_generic_callable)
_impl_dict.update(('nan' + k, v) for k, v in list(_impl_dict.items())
if k not in funcs_no_separate_nan)
def _aggregate_base(group_idx, a, func='sum', size=None, fill_value=0,
order='C', dtype=None, axis=None, _impl_dict=_impl_dict,
_nansqueeze=False, cache=None, **kwargs):
group_idx, a, flat_size, ndim_idx, size = input_validation(group_idx, a,
size=size, order=order, axis=axis)
func = get_func(func, aliasing, _impl_dict)
if not isstr(func):
# do simple grouping and execute function in loop
ret = _impl_dict.get('generic', _generic_callable)(group_idx, a, flat_size, fill_value, func=func,
dtype=dtype, **kwargs)
else:
# deal with nans and find the function
if func.startswith('nan'):
if np.ndim(a) == 0:
raise ValueError("nan-version not supported for scalar input.")
if _nansqueeze:
good = ~np.isnan(a)
a = a[good]
group_idx = group_idx[good]
dtype = check_dtype(dtype, func, a, flat_size)
func = _impl_dict[func]
ret = func(group_idx, a, flat_size, fill_value=fill_value, dtype=dtype,
**kwargs)
# deal with ndimensional indexing
if ndim_idx > 1:
ret = ret.reshape(size, order=order)
return ret
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
dtype=None, axis=None, **kwargs):
return _aggregate_base(group_idx, a, size=size, fill_value=fill_value,
order=order, dtype=dtype, func=func, axis=axis,
_impl_dict=_impl_dict, _nansqueeze=True, **kwargs)
aggregate.__doc__ = """
This is the pure numpy implementation of aggregate.
""" + aggregate_common_doc
def _fill_untouched(idx, ret, fill_value):
"""any elements of ret not indexed by idx are set to fill_value."""
untouched = np.ones_like(ret, dtype=bool)
untouched[idx] = False
ret[untouched] = fill_value
|
ml31415/numpy-groupies | numpy_groupies/aggregate_numpy.py | _cumsum | python | def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
sortidx = np.argsort(group_idx, kind='mergesort')
invsortidx = np.argsort(sortidx, kind='mergesort')
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
a_srt_cumsum = np.cumsum(a_srt, dtype=dtype)
increasing = np.arange(len(a), dtype=int)
group_starts = _min(group_idx_srt, increasing, size, fill_value=0)[group_idx_srt]
a_srt_cumsum += -a_srt_cumsum[group_starts] + a_srt[group_starts]
return a_srt_cumsum[invsortidx] | N to N aggregate operation of cumsum. Perform cumulative sum for each group.
group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
_cumsum(group_idx, a, np.max(group_idx) + 1)
>>> array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39]) | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L216-L235 | null | import numpy as np
from .utils import check_boolean, funcs_no_separate_nan, get_func, aggregate_common_doc, isstr
from .utils_numpy import (aliasing, minimum_dtype, input_validation,
check_dtype, minimum_dtype_scalar)
def _sum(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype_scalar(fill_value, dtype, a)
if np.ndim(a) == 0:
ret = np.bincount(group_idx, minlength=size).astype(dtype)
if a != 1:
ret *= a
else:
if np.iscomplexobj(a):
ret = np.empty(size, dtype=dtype)
ret.real = np.bincount(group_idx, weights=a.real,
minlength=size)
ret.imag = np.bincount(group_idx, weights=a.imag,
minlength=size)
else:
ret = np.bincount(group_idx, weights=a,
minlength=size).astype(dtype)
if fill_value != 0:
_fill_untouched(group_idx, ret, fill_value)
return ret
def _prod(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype_scalar(fill_value, dtype, a)
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != 1:
ret[group_idx] = 1 # product starts from 1
np.multiply.at(ret, group_idx, a)
return ret
def _len(group_idx, a, size, fill_value, dtype=None):
return _sum(group_idx, 1, size, fill_value, dtype=int)
def _last(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
ret = np.full(size, fill_value, dtype=dtype)
# repeated indexing gives last value, see:
# the phrase "leaving behind the last value" on this page:
# http://wiki.scipy.org/Tentative_NumPy_Tutorial
ret[group_idx] = a
return ret
def _first(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
ret = np.full(size, fill_value, dtype=dtype)
ret[group_idx[::-1]] = a[::-1] # same trick as _last, but in reverse
return ret
def _all(group_idx, a, size, fill_value, dtype=None):
check_boolean(fill_value)
ret = np.full(size, fill_value, dtype=bool)
if not fill_value:
ret[group_idx] = True
ret[group_idx.compress(np.logical_not(a))] = False
return ret
def _any(group_idx, a, size, fill_value, dtype=None):
check_boolean(fill_value)
ret = np.full(size, fill_value, dtype=bool)
if fill_value:
ret[group_idx] = False
ret[group_idx.compress(a)] = True
return ret
def _min(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).max
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != dmax:
ret[group_idx] = dmax # min starts from maximum
np.minimum.at(ret, group_idx, a)
return ret
def _max(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
dmin = np.iinfo(a.dtype).min if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).min
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != dmin:
ret[group_idx] = dmin # max starts from minimum
np.maximum.at(ret, group_idx, a)
return ret
def _argmax(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or int)
dmin = np.iinfo(a.dtype).min if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).min
group_max = _max(group_idx, a, size, dmin)
is_max = a == group_max[group_idx]
ret = np.full(size, fill_value, dtype=dtype)
group_idx_max = group_idx[is_max]
argmax, = is_max.nonzero()
ret[group_idx_max[::-1]] = argmax[::-1] # reverse to ensure first value for each group wins
return ret
def _argmin(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or int)
dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).max
group_min = _min(group_idx, a, size, dmax)
is_min = a == group_min[group_idx]
ret = np.full(size, fill_value, dtype=dtype)
group_idx_min = group_idx[is_min]
argmin, = is_min.nonzero()
ret[group_idx_min[::-1]] = argmin[::-1] # reverse to ensure first value for each group wins
return ret
def _mean(group_idx, a, size, fill_value, dtype=np.dtype(np.float64)):
if np.ndim(a) == 0:
raise ValueError("cannot take mean with scalar a")
counts = np.bincount(group_idx, minlength=size)
if np.iscomplexobj(a):
dtype = a.dtype # TODO: this is a bit clumsy
sums = np.empty(size, dtype=dtype)
sums.real = np.bincount(group_idx, weights=a.real,
minlength=size)
sums.imag = np.bincount(group_idx, weights=a.imag,
minlength=size)
else:
sums = np.bincount(group_idx, weights=a,
minlength=size).astype(dtype)
with np.errstate(divide='ignore', invalid='ignore'):
ret = sums.astype(dtype) / counts
if not np.isnan(fill_value):
ret[counts == 0] = fill_value
return ret
def _var(group_idx, a, size, fill_value, dtype=np.dtype(np.float64),
sqrt=False, ddof=0):
if np.ndim(a) == 0:
raise ValueError("cannot take variance with scalar a")
counts = np.bincount(group_idx, minlength=size)
sums = np.bincount(group_idx, weights=a, minlength=size)
with np.errstate(divide='ignore'):
means = sums.astype(dtype) / counts
ret = np.bincount(group_idx, (a - means[group_idx]) ** 2,
minlength=size) / (counts - ddof)
if sqrt:
ret = np.sqrt(ret) # this is now std not var
if not np.isnan(fill_value):
ret[counts == 0] = fill_value
return ret
def _std(group_idx, a, size, fill_value, dtype=np.dtype(np.float64), ddof=0):
return _var(group_idx, a, size, fill_value, dtype=dtype, sqrt=True,
ddof=ddof)
def _allnan(group_idx, a, size, fill_value, dtype=bool):
return _all(group_idx, np.isnan(a), size, fill_value=fill_value,
dtype=dtype)
def _anynan(group_idx, a, size, fill_value, dtype=bool):
return _any(group_idx, np.isnan(a), size, fill_value=fill_value,
dtype=dtype)
def _sort(group_idx, a, size=None, fill_value=None, dtype=None, reverse=False):
sortidx = np.lexsort((-a if reverse else a, group_idx))
# Reverse sorting back to into grouped order, but preserving groupwise sorting
revidx = np.argsort(np.argsort(group_idx, kind='mergesort'), kind='mergesort')
return a[sortidx][revidx]
def _array(group_idx, a, size, fill_value, dtype=None):
"""groups a into separate arrays, keeping the order intact."""
if fill_value is not None and not (np.isscalar(fill_value) or
len(fill_value) == 0):
raise ValueError("fill_value must be None, a scalar or an empty "
"sequence")
order_group_idx = np.argsort(group_idx, kind='mergesort')
counts = np.bincount(group_idx, minlength=size)
ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
ret = np.asanyarray(ret)
if fill_value is None or np.isscalar(fill_value):
_fill_untouched(group_idx, ret, fill_value)
return ret
def _generic_callable(group_idx, a, size, fill_value, dtype=None,
func=lambda g: g, **kwargs):
"""groups a by inds, and then applies foo to each group in turn, placing
the results in an array."""
groups = _array(group_idx, a, size, ())
ret = np.full(size, fill_value, dtype=dtype or np.float64)
for i, grp in enumerate(groups):
if np.ndim(grp) == 1 and len(grp) > 0:
ret[i] = func(grp)
return ret
def _nancumsum(group_idx, a, size, fill_value=None, dtype=None):
a_nonans = np.where(np.isnan(a), 0, a)
group_idx_nonans = np.where(np.isnan(group_idx), np.nanmax(group_idx) + 1, group_idx)
return _cumsum(group_idx_nonans, a_nonans, size, fill_value=fill_value, dtype=dtype)
_impl_dict = dict(min=_min, max=_max, sum=_sum, prod=_prod, last=_last,
first=_first, all=_all, any=_any, mean=_mean, std=_std,
var=_var, anynan=_anynan, allnan=_allnan, sort=_sort,
array=_array, argmax=_argmax, argmin=_argmin, len=_len,
cumsum=_cumsum, generic=_generic_callable)
_impl_dict.update(('nan' + k, v) for k, v in list(_impl_dict.items())
if k not in funcs_no_separate_nan)
def _aggregate_base(group_idx, a, func='sum', size=None, fill_value=0,
order='C', dtype=None, axis=None, _impl_dict=_impl_dict,
_nansqueeze=False, cache=None, **kwargs):
group_idx, a, flat_size, ndim_idx, size = input_validation(group_idx, a,
size=size, order=order, axis=axis)
func = get_func(func, aliasing, _impl_dict)
if not isstr(func):
# do simple grouping and execute function in loop
ret = _impl_dict.get('generic', _generic_callable)(group_idx, a, flat_size, fill_value, func=func,
dtype=dtype, **kwargs)
else:
# deal with nans and find the function
if func.startswith('nan'):
if np.ndim(a) == 0:
raise ValueError("nan-version not supported for scalar input.")
if _nansqueeze:
good = ~np.isnan(a)
a = a[good]
group_idx = group_idx[good]
dtype = check_dtype(dtype, func, a, flat_size)
func = _impl_dict[func]
ret = func(group_idx, a, flat_size, fill_value=fill_value, dtype=dtype,
**kwargs)
# deal with ndimensional indexing
if ndim_idx > 1:
ret = ret.reshape(size, order=order)
return ret
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
dtype=None, axis=None, **kwargs):
return _aggregate_base(group_idx, a, size=size, fill_value=fill_value,
order=order, dtype=dtype, func=func, axis=axis,
_impl_dict=_impl_dict, _nansqueeze=True, **kwargs)
aggregate.__doc__ = """
This is the pure numpy implementation of aggregate.
""" + aggregate_common_doc
def _fill_untouched(idx, ret, fill_value):
"""any elements of ret not indexed by idx are set to fill_value."""
untouched = np.ones_like(ret, dtype=bool)
untouched[idx] = False
ret[untouched] = fill_value
|
ml31415/numpy-groupies | numpy_groupies/aggregate_numpy.py | _fill_untouched | python | def _fill_untouched(idx, ret, fill_value):
untouched = np.ones_like(ret, dtype=bool)
untouched[idx] = False
ret[untouched] = fill_value | any elements of ret not indexed by idx are set to fill_value. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L296-L300 | null | import numpy as np
from .utils import check_boolean, funcs_no_separate_nan, get_func, aggregate_common_doc, isstr
from .utils_numpy import (aliasing, minimum_dtype, input_validation,
check_dtype, minimum_dtype_scalar)
def _sum(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype_scalar(fill_value, dtype, a)
if np.ndim(a) == 0:
ret = np.bincount(group_idx, minlength=size).astype(dtype)
if a != 1:
ret *= a
else:
if np.iscomplexobj(a):
ret = np.empty(size, dtype=dtype)
ret.real = np.bincount(group_idx, weights=a.real,
minlength=size)
ret.imag = np.bincount(group_idx, weights=a.imag,
minlength=size)
else:
ret = np.bincount(group_idx, weights=a,
minlength=size).astype(dtype)
if fill_value != 0:
_fill_untouched(group_idx, ret, fill_value)
return ret
def _prod(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype_scalar(fill_value, dtype, a)
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != 1:
ret[group_idx] = 1 # product starts from 1
np.multiply.at(ret, group_idx, a)
return ret
def _len(group_idx, a, size, fill_value, dtype=None):
return _sum(group_idx, 1, size, fill_value, dtype=int)
def _last(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
ret = np.full(size, fill_value, dtype=dtype)
# repeated indexing gives last value, see:
# the phrase "leaving behind the last value" on this page:
# http://wiki.scipy.org/Tentative_NumPy_Tutorial
ret[group_idx] = a
return ret
def _first(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
ret = np.full(size, fill_value, dtype=dtype)
ret[group_idx[::-1]] = a[::-1] # same trick as _last, but in reverse
return ret
def _all(group_idx, a, size, fill_value, dtype=None):
check_boolean(fill_value)
ret = np.full(size, fill_value, dtype=bool)
if not fill_value:
ret[group_idx] = True
ret[group_idx.compress(np.logical_not(a))] = False
return ret
def _any(group_idx, a, size, fill_value, dtype=None):
check_boolean(fill_value)
ret = np.full(size, fill_value, dtype=bool)
if fill_value:
ret[group_idx] = False
ret[group_idx.compress(a)] = True
return ret
def _min(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).max
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != dmax:
ret[group_idx] = dmax # min starts from maximum
np.minimum.at(ret, group_idx, a)
return ret
def _max(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or a.dtype)
dmin = np.iinfo(a.dtype).min if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).min
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != dmin:
ret[group_idx] = dmin # max starts from minimum
np.maximum.at(ret, group_idx, a)
return ret
def _argmax(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or int)
dmin = np.iinfo(a.dtype).min if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).min
group_max = _max(group_idx, a, size, dmin)
is_max = a == group_max[group_idx]
ret = np.full(size, fill_value, dtype=dtype)
group_idx_max = group_idx[is_max]
argmax, = is_max.nonzero()
ret[group_idx_max[::-1]] = argmax[::-1] # reverse to ensure first value for each group wins
return ret
def _argmin(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype(fill_value, dtype or int)
dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).max
group_min = _min(group_idx, a, size, dmax)
is_min = a == group_min[group_idx]
ret = np.full(size, fill_value, dtype=dtype)
group_idx_min = group_idx[is_min]
argmin, = is_min.nonzero()
ret[group_idx_min[::-1]] = argmin[::-1] # reverse to ensure first value for each group wins
return ret
def _mean(group_idx, a, size, fill_value, dtype=np.dtype(np.float64)):
if np.ndim(a) == 0:
raise ValueError("cannot take mean with scalar a")
counts = np.bincount(group_idx, minlength=size)
if np.iscomplexobj(a):
dtype = a.dtype # TODO: this is a bit clumsy
sums = np.empty(size, dtype=dtype)
sums.real = np.bincount(group_idx, weights=a.real,
minlength=size)
sums.imag = np.bincount(group_idx, weights=a.imag,
minlength=size)
else:
sums = np.bincount(group_idx, weights=a,
minlength=size).astype(dtype)
with np.errstate(divide='ignore', invalid='ignore'):
ret = sums.astype(dtype) / counts
if not np.isnan(fill_value):
ret[counts == 0] = fill_value
return ret
def _var(group_idx, a, size, fill_value, dtype=np.dtype(np.float64),
sqrt=False, ddof=0):
if np.ndim(a) == 0:
raise ValueError("cannot take variance with scalar a")
counts = np.bincount(group_idx, minlength=size)
sums = np.bincount(group_idx, weights=a, minlength=size)
with np.errstate(divide='ignore'):
means = sums.astype(dtype) / counts
ret = np.bincount(group_idx, (a - means[group_idx]) ** 2,
minlength=size) / (counts - ddof)
if sqrt:
ret = np.sqrt(ret) # this is now std not var
if not np.isnan(fill_value):
ret[counts == 0] = fill_value
return ret
def _std(group_idx, a, size, fill_value, dtype=np.dtype(np.float64), ddof=0):
return _var(group_idx, a, size, fill_value, dtype=dtype, sqrt=True,
ddof=ddof)
def _allnan(group_idx, a, size, fill_value, dtype=bool):
return _all(group_idx, np.isnan(a), size, fill_value=fill_value,
dtype=dtype)
def _anynan(group_idx, a, size, fill_value, dtype=bool):
return _any(group_idx, np.isnan(a), size, fill_value=fill_value,
dtype=dtype)
def _sort(group_idx, a, size=None, fill_value=None, dtype=None, reverse=False):
sortidx = np.lexsort((-a if reverse else a, group_idx))
# Reverse sorting back to into grouped order, but preserving groupwise sorting
revidx = np.argsort(np.argsort(group_idx, kind='mergesort'), kind='mergesort')
return a[sortidx][revidx]
def _array(group_idx, a, size, fill_value, dtype=None):
"""groups a into separate arrays, keeping the order intact."""
if fill_value is not None and not (np.isscalar(fill_value) or
len(fill_value) == 0):
raise ValueError("fill_value must be None, a scalar or an empty "
"sequence")
order_group_idx = np.argsort(group_idx, kind='mergesort')
counts = np.bincount(group_idx, minlength=size)
ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
ret = np.asanyarray(ret)
if fill_value is None or np.isscalar(fill_value):
_fill_untouched(group_idx, ret, fill_value)
return ret
def _generic_callable(group_idx, a, size, fill_value, dtype=None,
func=lambda g: g, **kwargs):
"""groups a by inds, and then applies foo to each group in turn, placing
the results in an array."""
groups = _array(group_idx, a, size, ())
ret = np.full(size, fill_value, dtype=dtype or np.float64)
for i, grp in enumerate(groups):
if np.ndim(grp) == 1 and len(grp) > 0:
ret[i] = func(grp)
return ret
def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
"""
N to N aggregate operation of cumsum. Perform cumulative sum for each group.
group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
_cumsum(group_idx, a, np.max(group_idx) + 1)
>>> array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39])
"""
sortidx = np.argsort(group_idx, kind='mergesort')
invsortidx = np.argsort(sortidx, kind='mergesort')
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
a_srt_cumsum = np.cumsum(a_srt, dtype=dtype)
increasing = np.arange(len(a), dtype=int)
group_starts = _min(group_idx_srt, increasing, size, fill_value=0)[group_idx_srt]
a_srt_cumsum += -a_srt_cumsum[group_starts] + a_srt[group_starts]
return a_srt_cumsum[invsortidx]
def _nancumsum(group_idx, a, size, fill_value=None, dtype=None):
a_nonans = np.where(np.isnan(a), 0, a)
group_idx_nonans = np.where(np.isnan(group_idx), np.nanmax(group_idx) + 1, group_idx)
return _cumsum(group_idx_nonans, a_nonans, size, fill_value=fill_value, dtype=dtype)
_impl_dict = dict(min=_min, max=_max, sum=_sum, prod=_prod, last=_last,
first=_first, all=_all, any=_any, mean=_mean, std=_std,
var=_var, anynan=_anynan, allnan=_allnan, sort=_sort,
array=_array, argmax=_argmax, argmin=_argmin, len=_len,
cumsum=_cumsum, generic=_generic_callable)
_impl_dict.update(('nan' + k, v) for k, v in list(_impl_dict.items())
if k not in funcs_no_separate_nan)
def _aggregate_base(group_idx, a, func='sum', size=None, fill_value=0,
order='C', dtype=None, axis=None, _impl_dict=_impl_dict,
_nansqueeze=False, cache=None, **kwargs):
group_idx, a, flat_size, ndim_idx, size = input_validation(group_idx, a,
size=size, order=order, axis=axis)
func = get_func(func, aliasing, _impl_dict)
if not isstr(func):
# do simple grouping and execute function in loop
ret = _impl_dict.get('generic', _generic_callable)(group_idx, a, flat_size, fill_value, func=func,
dtype=dtype, **kwargs)
else:
# deal with nans and find the function
if func.startswith('nan'):
if np.ndim(a) == 0:
raise ValueError("nan-version not supported for scalar input.")
if _nansqueeze:
good = ~np.isnan(a)
a = a[good]
group_idx = group_idx[good]
dtype = check_dtype(dtype, func, a, flat_size)
func = _impl_dict[func]
ret = func(group_idx, a, flat_size, fill_value=fill_value, dtype=dtype,
**kwargs)
# deal with ndimensional indexing
if ndim_idx > 1:
ret = ret.reshape(size, order=order)
return ret
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
dtype=None, axis=None, **kwargs):
return _aggregate_base(group_idx, a, size=size, fill_value=fill_value,
order=order, dtype=dtype, func=func, axis=axis,
_impl_dict=_impl_dict, _nansqueeze=True, **kwargs)
aggregate.__doc__ = """
This is the pure numpy implementation of aggregate.
""" + aggregate_common_doc
|
ml31415/numpy-groupies | numpy_groupies/benchmarks/generic.py | aggregate_grouploop | python | def aggregate_grouploop(*args, **kwargs):
extrafuncs = {'allnan': allnan, 'anynan': anynan,
'first': itemgetter(0), 'last': itemgetter(-1),
'nanfirst': nanfirst, 'nanlast': nanlast}
func = kwargs.pop('func')
func = extrafuncs.get(func, func)
if isinstance(func, str):
raise NotImplementedError("Grouploop needs to be called with a function")
return aggregate_numpy.aggregate(*args, func=lambda x: func(x), **kwargs) | wraps func in lambda which prevents aggregate_numpy from
recognising and optimising it. Instead it groups and loops. | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/benchmarks/generic.py#L13-L23 | [
"def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',\n dtype=None, axis=None, **kwargs):\n return _aggregate_base(group_idx, a, size=size, fill_value=fill_value,\n order=order, dtype=dtype, func=func, axis=axis,\n _impl_dict=_impl_dict, _nansqueeze=True, **kwargs)\n"
] | #!/usr/bin/python -B
from __future__ import print_function
import sys
import platform
import timeit
from operator import itemgetter
import numpy as np
from numpy_groupies.tests import _implementations, aggregate_numpy
from numpy_groupies.utils_numpy import allnan, anynan, nanfirst, nanlast
def arbitrary(iterator):
tmp = 0
for i, x in enumerate(iterator, 1):
tmp += x ** i
return tmp
func_list = (np.sum, np.prod, np.min, np.max, len, np.all, np.any, 'anynan', 'allnan',
np.mean, np.std, np.var, 'first', 'last', 'argmax', 'argmin',
np.nansum, np.nanprod, np.nanmin, np.nanmax, 'nanlen', 'nanall', 'nanany',
np.nanmean, np.nanvar, np.nanstd, 'nanfirst', 'nanlast',
'cumsum', 'cumprod', 'cummax', 'cummin', arbitrary, 'sort')
def benchmark(implementations, size=5e5, repeat=5, seed=100):
rnd = np.random.RandomState(seed=seed)
group_idx = rnd.randint(0, int(1e3), int(size))
a = rnd.random_sample(group_idx.size)
a[a > 0.8] = 0
nana = a.copy()
nana[(nana < 0.2) & (nana != 0)] = np.nan
nan_share = np.mean(np.isnan(nana))
assert 0.15 < nan_share < 0.25, "%3f%% nans" % (nan_share * 100)
print("function" + ''.join(impl.__name__.rsplit('_', 1)[1].rjust(14) for impl in implementations))
print("-" * (9 + 14 * len(implementations)))
for func in func_list:
func_name = getattr(func, '__name__', func)
print(func_name.ljust(9), end='')
results = []
used_a = nana if 'nan' in func_name else a
for impl in implementations:
if impl is None:
print('----'.rjust(14), end='')
continue
aggregatefunc = impl.aggregate
try:
res = aggregatefunc(group_idx, used_a, func=func, cache=True)
except NotImplementedError:
print('----'.rjust(14), end='')
continue
except Exception:
print('ERROR'.rjust(14), end='')
else:
results.append(res)
try:
np.testing.assert_array_almost_equal(res, results[0])
except AssertionError:
print('FAIL'.rjust(14), end='')
else:
t0 = min(timeit.Timer(lambda: aggregatefunc(group_idx, used_a, func=func, cache=True)).repeat(repeat=repeat, number=1))
print(("%.3f" % (t0 * 1000)).rjust(14), end='')
sys.stdout.flush()
print()
implementation_names = [impl.__name__.rsplit('_', 1)[1] for impl in implementations]
postfix = ''
if 'numba' in implementation_names:
import numba
postfix += ', Numba %s' % numba.__version__
if 'weave' in implementation_names:
import weave
postfix += ', Weave %s' % weave.__version__
if 'pandas' in implementation_names:
import pandas
postfix += ', Pandas %s' % pandas.__version__
print("%s(%s), Python %s, Numpy %s%s" % (platform.system(), platform.machine(), sys.version.split()[0], np.version.version, postfix))
if __name__ == '__main__':
implementations = _implementations if '--purepy' in sys.argv else _implementations[1:]
implementations = implementations if '--pandas' in sys.argv else implementations[:-1]
benchmark(implementations)
|
ml31415/numpy-groupies | numpy_groupies/aggregate_numpy_ufunc.py | _prod | python | def _prod(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype_scalar(fill_value, dtype, a)
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != 1:
ret[group_idx] = 1 # product should start from 1
np.multiply.at(ret, group_idx, a)
return ret | Same as aggregate_numpy.py | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy_ufunc.py#L50-L57 | null | import numpy as np
from .utils import get_func, check_boolean, isstr, aggregate_common_doc
from .utils_numpy import aliasing, minimum_dtype, minimum_dtype_scalar
from .aggregate_numpy import _aggregate_base
def _anynan(group_idx, a, size, fill_value, dtype=None):
return _any(group_idx, np.isnan(a), size, fill_value=fill_value,
dtype=dtype)
def _allnan(group_idx, a, size, fill_value, dtype=None):
return _all(group_idx, np.isnan(a), size, fill_value=fill_value,
dtype=dtype)
def _any(group_idx, a, size, fill_value, dtype=None):
check_boolean(fill_value)
ret = np.full(size, fill_value, dtype=bool)
if fill_value:
ret[group_idx] = False # any-test should start from False
np.logical_or.at(ret, group_idx, a)
return ret
def _all(group_idx, a, size, fill_value, dtype=None):
check_boolean(fill_value)
ret = np.full(size, fill_value, dtype=bool)
if not fill_value:
ret[group_idx] = True # all-test should start from True
np.logical_and.at(ret, group_idx, a)
return ret
def _sum(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype_scalar(fill_value, dtype, a)
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != 0:
ret[group_idx] = 0 # sums should start at 0
np.add.at(ret, group_idx, a)
return ret
def _len(group_idx, a, size, fill_value, dtype=None):
return _sum(group_idx, 1, size, fill_value, dtype=int)
def _min(group_idx, a, size, fill_value, dtype=None):
"""Same as aggregate_numpy.py"""
dtype = minimum_dtype(fill_value, dtype or a.dtype)
dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).max
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != dmax:
ret[group_idx] = dmax # min starts from maximum
np.minimum.at(ret, group_idx, a)
return ret
def _max(group_idx, a, size, fill_value, dtype=None):
"""Same as aggregate_numpy.py"""
dtype = minimum_dtype(fill_value, dtype or a.dtype)
dmin = np.iinfo(a.dtype).min if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).min
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != dmin:
ret[group_idx] = dmin # max starts from minimum
np.maximum.at(ret, group_idx, a)
return ret
_impl_dict = dict(min=_min, max=_max, sum=_sum, prod=_prod, all=_all, any=_any,
allnan=_allnan, anynan=_anynan, len=_len)
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
dtype=None, axis=None, **kwargs):
func = get_func(func, aliasing, _impl_dict)
if not isstr(func):
raise NotImplementedError("No such ufunc available")
return _aggregate_base(group_idx, a, size=size, fill_value=fill_value,
order=order, dtype=dtype, func=func, axis=axis,
_impl_dict=_impl_dict, _nansqueeze=False, **kwargs)
aggregate.__doc__ = """
Unlike ``aggregate_numpy``, which in most cases does some custom
optimisations, this version simply uses ``numpy``'s ``ufunc.at``.
As of version 1.14 this gives fairly poor performance. There should
normally be no need to use this version, it is intended to be used in
testing and benchmarking only.
""" + aggregate_common_doc
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.