id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
158,821 | import copy
import random
import re
import shutil
import subprocess
import sys
import os
from datetime import timedelta
import json
import requests
from videotrans.configure import config
import time
if not config.params['gptsovits_role'].strip():
return None
from videotrans.util.playmp3 import AudioPlayer
from videotrans.separate import st
def get_clone_role(set_p=False):
    """Query the clone-voice service's /init endpoint and cache its role list.

    On success stores ``["clone"] + roles`` into ``config.clone_voicelist``
    and notifies the UI via ``set_process('', 'set_clone_role')``.

    :param set_p: when True, failures raise an Exception (prefixed with
        'clone-voice:'); when False, failures return False silently.
    :return: True on success, False on failure (set_p=False), None never.
    :raises Exception: if set_p is True and the API is unset or unreachable.
    """
    if not config.params['clone_api']:
        if set_p:
            raise Exception(config.transobj['bixutianxiecloneapi'])
        return False
    try:
        url = config.params['clone_api'].strip().rstrip('/') + "/init"
        # Normalize to a plain http:// URL and bypass any system proxies;
        # timeout added so a dead/unreachable service cannot hang the caller
        # forever (the except below already handles the failure path).
        res = requests.get('http://' + url.replace('http://', ''),
                           proxies={"http": "", "https": ""},
                           timeout=30)
        if res.status_code == 200:
            config.clone_voicelist = ["clone"] + res.json()
            set_process('', 'set_clone_role')
            return True
        raise Exception(f"code={res.status_code},{config.transobj['You must deploy and start the clone-voice service']}")
    except Exception as e:
        if set_p:
            raise Exception(f'clone-voice:{str(e)}')
        return False
158,822 | import copy
import random
import re
import shutil
import subprocess
import sys
import os
from datetime import timedelta
import json
import requests
from videotrans.configure import config
import time
from videotrans.util.playmp3 import AudioPlayer
if os.path.exists(jsonfile) and os.path.getsize(jsonfile) > 0:
with open(jsonfile, 'r', encoding='utf-8') as f:
cache = json.loads(f.read())
for it in cache.values():
namelist.append(it['name'])
from videotrans.separate import st
try:
path=os.path.dirname(targe_audio)
vocal_file=os.path.join(path,'vocal.wav')
if not os.path.exists(vocal_file):
set_process(config.transobj['Separating vocals and background music, which may take a longer time'])
try:
st.start(targe_audio,path)
except Exception as e:
msg=f"separate vocal and background music:{str(e)}"
set_process(msg)
raise Exception(msg)
if not os.path.exists(vocal_file):
return False
# 再将 vocal.wav 转为1通道,8000采样率,方便识别
runffmpeg([
"-y",
"-i",
vocal_file,
"-ac",
"1",
"-ar",
"8000",
os.path.join(path,'vocal8000.wav'),
])
except Exception as e:
print("end")
msg=f"separate vocal and background music:{str(e)}"
set_process(msg)
raise Exception(msg)
os.environ['temp'],f'{time.time()}.wav')
tmpm4a=os.path.join(os.environ["TEMP"] or os.environ['temp'],f'{time.time()}.m4a')
# 背景转为m4a文件,音量降低为0.8
wav2m4a(backwav,tmpm4a,["-filter:a",f"volume={config.settings['backaudio_volume']}"])
runffmpeg(['-y', '-i', peiyinm4a, '-i', tmpm4a, '-filter_complex',"[0:a][1:a]amix=inputs=2:duration=first:dropout_transition=2", '-ac', '2', tmpwav])
shutil.copy2(tmpwav,peiyinm4a)
# 转为 m4a
wav2m4a(wavfile, m4afile,extra=None):
cmd = [
"-y",
"-i",
wavfile,
"-c:a",
"aac",
m4afile
]
if extra:
cmd=cmd[:3]+extra+cmd[3:]
return runffmpeg(cmd,disable_gpu=True)
wav2mp3(wavfile, mp3file,extra=None):
cmd = [
"-y",
"-i",
wavfile,
mp3file
]
if extra:
cmd=cmd[:3]+extra+cmd[3:]
return runffmpeg(cmd,disable_gpu=True)
m4a2wav(m4afile, wavfile):
cmd = [
"-y",
"-i",
m4afile,
"-ac",
"1",
"-ar",
"8000",
"-b:a",
"128k",
"-c:a",
"pcm_s16le",
wavfile
]
return runffmpeg(cmd,disable_gpu=True)
_fromvideo(file_path, img):
return runffmpeg(
['-y', '-sseof', '-3', '-i', f'{file_path}', '-q:v', '1', '-qmin:v', '1', '-qmax:v', '1', '-update', 'true',
f'{img}'], de_format="nv12",use_run=True)
img=None, fps=30, scale=None, totime=None, out=None):
return runffmpeg([
'-loop', '1', '-i', f'{img}', '-vf', f'fps={fps},scale={scale[0]}:{scale[1]}', '-c:v', "libx264",
'-crf', f'{config.settings["crf"]}', '-to', f'{totime}', '-y', out], no_decode=True,de_format="nv12",use_run=True)
(filelist, filename):
txt = []
for it in filelist:
txt.append(f"file '{it}'")
with open(filename, 'w', encoding='utf-8') as f:
f.write("\n".join(txt))
return filename
i_mp4(*, filelist=[], out=None,maxsec=None):
# 创建txt文件
txt = config.TEMP_DIR + f"/{time.time()}.txt"
create_concat_txt(filelist, txt)
if maxsec:
return runffmpeg(['-y', '-f', 'concat', '-safe', '0', '-i', txt, '-c:v', "copy",'-t', f"{maxsec}", '-crf', f'{config.settings["crf"]}', '-an',out],use_run=True)
return runffmpeg(['-y', '-f', 'concat', '-safe', '0', '-i', txt, '-c:v', "copy", '-crf', f'{config.settings["crf"]}', '-an',out],use_run=True)
i_audio(*, filelist=[], out=None):
# 创建txt文件
txt = config.TEMP_DIR + f"/{time.time()}.txt"
create_concat_txt(filelist, txt)
return runffmpeg(['-y', '-f', 'concat', '-safe', '0', '-i', txt, '-c:a','aac',out],disable_gpu=True,use_run=True)
ed_up_mp3(*, filename=None, speed=1, out=None):
return runffmpeg([
"-y",
"-i",
filename,
"-af",
f'atempo={speed}',
out
],use_run=True)
t):
#去掉空行
content=[it for it in content if it.strip()]
if len(content)<1:
return []
result=[]
maxindex=len(content)-1
# 时间格式
timepat = r'^\s*?\d+:\d+:\d+([\,\.]\d*?)?\s*?-->\s*?\d+:\d+:\d+([\,\.]\d*?)?\s*?$'
textpat=r'^[,./?`!@#$%^&*()_+=\\|\[\]{}~\s \n-]*$'
#print(content)
for i,it in enumerate(content):
#当前空行跳过
if not it.strip():
continue
it=it.strip()
is_time=re.match(timepat,it)
#print(f'{i=},{it=}')
if is_time:
#print(f'\t是时间')
#当前行是时间格式,则添加
result.append({"time":it,"text":[]})
elif i==0:
#当前是第一行,并且不是时间格式,跳过
#print(f'\t是0行跳过')
continue
elif re.match(r'^\s*?\d+\s*?$',it) and i< maxindex and re.match(timepat,content[i+1]):
#当前不是时间格式,不是第一行,并且都是数字,并且下一行是时间格式,则当前是行号,跳过
#print(f'\t是行号')
continue
elif len(result)>0 and not re.match(textpat,it):
#当前不是时间格式,不是第一行,(不是行号),并且result中存在数据,则是内容,可加入最后一个数据
result[-1]['text'].append(it.capitalize())
#再次遍历,去掉text为空的
result=[it for it in result if len(it['text'])>0]
if len(result)>0:
for i,it in enumerate(result):
result[i]['line']=i+1
result[i]['text']="\n".join([tx.capitalize() for tx in it['text']])
s,e=(it['time'].replace('.',',')).split('-->')
s=format_time(s,',')
e=format_time(e,',')
result[i]['time']=f'{s} --> {e}'
return result
e="",separate=','):
if not s_time.strip():
return f'00:00:00{separate}000'
s_time=s_time.strip()
hou,min,sec="00","00",f"00{separate}000"
tmp=s_time.split(':')
if len(tmp)>=3:
hou=tmp[-3].strip()
min=tmp[-2].strip()
sec=tmp[-1].strip()
elif len(tmp)==2:
min=tmp[0].strip()
sec=tmp[1].strip()
elif len(tmp)==1:
sec=tmp[0].strip()
if re.search(r',|\.',str(sec)):
sec,ms=re.split(r',|\.',str(sec))
sec=sec.strip()
ms=ms.strip()
else:
ms='000'
hou=hou if hou!="" else "00"
if len(hou)<2:
hou=f'0{hou}'
hou=hou[-2:]
min=min if min!="" else "00"
if len(min)<2:
min=f'0{min}'
min=min[-2:]
sec=sec if sec!="" else "00"
if len(sec)<2:
sec=f'0{sec}'
sec=sec[-2:]
ms_len=len(ms)
if ms_len<3:
for i in range(3-ms_len):
ms=f'0{ms}'
ms=ms[-3:]
return f"{hou}:{min}:{sec}{separate}{ms}"
e_mp4(novoice_mp4, noextname):
# 预先创建好的
# 判断novoice_mp4是否完成
t = 0
if noextname not in config.queue_novice and os.path.exists(novoice_mp4) and os.path.getsize(novoice_mp4) > 0:
return True
if noextname in config.queue_novice and config.queue_novice[noextname] == 'end':
return True
last_size = 0
while True:
if config.current_status != 'ing':
raise Exception("stop")
if os.path.exists(novoice_mp4):
current_size = os.path.getsize(novoice_mp4)
if last_size > 0 and current_size == last_size and t > 600:
return True
last_size = current_size
if noextname not in config.queue_novice:
msg = f"{noextname} split no voice videoerror:{config.queue_novice=}"
raise Exception(msg)
if config.queue_novice[noextname] == 'error':
msg = f"{noextname} split no voice videoerror"
raise Exception(msg)
if config.queue_novice[noextname] == 'ing':
size = f'{round(last_size / 1024 / 1024, 2)}MB' if last_size > 0 else ""
set_process(f"{noextname} {'分离音频和画面' if config.defaulelang=='zh' else 'spilt audio and video'} {size}")
time.sleep(3)
t += 3
continue
return True
to="", source="", pts="", out=""):
cmd1 = [
"-y",
"-ss",
format_time(ss,'.')]
if to != '':
cmd1.append("-to")
cmd1.append(format_time(to,'.')) # 如果开始结束时间相同,则强制持续时间1s)
cmd1.append('-i')
cmd1.append(source)
if pts:
cmd1.append("-vf")
cmd1.append(f'setpts={pts}*PTS')
cmd = cmd1 + ["-c:v",
"libx264",
"-crf",
f'{config.settings["crf"]}',
'-an',
f'{out}'
]
return runffmpeg(cmd,use_run=True)
*,ss,to,audio_file,out_file):
cmd=[
"-y",
"-i",
audio_file,
"-ss",
format_time(ss,'.'),
"-to",
format_time(to,'.'),
"-ar",
"8000",
out_file
]
return runffmpeg(cmd)
_role(set_p=False):
if not config.params['clone_api']:
if set_p:
raise Exception(config.transobj['bixutianxiecloneapi'])
return False
try:
url=config.params['clone_api'].strip().rstrip('/')+"/init"
res=requests.get('http://'+url.replace('http://',''),proxies={"http":"","https":""})
if res.status_code==200:
config.clone_voicelist=["clone"]+res.json()
set_process('','set_clone_role')
return True
raise Exception(f"code={res.status_code},{config.transobj['You must deploy and start the clone-voice service']}")
except Exception as e:
if set_p:
raise Exception(f'clone-voice:{str(e)}')
return False
ox(text, type='logs',*,func_name=""):
set_process(text, type, qname="box",func_name=func_name)
type="logs",*,qname='sp',func_name="",btnkey=None):
try:
if text:
log_msg = text.strip()
if log_msg.startswith("[error"):
config.logger.error(log_msg)
else:
config.logger.info(log_msg)
if qname == 'sp':
config.queue_logs.put_nowait({"text": text, "type": type,"btnkey": btnkey if btnkey else config.btnkey})
elif qname=='box':
config.queuebox_logs.put_nowait({"text": text, "type": type,"func_name":func_name})
else:
print(f'[{type}]: {text}')
except Exception as e:
pass
ext):
try:
files_and_dirs = os.listdir(directory)
# 遍历文件和子目录
for item in files_and_dirs:
item_path = os.path.join(directory, item)
# 如果是文件,且是 mp3 文件,删除之
if os.path.isfile(item_path) and item.lower().endswith(ext):
os.remove(item_path)
print(f"Deleted: {item_path}")
# 如果是子目录,递归调用删除函数
elif os.path.isdir(item_path):
delete_files(item_path)
except:
pass
patter=r'[ \s`"\'!@#$%^&*()=+,?\|{}\[\]]+'
if re.search(patter,file):
if is_dir:
os.makedirs(config.homedir+"/target_dir",exist_ok=True)
return True,config.homedir+"/target_dir",False
dirname=os.path.dirname(file)
basename=os.path.basename(file)
#目录不规则,迁移目录
if re.search(patter,dirname):
basename=re.sub(patter,'',basename,0,re.I)
basename=basename.replace(':','')
os.makedirs(config.homedir+"/rename",exist_ok=True)
newfile=config.homedir+f"/rename/{basename}"
shutil.copy2(file,newfile)
else:
#目录规则仅名称不规则,只修改名称
basename=re.sub(patter,'',basename,0,re.I)
basename=basename.replace(':','')
newfile=dirname+"/"+basename
shutil.copy2(file,newfile)
return True,newfile,basename
return False,False,False
io_time(audio_file):
# 如果存在缓存并且没有禁用缓存
out = runffprobe(['-v','quiet','-print_format','json','-show_format','-show_streams',audio_file])
if out is False:
raise Exception(f'ffprobe error:dont get video information')
out = json.loads(out)
return float(out['format']['duration'])
url():
google_url='https://translate.google.com'
if os.path.exists(os.path.join(config.rootdir,'google.txt')):
with open(os.path.join(config.rootdir,'google.txt'),'r') as f:
t=f.read().strip().splitlines()
urls=[x for x in t if x.strip() and x.startswith('http')]
if len(urls)>0:
n=0
while n<5:
google_url=random.choice(urls).rstrip('/')
try:
res=requests.head(google_url,proxies={"http":"","https":""})
if res.status_code==200:
return google_url
except:
msg=(f'测试失败: {google_url}')
config.logger.error(msg)
continue
finally:
n+=1
raise Exception(f'从google.txt中随机获取5次url,均未找到可用的google翻译反代地址,请检查')
return google_url
def delete_files(directory, ext):
    """Recursively delete every file under *directory* whose lowercased
    name ends with *ext* (e.g. ``'.mp3'``).

    Best-effort cleanup: filesystem errors are swallowed so a missing or
    unreadable directory never interrupts the caller.

    :param directory: root directory to clean.
    :param ext: file-name suffix to match (compared case-insensitively).
    """
    try:
        for item in os.listdir(directory):
            item_path = os.path.join(directory, item)
            if os.path.isfile(item_path) and item.lower().endswith(ext):
                os.remove(item_path)
                print(f"Deleted: {item_path}")
            elif os.path.isdir(item_path):
                # Bug fix: the recursive call previously omitted `ext`,
                # raising a TypeError that the except below silently
                # swallowed — subdirectories were never cleaned.
                delete_files(item_path, ext)
    except Exception:
        pass
158,823 | import copy
import random
import re
import shutil
import subprocess
import sys
import os
from datetime import timedelta
import json
import requests
from videotrans.configure import config
import time
if not config.params['gptsovits_role'].strip():
return None
from videotrans.util.playmp3 import AudioPlayer
if os.path.exists(jsonfile) and os.path.getsize(jsonfile) > 0:
with open(jsonfile, 'r', encoding='utf-8') as f:
cache = json.loads(f.read())
for it in cache.values():
namelist.append(it['name'])
from videotrans.separate import st
try:
path=os.path.dirname(targe_audio)
vocal_file=os.path.join(path,'vocal.wav')
if not os.path.exists(vocal_file):
set_process(config.transobj['Separating vocals and background music, which may take a longer time'])
try:
st.start(targe_audio,path)
except Exception as e:
msg=f"separate vocal and background music:{str(e)}"
set_process(msg)
raise Exception(msg)
if not os.path.exists(vocal_file):
return False
# 再将 vocal.wav 转为1通道,8000采样率,方便识别
runffmpeg([
"-y",
"-i",
vocal_file,
"-ac",
"1",
"-ar",
"8000",
os.path.join(path,'vocal8000.wav'),
])
except Exception as e:
print("end")
msg=f"separate vocal and background music:{str(e)}"
set_process(msg)
raise Exception(msg)
os.environ['temp'],f'{time.time()}.wav')
tmpm4a=os.path.join(os.environ["TEMP"] or os.environ['temp'],f'{time.time()}.m4a')
# 背景转为m4a文件,音量降低为0.8
wav2m4a(backwav,tmpm4a,["-filter:a",f"volume={config.settings['backaudio_volume']}"])
runffmpeg(['-y', '-i', peiyinm4a, '-i', tmpm4a, '-filter_complex',"[0:a][1:a]amix=inputs=2:duration=first:dropout_transition=2", '-ac', '2', tmpwav])
shutil.copy2(tmpwav,peiyinm4a)
# 转为 m4a
wav2m4a(wavfile, m4afile,extra=None):
cmd = [
"-y",
"-i",
wavfile,
"-c:a",
"aac",
m4afile
]
if extra:
cmd=cmd[:3]+extra+cmd[3:]
return runffmpeg(cmd,disable_gpu=True)
wav2mp3(wavfile, mp3file,extra=None):
cmd = [
"-y",
"-i",
wavfile,
mp3file
]
if extra:
cmd=cmd[:3]+extra+cmd[3:]
return runffmpeg(cmd,disable_gpu=True)
m4a2wav(m4afile, wavfile):
cmd = [
"-y",
"-i",
m4afile,
"-ac",
"1",
"-ar",
"8000",
"-b:a",
"128k",
"-c:a",
"pcm_s16le",
wavfile
]
return runffmpeg(cmd,disable_gpu=True)
_fromvideo(file_path, img):
return runffmpeg(
['-y', '-sseof', '-3', '-i', f'{file_path}', '-q:v', '1', '-qmin:v', '1', '-qmax:v', '1', '-update', 'true',
f'{img}'], de_format="nv12",use_run=True)
img=None, fps=30, scale=None, totime=None, out=None):
return runffmpeg([
'-loop', '1', '-i', f'{img}', '-vf', f'fps={fps},scale={scale[0]}:{scale[1]}', '-c:v', "libx264",
'-crf', f'{config.settings["crf"]}', '-to', f'{totime}', '-y', out], no_decode=True,de_format="nv12",use_run=True)
(filelist, filename):
txt = []
for it in filelist:
txt.append(f"file '{it}'")
with open(filename, 'w', encoding='utf-8') as f:
f.write("\n".join(txt))
return filename
i_mp4(*, filelist=[], out=None,maxsec=None):
# 创建txt文件
txt = config.TEMP_DIR + f"/{time.time()}.txt"
create_concat_txt(filelist, txt)
if maxsec:
return runffmpeg(['-y', '-f', 'concat', '-safe', '0', '-i', txt, '-c:v', "copy",'-t', f"{maxsec}", '-crf', f'{config.settings["crf"]}', '-an',out],use_run=True)
return runffmpeg(['-y', '-f', 'concat', '-safe', '0', '-i', txt, '-c:v', "copy", '-crf', f'{config.settings["crf"]}', '-an',out],use_run=True)
i_audio(*, filelist=[], out=None):
# 创建txt文件
txt = config.TEMP_DIR + f"/{time.time()}.txt"
create_concat_txt(filelist, txt)
return runffmpeg(['-y', '-f', 'concat', '-safe', '0', '-i', txt, '-c:a','aac',out],disable_gpu=True,use_run=True)
ed_up_mp3(*, filename=None, speed=1, out=None):
return runffmpeg([
"-y",
"-i",
filename,
"-af",
f'atempo={speed}',
out
],use_run=True)
t):
#去掉空行
content=[it for it in content if it.strip()]
if len(content)<1:
return []
result=[]
maxindex=len(content)-1
# 时间格式
timepat = r'^\s*?\d+:\d+:\d+([\,\.]\d*?)?\s*?-->\s*?\d+:\d+:\d+([\,\.]\d*?)?\s*?$'
textpat=r'^[,./?`!@#$%^&*()_+=\\|\[\]{}~\s \n-]*$'
#print(content)
for i,it in enumerate(content):
#当前空行跳过
if not it.strip():
continue
it=it.strip()
is_time=re.match(timepat,it)
#print(f'{i=},{it=}')
if is_time:
#print(f'\t是时间')
#当前行是时间格式,则添加
result.append({"time":it,"text":[]})
elif i==0:
#当前是第一行,并且不是时间格式,跳过
#print(f'\t是0行跳过')
continue
elif re.match(r'^\s*?\d+\s*?$',it) and i< maxindex and re.match(timepat,content[i+1]):
#当前不是时间格式,不是第一行,并且都是数字,并且下一行是时间格式,则当前是行号,跳过
#print(f'\t是行号')
continue
elif len(result)>0 and not re.match(textpat,it):
#当前不是时间格式,不是第一行,(不是行号),并且result中存在数据,则是内容,可加入最后一个数据
result[-1]['text'].append(it.capitalize())
#再次遍历,去掉text为空的
result=[it for it in result if len(it['text'])>0]
if len(result)>0:
for i,it in enumerate(result):
result[i]['line']=i+1
result[i]['text']="\n".join([tx.capitalize() for tx in it['text']])
s,e=(it['time'].replace('.',',')).split('-->')
s=format_time(s,',')
e=format_time(e,',')
result[i]['time']=f'{s} --> {e}'
return result
e="",separate=','):
if not s_time.strip():
return f'00:00:00{separate}000'
s_time=s_time.strip()
hou,min,sec="00","00",f"00{separate}000"
tmp=s_time.split(':')
if len(tmp)>=3:
hou=tmp[-3].strip()
min=tmp[-2].strip()
sec=tmp[-1].strip()
elif len(tmp)==2:
min=tmp[0].strip()
sec=tmp[1].strip()
elif len(tmp)==1:
sec=tmp[0].strip()
if re.search(r',|\.',str(sec)):
sec,ms=re.split(r',|\.',str(sec))
sec=sec.strip()
ms=ms.strip()
else:
ms='000'
hou=hou if hou!="" else "00"
if len(hou)<2:
hou=f'0{hou}'
hou=hou[-2:]
min=min if min!="" else "00"
if len(min)<2:
min=f'0{min}'
min=min[-2:]
sec=sec if sec!="" else "00"
if len(sec)<2:
sec=f'0{sec}'
sec=sec[-2:]
ms_len=len(ms)
if ms_len<3:
for i in range(3-ms_len):
ms=f'0{ms}'
ms=ms[-3:]
return f"{hou}:{min}:{sec}{separate}{ms}"
e_mp4(novoice_mp4, noextname):
# 预先创建好的
# 判断novoice_mp4是否完成
t = 0
if noextname not in config.queue_novice and os.path.exists(novoice_mp4) and os.path.getsize(novoice_mp4) > 0:
return True
if noextname in config.queue_novice and config.queue_novice[noextname] == 'end':
return True
last_size = 0
while True:
if config.current_status != 'ing':
raise Exception("stop")
if os.path.exists(novoice_mp4):
current_size = os.path.getsize(novoice_mp4)
if last_size > 0 and current_size == last_size and t > 600:
return True
last_size = current_size
if noextname not in config.queue_novice:
msg = f"{noextname} split no voice videoerror:{config.queue_novice=}"
raise Exception(msg)
if config.queue_novice[noextname] == 'error':
msg = f"{noextname} split no voice videoerror"
raise Exception(msg)
if config.queue_novice[noextname] == 'ing':
size = f'{round(last_size / 1024 / 1024, 2)}MB' if last_size > 0 else ""
set_process(f"{noextname} {'分离音频和画面' if config.defaulelang=='zh' else 'spilt audio and video'} {size}")
time.sleep(3)
t += 3
continue
return True
to="", source="", pts="", out=""):
cmd1 = [
"-y",
"-ss",
format_time(ss,'.')]
if to != '':
cmd1.append("-to")
cmd1.append(format_time(to,'.')) # 如果开始结束时间相同,则强制持续时间1s)
cmd1.append('-i')
cmd1.append(source)
if pts:
cmd1.append("-vf")
cmd1.append(f'setpts={pts}*PTS')
cmd = cmd1 + ["-c:v",
"libx264",
"-crf",
f'{config.settings["crf"]}',
'-an',
f'{out}'
]
return runffmpeg(cmd,use_run=True)
*,ss,to,audio_file,out_file):
cmd=[
"-y",
"-i",
audio_file,
"-ss",
format_time(ss,'.'),
"-to",
format_time(to,'.'),
"-ar",
"8000",
out_file
]
return runffmpeg(cmd)
_role(set_p=False):
if not config.params['clone_api']:
if set_p:
raise Exception(config.transobj['bixutianxiecloneapi'])
return False
try:
url=config.params['clone_api'].strip().rstrip('/')+"/init"
res=requests.get('http://'+url.replace('http://',''),proxies={"http":"","https":""})
if res.status_code==200:
config.clone_voicelist=["clone"]+res.json()
set_process('','set_clone_role')
return True
raise Exception(f"code={res.status_code},{config.transobj['You must deploy and start the clone-voice service']}")
except Exception as e:
if set_p:
raise Exception(f'clone-voice:{str(e)}')
return False
ox(text, type='logs',*,func_name=""):
set_process(text, type, qname="box",func_name=func_name)
type="logs",*,qname='sp',func_name="",btnkey=None):
try:
if text:
log_msg = text.strip()
if log_msg.startswith("[error"):
config.logger.error(log_msg)
else:
config.logger.info(log_msg)
if qname == 'sp':
config.queue_logs.put_nowait({"text": text, "type": type,"btnkey": btnkey if btnkey else config.btnkey})
elif qname=='box':
config.queuebox_logs.put_nowait({"text": text, "type": type,"func_name":func_name})
else:
print(f'[{type}]: {text}')
except Exception as e:
pass
ext):
try:
files_and_dirs = os.listdir(directory)
# 遍历文件和子目录
for item in files_and_dirs:
item_path = os.path.join(directory, item)
# 如果是文件,且是 mp3 文件,删除之
if os.path.isfile(item_path) and item.lower().endswith(ext):
os.remove(item_path)
print(f"Deleted: {item_path}")
# 如果是子目录,递归调用删除函数
elif os.path.isdir(item_path):
delete_files(item_path)
except:
pass
patter=r'[ \s`"\'!@#$%^&*()=+,?\|{}\[\]]+'
if re.search(patter,file):
if is_dir:
os.makedirs(config.homedir+"/target_dir",exist_ok=True)
return True,config.homedir+"/target_dir",False
dirname=os.path.dirname(file)
basename=os.path.basename(file)
#目录不规则,迁移目录
if re.search(patter,dirname):
basename=re.sub(patter,'',basename,0,re.I)
basename=basename.replace(':','')
os.makedirs(config.homedir+"/rename",exist_ok=True)
newfile=config.homedir+f"/rename/{basename}"
shutil.copy2(file,newfile)
else:
#目录规则仅名称不规则,只修改名称
basename=re.sub(patter,'',basename,0,re.I)
basename=basename.replace(':','')
newfile=dirname+"/"+basename
shutil.copy2(file,newfile)
return True,newfile,basename
return False,False,False
io_time(audio_file):
# 如果存在缓存并且没有禁用缓存
out = runffprobe(['-v','quiet','-print_format','json','-show_format','-show_streams',audio_file])
if out is False:
raise Exception(f'ffprobe error:dont get video information')
out = json.loads(out)
return float(out['format']['duration'])
url():
google_url='https://translate.google.com'
if os.path.exists(os.path.join(config.rootdir,'google.txt')):
with open(os.path.join(config.rootdir,'google.txt'),'r') as f:
t=f.read().strip().splitlines()
urls=[x for x in t if x.strip() and x.startswith('http')]
if len(urls)>0:
n=0
while n<5:
google_url=random.choice(urls).rstrip('/')
try:
res=requests.head(google_url,proxies={"http":"","https":""})
if res.status_code==200:
return google_url
except:
msg=(f'测试失败: {google_url}')
config.logger.error(msg)
continue
finally:
n+=1
raise Exception(f'从google.txt中随机获取5次url,均未找到可用的google翻译反代地址,请检查')
return google_url
def send_notification(title, message):
    """Show a best-effort desktop notification; never raises.

    Bug fix: the ``plyer`` import now lives inside the ``try`` so that a
    missing plyer package (or any notification-backend failure) degrades
    silently instead of crashing the caller with an uncaught ImportError.

    :param title: notification title, truncated to 60 chars for backends.
    :param message: notification body, truncated to 120 chars.
    """
    try:
        from plyer import notification
        notification.notify(
            title=title[:60],
            message=message[:120],
            ticker="视频翻译与配音",
            app_name="视频翻译与配音",  # config.uilanglist['SP-video Translate Dubbing']
            app_icon=os.path.join(config.rootdir, 'videotrans/styles/icon.ico'),
            timeout=10  # display duration in seconds
        )
    except Exception:
        pass
158,824 | import copy
import random
import re
import shutil
import subprocess
import sys
import os
from datetime import timedelta
import json
import requests
from videotrans.configure import config
import time
if not config.params['gptsovits_role'].strip():
return None
from videotrans.util.playmp3 import AudioPlayer
if os.path.exists(jsonfile) and os.path.getsize(jsonfile) > 0:
with open(jsonfile, 'r', encoding='utf-8') as f:
cache = json.loads(f.read())
for it in cache.values():
namelist.append(it['name'])
from videotrans.separate import st
try:
path=os.path.dirname(targe_audio)
vocal_file=os.path.join(path,'vocal.wav')
if not os.path.exists(vocal_file):
set_process(config.transobj['Separating vocals and background music, which may take a longer time'])
try:
st.start(targe_audio,path)
except Exception as e:
msg=f"separate vocal and background music:{str(e)}"
set_process(msg)
raise Exception(msg)
if not os.path.exists(vocal_file):
return False
# 再将 vocal.wav 转为1通道,8000采样率,方便识别
runffmpeg([
"-y",
"-i",
vocal_file,
"-ac",
"1",
"-ar",
"8000",
os.path.join(path,'vocal8000.wav'),
])
except Exception as e:
print("end")
msg=f"separate vocal and background music:{str(e)}"
set_process(msg)
raise Exception(msg)
os.environ['temp'],f'{time.time()}.wav')
tmpm4a=os.path.join(os.environ["TEMP"] or os.environ['temp'],f'{time.time()}.m4a')
# 背景转为m4a文件,音量降低为0.8
wav2m4a(backwav,tmpm4a,["-filter:a",f"volume={config.settings['backaudio_volume']}"])
runffmpeg(['-y', '-i', peiyinm4a, '-i', tmpm4a, '-filter_complex',"[0:a][1:a]amix=inputs=2:duration=first:dropout_transition=2", '-ac', '2', tmpwav])
shutil.copy2(tmpwav,peiyinm4a)
# 转为 m4a
wav2m4a(wavfile, m4afile,extra=None):
cmd = [
"-y",
"-i",
wavfile,
"-c:a",
"aac",
m4afile
]
if extra:
cmd=cmd[:3]+extra+cmd[3:]
return runffmpeg(cmd,disable_gpu=True)
wav2mp3(wavfile, mp3file,extra=None):
cmd = [
"-y",
"-i",
wavfile,
mp3file
]
if extra:
cmd=cmd[:3]+extra+cmd[3:]
return runffmpeg(cmd,disable_gpu=True)
m4a2wav(m4afile, wavfile):
cmd = [
"-y",
"-i",
m4afile,
"-ac",
"1",
"-ar",
"8000",
"-b:a",
"128k",
"-c:a",
"pcm_s16le",
wavfile
]
return runffmpeg(cmd,disable_gpu=True)
_fromvideo(file_path, img):
return runffmpeg(
['-y', '-sseof', '-3', '-i', f'{file_path}', '-q:v', '1', '-qmin:v', '1', '-qmax:v', '1', '-update', 'true',
f'{img}'], de_format="nv12",use_run=True)
img=None, fps=30, scale=None, totime=None, out=None):
return runffmpeg([
'-loop', '1', '-i', f'{img}', '-vf', f'fps={fps},scale={scale[0]}:{scale[1]}', '-c:v', "libx264",
'-crf', f'{config.settings["crf"]}', '-to', f'{totime}', '-y', out], no_decode=True,de_format="nv12",use_run=True)
(filelist, filename):
txt = []
for it in filelist:
txt.append(f"file '{it}'")
with open(filename, 'w', encoding='utf-8') as f:
f.write("\n".join(txt))
return filename
i_mp4(*, filelist=[], out=None,maxsec=None):
# 创建txt文件
txt = config.TEMP_DIR + f"/{time.time()}.txt"
create_concat_txt(filelist, txt)
if maxsec:
return runffmpeg(['-y', '-f', 'concat', '-safe', '0', '-i', txt, '-c:v', "copy",'-t', f"{maxsec}", '-crf', f'{config.settings["crf"]}', '-an',out],use_run=True)
return runffmpeg(['-y', '-f', 'concat', '-safe', '0', '-i', txt, '-c:v', "copy", '-crf', f'{config.settings["crf"]}', '-an',out],use_run=True)
i_audio(*, filelist=[], out=None):
# 创建txt文件
txt = config.TEMP_DIR + f"/{time.time()}.txt"
create_concat_txt(filelist, txt)
return runffmpeg(['-y', '-f', 'concat', '-safe', '0', '-i', txt, '-c:a','aac',out],disable_gpu=True,use_run=True)
ed_up_mp3(*, filename=None, speed=1, out=None):
return runffmpeg([
"-y",
"-i",
filename,
"-af",
f'atempo={speed}',
out
],use_run=True)
t):
#去掉空行
content=[it for it in content if it.strip()]
if len(content)<1:
return []
result=[]
maxindex=len(content)-1
# 时间格式
timepat = r'^\s*?\d+:\d+:\d+([\,\.]\d*?)?\s*?-->\s*?\d+:\d+:\d+([\,\.]\d*?)?\s*?$'
textpat=r'^[,./?`!@#$%^&*()_+=\\|\[\]{}~\s \n-]*$'
#print(content)
for i,it in enumerate(content):
#当前空行跳过
if not it.strip():
continue
it=it.strip()
is_time=re.match(timepat,it)
#print(f'{i=},{it=}')
if is_time:
#print(f'\t是时间')
#当前行是时间格式,则添加
result.append({"time":it,"text":[]})
elif i==0:
#当前是第一行,并且不是时间格式,跳过
#print(f'\t是0行跳过')
continue
elif re.match(r'^\s*?\d+\s*?$',it) and i< maxindex and re.match(timepat,content[i+1]):
#当前不是时间格式,不是第一行,并且都是数字,并且下一行是时间格式,则当前是行号,跳过
#print(f'\t是行号')
continue
elif len(result)>0 and not re.match(textpat,it):
#当前不是时间格式,不是第一行,(不是行号),并且result中存在数据,则是内容,可加入最后一个数据
result[-1]['text'].append(it.capitalize())
#再次遍历,去掉text为空的
result=[it for it in result if len(it['text'])>0]
if len(result)>0:
for i,it in enumerate(result):
result[i]['line']=i+1
result[i]['text']="\n".join([tx.capitalize() for tx in it['text']])
s,e=(it['time'].replace('.',',')).split('-->')
s=format_time(s,',')
e=format_time(e,',')
result[i]['time']=f'{s} --> {e}'
return result
e="",separate=','):
if not s_time.strip():
return f'00:00:00{separate}000'
s_time=s_time.strip()
hou,min,sec="00","00",f"00{separate}000"
tmp=s_time.split(':')
if len(tmp)>=3:
hou=tmp[-3].strip()
min=tmp[-2].strip()
sec=tmp[-1].strip()
elif len(tmp)==2:
min=tmp[0].strip()
sec=tmp[1].strip()
elif len(tmp)==1:
sec=tmp[0].strip()
if re.search(r',|\.',str(sec)):
sec,ms=re.split(r',|\.',str(sec))
sec=sec.strip()
ms=ms.strip()
else:
ms='000'
hou=hou if hou!="" else "00"
if len(hou)<2:
hou=f'0{hou}'
hou=hou[-2:]
min=min if min!="" else "00"
if len(min)<2:
min=f'0{min}'
min=min[-2:]
sec=sec if sec!="" else "00"
if len(sec)<2:
sec=f'0{sec}'
sec=sec[-2:]
ms_len=len(ms)
if ms_len<3:
for i in range(3-ms_len):
ms=f'0{ms}'
ms=ms[-3:]
return f"{hou}:{min}:{sec}{separate}{ms}"
e_mp4(novoice_mp4, noextname):
# 预先创建好的
# 判断novoice_mp4是否完成
t = 0
if noextname not in config.queue_novice and os.path.exists(novoice_mp4) and os.path.getsize(novoice_mp4) > 0:
return True
if noextname in config.queue_novice and config.queue_novice[noextname] == 'end':
return True
last_size = 0
while True:
if config.current_status != 'ing':
raise Exception("stop")
if os.path.exists(novoice_mp4):
current_size = os.path.getsize(novoice_mp4)
if last_size > 0 and current_size == last_size and t > 600:
return True
last_size = current_size
if noextname not in config.queue_novice:
msg = f"{noextname} split no voice videoerror:{config.queue_novice=}"
raise Exception(msg)
if config.queue_novice[noextname] == 'error':
msg = f"{noextname} split no voice videoerror"
raise Exception(msg)
if config.queue_novice[noextname] == 'ing':
size = f'{round(last_size / 1024 / 1024, 2)}MB' if last_size > 0 else ""
set_process(f"{noextname} {'分离音频和画面' if config.defaulelang=='zh' else 'spilt audio and video'} {size}")
time.sleep(3)
t += 3
continue
return True
to="", source="", pts="", out=""):
cmd1 = [
"-y",
"-ss",
format_time(ss,'.')]
if to != '':
cmd1.append("-to")
cmd1.append(format_time(to,'.')) # 如果开始结束时间相同,则强制持续时间1s)
cmd1.append('-i')
cmd1.append(source)
if pts:
cmd1.append("-vf")
cmd1.append(f'setpts={pts}*PTS')
cmd = cmd1 + ["-c:v",
"libx264",
"-crf",
f'{config.settings["crf"]}',
'-an',
f'{out}'
]
return runffmpeg(cmd,use_run=True)
*,ss,to,audio_file,out_file):
cmd=[
"-y",
"-i",
audio_file,
"-ss",
format_time(ss,'.'),
"-to",
format_time(to,'.'),
"-ar",
"8000",
out_file
]
return runffmpeg(cmd)
_role(set_p=False):
if not config.params['clone_api']:
if set_p:
raise Exception(config.transobj['bixutianxiecloneapi'])
return False
try:
url=config.params['clone_api'].strip().rstrip('/')+"/init"
res=requests.get('http://'+url.replace('http://',''),proxies={"http":"","https":""})
if res.status_code==200:
config.clone_voicelist=["clone"]+res.json()
set_process('','set_clone_role')
return True
raise Exception(f"code={res.status_code},{config.transobj['You must deploy and start the clone-voice service']}")
except Exception as e:
if set_p:
raise Exception(f'clone-voice:{str(e)}')
return False
ox(text, type='logs',*,func_name=""):
set_process(text, type, qname="box",func_name=func_name)
type="logs",*,qname='sp',func_name="",btnkey=None):
try:
if text:
log_msg = text.strip()
if log_msg.startswith("[error"):
config.logger.error(log_msg)
else:
config.logger.info(log_msg)
if qname == 'sp':
config.queue_logs.put_nowait({"text": text, "type": type,"btnkey": btnkey if btnkey else config.btnkey})
elif qname=='box':
config.queuebox_logs.put_nowait({"text": text, "type": type,"func_name":func_name})
else:
print(f'[{type}]: {text}')
except Exception as e:
pass
ext):
try:
files_and_dirs = os.listdir(directory)
# 遍历文件和子目录
for item in files_and_dirs:
item_path = os.path.join(directory, item)
# 如果是文件,且是 mp3 文件,删除之
if os.path.isfile(item_path) and item.lower().endswith(ext):
os.remove(item_path)
print(f"Deleted: {item_path}")
# 如果是子目录,递归调用删除函数
elif os.path.isdir(item_path):
delete_files(item_path)
except:
pass
patter=r'[ \s`"\'!@#$%^&*()=+,?\|{}\[\]]+'
if re.search(patter,file):
if is_dir:
os.makedirs(config.homedir+"/target_dir",exist_ok=True)
return True,config.homedir+"/target_dir",False
dirname=os.path.dirname(file)
basename=os.path.basename(file)
#目录不规则,迁移目录
if re.search(patter,dirname):
basename=re.sub(patter,'',basename,0,re.I)
basename=basename.replace(':','')
os.makedirs(config.homedir+"/rename",exist_ok=True)
newfile=config.homedir+f"/rename/{basename}"
shutil.copy2(file,newfile)
else:
#目录规则仅名称不规则,只修改名称
basename=re.sub(patter,'',basename,0,re.I)
basename=basename.replace(':','')
newfile=dirname+"/"+basename
shutil.copy2(file,newfile)
return True,newfile,basename
return False,False,False
io_time(audio_file):
# 如果存在缓存并且没有禁用缓存
out = runffprobe(['-v','quiet','-print_format','json','-show_format','-show_streams',audio_file])
if out is False:
raise Exception(f'ffprobe error:dont get video information')
out = json.loads(out)
return float(out['format']['duration'])
url():
google_url='https://translate.google.com'
if os.path.exists(os.path.join(config.rootdir,'google.txt')):
with open(os.path.join(config.rootdir,'google.txt'),'r') as f:
t=f.read().strip().splitlines()
urls=[x for x in t if x.strip() and x.startswith('http')]
if len(urls)>0:
n=0
while n<5:
google_url=random.choice(urls).rstrip('/')
try:
res=requests.head(google_url,proxies={"http":"","https":""})
if res.status_code==200:
return google_url
except:
msg=(f'测试失败: {google_url}')
config.logger.error(msg)
continue
finally:
n+=1
raise Exception(f'从google.txt中随机获取5次url,均未找到可用的google翻译反代地址,请检查')
return google_url
def rename_move(file,*,is_dir=False):
    """Copy *file* to a safely named location when its path contains
    characters that break shell/ffmpeg handling.

    Returns a (changed, new_path, new_basename) tuple:
      * (False, False, False) when the path is already safe,
      * (True, <safe dir>, False) for directories,
      * (True, <new file path>, <new basename>) for files.
    """
    unsafe = r'[ \s`"\'!@#$%^&*()=+,?\|{}\[\]]+'
    if not re.search(unsafe, file):
        # path is already safe: nothing to do
        return False, False, False
    if is_dir:
        safe_dir = config.homedir + "/target_dir"
        os.makedirs(safe_dir, exist_ok=True)
        return True, safe_dir, False
    folder = os.path.dirname(file)
    # strip every unsafe character (and Windows-hostile ':') from the name
    name = re.sub(unsafe, '', os.path.basename(file), 0, re.I)
    name = name.replace(':', '')
    if re.search(unsafe, folder):
        # the directory itself is unsafe: relocate under homedir/rename
        os.makedirs(config.homedir + "/rename", exist_ok=True)
        target = config.homedir + f"/rename/{name}"
    else:
        # only the file name is unsafe: copy alongside the original
        target = folder + "/" + name
    shutil.copy2(file, target)
    return True, target, name
158,825 | import copy
import random
import re
import shutil
import subprocess
import sys
import os
from datetime import timedelta
import json
import requests
from videotrans.configure import config
import time
from videotrans.util.playmp3 import AudioPlayer
if os.path.exists(jsonfile) and os.path.getsize(jsonfile) > 0:
with open(jsonfile, 'r', encoding='utf-8') as f:
cache = json.loads(f.read())
for it in cache.values():
namelist.append(it['name'])
from videotrans.separate import st
def kill_ffmpeg_processes():
    """Force-kill every ffmpeg process owned by the current user.

    Best effort: failures (missing tools, permission errors) are swallowed
    so cleanup never crashes the caller.
    """
    import platform
    import signal
    import getpass
    try:
        system_platform = platform.system()
        current_user = getpass.getuser()
        if system_platform == "Windows":
            subprocess.call(f"taskkill /F /FI \"USERNAME eq {current_user}\" /IM ffmpeg.exe", shell=True)
        elif system_platform in ("Linux", "Darwin"):
            process = subprocess.Popen(['ps', '-U', current_user], stdout=subprocess.PIPE)
            out, err = process.communicate()
            for line in out.splitlines():
                if b'ffmpeg' not in line:
                    continue
                # First column of `ps` output is the PID. Guard each kill
                # individually: previously one failure (e.g. the process
                # exited between `ps` and `kill`, raising ProcessLookupError)
                # aborted killing all remaining ffmpeg processes.
                try:
                    pid = int(line.split(None, 1)[0])
                    os.kill(pid, signal.SIGKILL)
                except (ValueError, ProcessLookupError, PermissionError):
                    continue
    except Exception:
        # deliberately best-effort: never propagate cleanup errors
        pass
158,826 | import pandas as pd
import os
from airtable import Airtable
from siuba.ops.support import spec
from siuba import _, select, inner_join, unnest, mutate, left_join, filter, pipe
def strip_dict_nans(d):
    """Return a copy of *d* without the entries whose value is NaN/NA."""
    cleaned = {}
    for key, value in d.items():
        if pd.isna(value):
            continue
        cleaned[key] = value
    return cleaned
158,827 | import pandas as pd
import os
from airtable import Airtable
from siuba.ops.support import spec
from siuba import _, select, inner_join, unnest, mutate, left_join, filter, pipe
def get_backend_records(method_name, backends):
    """Flatten a {backend_name: fields} mapping into one record per backend.

    Each record carries the method name, the backend name, and the
    backend's remaining fields (which may override the first two keys,
    matching dict-merge semantics).
    """
    records = []
    for backend_name, fields in backends.items():
        record = {"method_name": method_name, "name": backend_name}
        record.update(fields)
        records.append(record)
    return records
158,828 | import pandas as pd
import os
from airtable import Airtable
from siuba.ops.support import spec
from siuba import _, select, inner_join, unnest, mutate, left_join, filter, pipe
def record_needs_update(src, dst):
    """Return True when *dst* is missing fields from *src* or holds
    different values for them."""
    dst_copy = dst.copy()
    # older records may lack the is_property flag; treat missing as False
    if 'is_property' in src:
        dst_copy.setdefault('is_property', False)
    if set(src) - set(dst_copy):
        # at least one source field is absent from the destination
        return True
    return any(dst_copy[field] != value for field, value in src.items())
158,829 | import itertools
import operator
from abc import ABC
from collections.abc import Mapping
class Call:
def __init__(self, func, *args, **kwargs):
def __repr__(self):
def __call__(self, x):
def __rshift__(self, x):
def __rrshift__(self, x):
def evaluate_calls(arg, x):
def copy(self) -> "Call":
def map_subcalls(self, f, args = tuple(), kwargs = None):
def map_replace(self, f):
def iter_subcalls(self, f):
def op_vars(self, attr_calls = True):
def _get_op_var(self):
def obj_name(self):
def _construct_pipe(lhs, rhs):
class MetaArg(Call):
def __init__(self, func, *args, **kwargs):
def __repr__(self):
def __call__(self, x):
def str_to_getitem_call(x):
    # Convert a plain string column name into the call `_[x]`, i.e. an
    # item access on the MetaArg placeholder.
    return Call("__getitem__", MetaArg("_"), x)
158,830 | from typing import Any, Union, TypeVar
import inspect
def is_union(x):
    """Return True when *x* is a typing.Union annotation."""
    origin = getattr(x, '__origin__', None)
    return origin is Union
def get_union_args(x):
    """Return the member types of a Union annotation, or None when absent."""
    sentinel = object()
    args = getattr(x, '__args__', sentinel)
    if args is not sentinel:
        return args
    # very old typing versions exposed the members as __union_args__
    return getattr(x, '__union_args__', None)
def is_flex_subclass(x, cls):
    """issubclass check that treats typing.Any as a subclass of everything."""
    return x is Any or issubclass(x, cls)
The provided code snippet includes necessary dependencies for implementing the `is_dispatch_func_subtype` function. Write a Python function `def is_dispatch_func_subtype(f, input_cls, output_cls)` to solve the following problem:
Return whether a singledispatch function is a subtype of a given input and result class. A function is a subtype if it is contravariant in its input and covariant in its result. Rules for evaluating whether the return type <= output_cls: * Any always returns True * Union[A, B] returns True if either A or B is covariant * f(arg_name: TypeVar) -> TypeVar compares input_cls and output_cls * simple return types are checked via issubclass Args: input_cls - input class for the first argument to the function output_cls - output class for the function's result
Here is the function:
def is_dispatch_func_subtype(f, input_cls, output_cls):
"""Returns whether a singledispatch function is subtype of some input and result class.
A function is a subtype if it is input contravariant, and result covariant.
Rules for evaluating return types <= output_cls:
* Any always returns True
* Union[A, B] returns true if either A or B are covariant
* f(arg_name:TypeVar) -> TypeVar compares input_cls and output_cls
* Simple return types checked via issubclass
Args:
input_cls - input class for first argument to function
output_cls - output class for function result
"""
sig = inspect.signature(f)
# result annotation
res_type = sig.return_annotation
# first parameter annotation
par0 = next(iter(sig.parameters.values()))
par_type0 = par0.annotation
# Case 1: no annotation
if res_type is None:
return False
# Case 2: fancy annotations: Union, generic TypeVar
if is_union(res_type) and get_union_args(res_type):
sub_types = get_union_args(res_type)
# passes if any unioned types are subclasses
return any(map(lambda x: is_flex_subclass(x, output_cls), sub_types))
elif isinstance(res_type, TypeVar):
if res_type == par_type0:
# using a generic type variable as first arg and result
# return type must be covariant on input_cls
return issubclass(input_cls, output_cls) and res_type.__covariant__
else:
raise TypeError("Generic type used as result, but not as first parameter")
return is_flex_subclass(res_type, output_cls) | Returns whether a singledispatch function is subtype of some input and result class. A function is a subtype if it is input contravariant, and result covariant. Rules for evaluating return types <= output_cls: * Any always returns True * Union[A, B] returns true if either A or B are covariant * f(arg_name:TypeVar) -> TypeVar compares input_cls and output_cls * Simple return types checked via issubclass Args: input_cls - input class for first argument to function output_cls - output class for function result |
158,831 | from .calls import Call, FuncArg
from .error import ShortException
from .symbolic import strip_symbolic
from inspect import isclass, isfunction
from typing import get_type_hints
from .utils import is_dispatch_func_subtype
class Call:
    """Represent python operations.

    This class is responsible for representing the pieces of a python expression,
    as a function, along with its args and kwargs.

    For example, "some_object.a" would be represented at the function "__getattr__",
    with the args `some_object`, and `"a"`.

    Parameters
    ----------
    func :
        Name of the function called. Class methods are represented using the names
        they have when defined on the class.
    *args :
        Arguments the function call uses.
    **kwargs :
        Keyword arguments the function call uses.

    Examples
    --------

    >>> Call("__add__", 1, 1)
    (1 + 1)

    See Also
    --------
    siuba.siu.Symbolic : Helper class for creating Calls.
    """

    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __repr__(self):
        """Return a (best guess) python code representation of the Call.

        Note that this is not necessarily valid python code (e.g. if a python
        function is passed as a call argument).
        """
        # TODO: format binary, unary, call, associative
        if self.func in BINARY_OPS:
            op_repr = BINARY_OPS[self.func]
            fmt = "({args[0]} {func} {args[1]})"
        elif self.func in UNARY_OPS:
            op_repr = UNARY_OPS[self.func]
            fmt = "({func}{args[0]})"
        elif self.func == "getattr":
            op_repr = "."
            fmt = "({args[0]}{func}{args[1]})"
        else:
            op_repr, *arg_str = map(repr, self.args)
            kwarg_str = (str(k) + " = " + repr(v) for k, v in self.kwargs.items())

            combined_arg_str = ",".join(itertools.chain(arg_str, kwarg_str))
            fmt = "{}({})".format(op_repr, combined_arg_str)
            return fmt

        return fmt.format(
                func = op_repr or self.func,
                args = self.args,
                kwargs = self.kwargs
                )

    def __call__(self, x):
        """Evaluate a call over some context and return the result.

        Note that subclasses like MetaArg, simply return the context, so that a call
        acts like a unary function, with ``x`` as its argument.

        Parameters
        ----------
        x :
            Object passed down the call tree as context.

        Examples
        --------

        >>> expr = Call("__add__", MetaArg("_"), 2)
        >>> expr(1)   # 1 + 2
        3

        """
        args, kwargs = self.map_subcalls(self.evaluate_calls, args = (x,))
        inst, *rest = args

        #inst, *rest = (self.evaluate_calls(arg, x) for arg in self.args)
        #kwargs = {k: self.evaluate_calls(v, x) for k, v in self.kwargs.items()}

        # TODO: temporary workaround, for when only __get_attribute__ is defined
        if self.func == "__getattr__":
            return getattr(inst, *rest)
        elif self.func == "__getitem__":
            return operator.getitem(inst, *rest)
        elif self.func == "__call__":
            return getattr(inst, self.func)(*rest, **kwargs)

        # in normal case, get method to call, and then call it
        f_op = getattr(operator, self.func)
        return f_op(inst, *rest, **kwargs)

    # TODO: type checks will be very useful here. Will need to import symbolic.
    #       Let's do this once types are in a _typing.py submodule.
    def __rshift__(self, x):
        """Create a"""
        from .symbolic import strip_symbolic

        stripped = strip_symbolic(x)

        if isinstance(stripped, Call):
            return self._construct_pipe(self, x)

        raise TypeError()

    def __rrshift__(self, x):
        from .symbolic import strip_symbolic
        if isinstance(strip_symbolic(x), (Call)):
            # only allow non-calls (i.e. data) on the left.
            raise TypeError()

        return self(x)

    # NOTE(review): must be a staticmethod -- it is handed to map_subcalls as
    # `self.evaluate_calls`; as a plain method the bound `self` consumed the
    # `arg` slot and every nested-Call evaluation raised TypeError.
    @staticmethod
    def evaluate_calls(arg, x):
        # evaluate child Call nodes against the context; pass literals through
        if isinstance(arg, Call): return arg(x)

        return arg

    def copy(self) -> "Call":
        """Return a copy of this call object.

        Note that copies are made of child calls, but not their arguments.
        """
        args, kwargs = self.map_subcalls(lambda child: child.copy())
        return self.__class__(self.func, *args, **kwargs)

    def map_subcalls(self, f, args = tuple(), kwargs = None):
        """Call a function on all child calls.

        Parameters
        ----------
        f :
            A function to call on any child calls.
        args:
            Optional position arguments to pass to ``f``.
        kwargs:
            Optional keyword arguments to pass to ``f``.

        Returns
        -------
        A tuple of (new_args, new_kwargs) that can be used to recreate the original
        Call object with transformed (copies of) child nodes.

        See Also
        --------
        copy : Recursively calls map_subcalls to clone a call tree.
        """
        if kwargs is None: kwargs = {}

        new_args = tuple(f(arg, *args, **kwargs) if isinstance(arg, Call) else arg for arg in self.args)
        new_kwargs = {k: f(v, *args, **kwargs) if isinstance(v, Call) else v for k, v in self.kwargs.items()}

        return new_args, new_kwargs

    def map_replace(self, f):
        # rebuild this node with f applied to every child call
        args, kwargs = self.map_subcalls(f)
        return self.__class__(self.func, *args, **kwargs)

    def iter_subcalls(self, f):
        # Yield direct child Call nodes from args and kwargs.
        # (``f`` is unused; kept for interface compatibility.)
        # NOTE(review): fixed `instance` -> `isinstance`, which raised
        # NameError on the first iteration.
        yield from iter(arg for arg in self.args if isinstance(arg, Call))
        yield from iter(v for k, v in self.kwargs.items() if isinstance(v, Call))

    def op_vars(self, attr_calls = True):
        """Return set of all variable names used in Call

        Args:
            attr_calls: whether to include called attributes (e.g. 'a' from _.a())
        """
        varnames = set()

        op_var = self._get_op_var()
        if op_var is not None:
            varnames.add(op_var)

        if (not attr_calls
            and self.func == "__call__"
            and isinstance(self.args[0], Call)
            and self.args[0].func == "__getattr__"
            ):
            # skip obj, since it fetches an attribute this node is calling
            prev_obj, prev_attr = self.args[0].args
            all_args = itertools.chain([prev_obj], self.args[1:], self.kwargs.values())
        else:
            all_args = itertools.chain(self.args, self.kwargs.values())

        for arg in all_args:
            if isinstance(arg, Call):
                varnames.update(arg.op_vars(attr_calls = attr_calls))

        return varnames

    def _get_op_var(self):
        # a getattr/getitem with a string key contributes that name
        if self.func in ("__getattr__", "__getitem__") and isinstance(self.args[1], str):
            return self.args[1]

    def obj_name(self):
        # best-effort name of the object this call operates on
        obj = self.args[0]
        if isinstance(obj, Call):
            if obj.func == "__getattr__":
                return obj.args[0]
        elif hasattr(obj, '__name__'):
            return obj.__name__

        return None

    # NOTE(review): must be a staticmethod -- __rshift__ above calls it as
    # `self._construct_pipe(self, x)` (two explicit arguments), and
    # Symbolic.__rshift__ calls it through the class.
    @staticmethod
    def _construct_pipe(lhs, rhs):
        if isinstance(lhs, PipeCall):
            lh_args = lhs.args

            # ensure we don't keep adding MetaArg to the left when
            # combining two pipes
            if lh_args and isinstance(lh_args[0], MetaArg):
                lh_args = lh_args[1:]
        else:
            lh_args = [lhs]

        if isinstance(rhs, PipeCall):
            rh_args = rhs.args

            # similar to above, but for rh args
            if rh_args and isinstance(rh_args[0], MetaArg):
                rh_args = rh_args[1:]
        else:
            rh_args = [rhs]

        return PipeCall(MetaArg("_"), *lh_args, *rh_args)
def get_attr_chain(node, max_n):
    """Collect up to *max_n* chained attribute accesses from a Call tree.

    Returns (attr_names, remaining_node): the attribute names in root-first
    order, and the first node that is not a __getattr__ call.
    """
    # TODO: need to make custom calls their own Call class, then will not have to
    # do these kinds of checks, since a __call__ will always be on a Call obj
    if not isinstance(node, Call):
        return [], node

    attrs = []
    current = node
    for _ in range(max_n):
        if current.func != "__getattr__":
            break
        parent, attr_name = current.args
        attrs.append(attr_name)
        current = parent

    attrs.reverse()
    return attrs, current
158,832 | import sys
import importlib
from abc import ABCMeta
def _load_class(mod_name: str, cls_name: str):
mod = importlib.import_module(mod_name)
return getattr(mod, cls_name) | null |
158,833 | from functools import singledispatch, update_wrapper, wraps
import inspect
from .calls import Call, FuncArg, MetaArg, Lazy, PipeCall, _Isolate
from .symbolic import Symbolic, create_sym_call, strip_symbolic
from typing import Callable
def _dispatch_not_impl(func_name):
def f(x, *args, **kwargs):
raise TypeError("singledispatch function {func_name} not implemented for type {type}"
.format(func_name = func_name, type = type(x))
)
return f
class Call:
"""Represent python operations.
This class is responsible for representing the pieces of a python expression,
as a function, along with its args and kwargs.
For example, "some_object.a" would be represented at the function "__getattr__",
with the args `some_object`, and `"a"`.
Parameters
----------
func :
Name of the function called. Class methods are represented using the names
they have when defined on the class.
*args :
Arguments the function call uses.
**kwargs :
Keyword arguments the function call uses.
Examples
--------
>>> Call("__add__", 1, 1)
(1 + 1)
See Also
--------
siuba.siu.Symbolic : Helper class for creating Calls.
"""
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __repr__(self):
"""Return a (best guess) python code representation of the Call.
Note that this is not necessarily valid python code (e.g. if a python
function is passed as a call argument).
"""
# TODO: format binary, unary, call, associative
if self.func in BINARY_OPS:
op_repr = BINARY_OPS[self.func]
fmt = "({args[0]} {func} {args[1]})"
elif self.func in UNARY_OPS:
op_repr = UNARY_OPS[self.func]
fmt = "({func}{args[0]})"
elif self.func == "getattr":
op_repr = "."
fmt = "({args[0]}{func}{args[1]})"
else:
op_repr, *arg_str = map(repr, self.args)
kwarg_str = (str(k) + " = " + repr(v) for k,v in self.kwargs.items())
combined_arg_str = ",".join(itertools.chain(arg_str, kwarg_str))
fmt = "{}({})".format(op_repr, combined_arg_str)
return fmt
return fmt.format(
func = op_repr or self.func,
args = self.args,
kwargs = self.kwargs
)
def __call__(self, x):
"""Evaluate a call over some context and return the result.
Note that subclasses like MetaArg, simply return the context, so that a call
acts like a unary function, with ``x`` as its argument.
Parameters
----------
x :
Object passed down the call tree as context.
Examples
--------
>>> expr = Call("__add__", MetaArg("_"), 2)
>>> expr(1) # 1 + 2
3
"""
args, kwargs = self.map_subcalls(self.evaluate_calls, args = (x,))
inst, *rest = args
#inst, *rest = (self.evaluate_calls(arg, x) for arg in self.args)
#kwargs = {k: self.evaluate_calls(v, x) for k, v in self.kwargs.items()}
# TODO: temporary workaround, for when only __get_attribute__ is defined
if self.func == "__getattr__":
return getattr(inst, *rest)
elif self.func == "__getitem__":
return operator.getitem(inst, *rest)
elif self.func == "__call__":
return getattr(inst, self.func)(*rest, **kwargs)
# in normal case, get method to call, and then call it
f_op = getattr(operator, self.func)
return f_op(inst, *rest, **kwargs)
# TODO: type checks will be very useful here. Will need to import symbolic.
# Let's do this once types are in a _typing.py submodule.
def __rshift__(self, x):
"""Create a"""
from .symbolic import strip_symbolic
stripped = strip_symbolic(x)
if isinstance(stripped, Call):
return self._construct_pipe(self, x)
raise TypeError()
def __rrshift__(self, x):
from .symbolic import strip_symbolic
if isinstance(strip_symbolic(x), (Call)):
# only allow non-calls (i.e. data) on the left.
raise TypeError()
return self(x)
def evaluate_calls(arg, x):
if isinstance(arg, Call): return arg(x)
return arg
def copy(self) -> "Call":
"""Return a copy of this call object.
Note that copies are made of child calls, but not their arguments.
"""
args, kwargs = self.map_subcalls(lambda child: child.copy())
return self.__class__(self.func, *args, **kwargs)
def map_subcalls(self, f, args = tuple(), kwargs = None):
"""Call a function on all child calls.
Parameters
----------
f :
A function to call on any child calls.
args:
Optional position arguments to pass to ``f``.
kwargs:
Optional keyword arguments to pass to ``f``.
Returns
-------
A tuple of (new_args, new_kwargs) that can be used to recreate the original
Call object with transformed (copies of) child nodes.
See Also
--------
copy : Recursively calls map_subcalls to clone a call tree.
"""
if kwargs is None: kwargs = {}
new_args = tuple(f(arg, *args, **kwargs) if isinstance(arg, Call) else arg for arg in self.args)
new_kwargs = {k: f(v, *args, **kwargs) if isinstance(v, Call) else v for k,v in self.kwargs.items()}
return new_args, new_kwargs
def map_replace(self, f):
args, kwargs = self.map_subcalls(f)
return self.__class__(self.func, *args, **kwargs)
def iter_subcalls(self, f):
yield from iter(arg for arg in self.args if instance(arg, Call))
yield from iter(v for k,v in self.kwargs.items() if isinstance(v, Call))
def op_vars(self, attr_calls = True):
"""Return set of all variable names used in Call
Args:
attr_calls: whether to include called attributes (e.g. 'a' from _.a())
"""
varnames = set()
op_var = self._get_op_var()
if op_var is not None:
varnames.add(op_var)
if (not attr_calls
and self.func == "__call__"
and isinstance(self.args[0], Call)
and self.args[0].func == "__getattr__"
):
# skip obj, since it fetches an attribute this node is calling
prev_obj, prev_attr = self.args[0].args
all_args = itertools.chain([prev_obj], self.args[1:], self.kwargs.values())
else:
all_args = itertools.chain(self.args, self.kwargs.values())
for arg in all_args:
if isinstance(arg, Call):
varnames.update(arg.op_vars(attr_calls = attr_calls))
return varnames
def _get_op_var(self):
if self.func in ("__getattr__", "__getitem__") and isinstance(self.args[1], str):
return self.args[1]
def obj_name(self):
obj = self.args[0]
if isinstance(obj, Call):
if obj.func == "__getattr__":
return obj.args[0]
elif hasattr(obj, '__name__'):
return obj.__name__
return None
def _construct_pipe(lhs, rhs):
if isinstance(lhs, PipeCall):
lh_args = lhs.args
# ensure we don't keep adding MetaArg to the left when
# combining two pipes
if lh_args and isinstance(lh_args[0], MetaArg):
lh_args = lh_args[1:]
else:
lh_args = [lhs]
if isinstance(rhs, PipeCall):
rh_args = rhs.args
# similar to above, but for rh args
if rh_args and isinstance(rh_args[0], MetaArg):
rh_args = rh_args[1:]
else:
rh_args = [rhs]
return PipeCall(MetaArg("_"), *lh_args, *rh_args)
class FuncArg(Call):
    """Represent a function to be called (a function literal in a call tree)."""

    def __init__(self, func, *args, **kwargs):
        # "__custom_func__" is the sentinel marking this node type; when it
        # arrives as func, the real function is the first positional argument.
        if func == '__custom_func__':
            func = args[0]

        self.func = '__custom_func__'
        self.args = (func,)
        self.kwargs = {}

    def __repr__(self):
        return repr(self.args[0])

    def __call__(self, x):
        # evaluating a FuncArg yields the wrapped function itself
        return self.args[0]
class Symbolic(object):
    """Lazy-expression builder: records operations on ``_`` as a Call tree.

    Nearly every operation on a Symbolic returns a new Symbolic wrapping a
    larger Call tree; the tree is evaluated later against concrete data.
    """

    def __init__(self, source = None, ready_to_call = False):
        # __source holds the underlying Call tree; defaults to the bare `_`
        self.__source = MetaArg("_") if source is None else source
        self.__ready_to_call = ready_to_call

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """Handle numpy universal functions. E.g. np.sqrt(_)."""
        return array_ufunc(self, ufunc, method, *inputs, **kwargs)

    def __array_function__(self, func, types, args, kwargs):
        # numpy high-level function protocol (e.g. np.sum(_))
        return array_function(self, func, types, *args, **kwargs)

    # since we override __eq__, we must explicitly set the hash method back to default
    __hash__ = object.__hash__

    # allowed methods ----

    def __getattr__(self, x):
        # temporary hack working around ipython pretty.py printing
        #if x == "__class__": return Symbolic

        return Symbolic(BinaryOp(
            "__getattr__",
            self.__source,
            strip_symbolic(x)
            ))

    def __call__(self, *args, **kwargs) -> "Symbolic":
        # a "ready to call" source is evaluated directly; otherwise build
        # a __call__ node on top of the current expression
        if self.__ready_to_call:
            return self.__source(*args, **kwargs)

        return create_sym_call(self.__source, *args, **kwargs)

    def __getitem__(self, x):
        return Symbolic(BinaryOp(
            "__getitem__",
            self.__source,
            slice_to_call(x),
            ),
            ready_to_call = True)

    def __invert__(self):
        # double inversion cancels: ~~expr returns the inner expression
        if isinstance(self.__source, Call) and self.__source.func == "__invert__":
            return self.__source.args[0]
        else:
            return self.__op_invert()

    def __op_invert(self):
        return Symbolic(UnaryOp('__invert__', self.__source), ready_to_call = True)

    def __rshift__(self, x):
        # Note that this and __rrshift__ are copied from Call

        stripped = strip_symbolic(x)

        if isinstance(stripped, Call):
            lhs_call = self.__source
            return self.__class__(Call._construct_pipe(lhs_call, stripped))
        # strip_symbolic(self)(x)
        # x is a symbolic
        raise NotImplementedError("Symbolic may only be used on right-hand side of >> operator.")

    def __rrshift__(self, x):
        if isinstance(x, (Symbolic, Call)):
            raise NotImplementedError()
        return strip_symbolic(self)(x)

    # banned methods ----

    __contains__ = None
    __iter__ = None

    def __bool__(self):
        raise TypeError("Symbolic objects can not be converted to True/False, or used "
                "with these keywords: not, and, or.")

    # representation ----

    def __repr__(self):
        return Formatter().format(self.__source)

    # unary operators ----
    # note that __invert__ is handled in a custom way above
    __neg__ = create_unary_op("__neg__")
    __pos__ = create_unary_op("__pos__")
    __abs__ = create_unary_op("__abs__")

    # binary operators ----
    __add__ = create_binary_op("__add__")
    __sub__ = create_binary_op("__sub__")
    __mul__ = create_binary_op("__mul__")
    __matmul__ = create_binary_op("__matmul__")
    __truediv__ = create_binary_op("__truediv__")
    __floordiv__ = create_binary_op("__floordiv__")
    __mod__ = create_binary_op("__mod__")
    __divmod__ = create_binary_op("__divmod__")
    __pow__ = create_binary_op("__pow__")
    __lshift__ = create_binary_op("__lshift__")
    __and__ = create_binary_op("__and__")
    __xor__ = create_binary_op("__xor__")
    __or__ = create_binary_op("__or__")
    __gt__ = create_binary_op("__gt__")
    __lt__ = create_binary_op("__lt__")
    __eq__ = create_binary_op("__eq__")
    __ne__ = create_binary_op("__ne__")
    __ge__ = create_binary_op("__ge__")
    __le__ = create_binary_op("__le__")
    # reflected variants (False flag: the symbolic is the right operand)
    __radd__ = create_binary_op("__radd__", False)
    __rsub__ = create_binary_op("__rsub__", False)
    __rmul__ = create_binary_op("__rmul__", False)
    __rmatmul__ = create_binary_op("__rmatmul__", False)
    __rtruediv__ = create_binary_op("__rtruediv__", False)
    __rfloordiv__ = create_binary_op("__rfloordiv__", False)
    __rmod__ = create_binary_op("__rmod__", False)
    __rdivmod__ = create_binary_op("__rdivmod__", False)
    __rpow__ = create_binary_op("__rpow__", False)
    __rlshift__ = create_binary_op("__rlshift__", False)
    __rand__ = create_binary_op("__rand__", False)
    __rxor__ = create_binary_op("__rxor__", False)
    __ror__ = create_binary_op("__ror__", False)
def create_sym_call(__source, *args, **kwargs):
    """Build a __call__ Call node over *__source* and wrap it in a Symbolic.

    Symbolic arguments are stripped to their underlying Calls first; the
    result is marked ready_to_call so invoking it evaluates the tree.
    """
    stripped_args = [strip_symbolic(a) for a in args]
    stripped_kwargs = {k: strip_symbolic(v) for k, v in kwargs.items()}
    node = Call("__call__", strip_symbolic(__source), *stripped_args, **stripped_kwargs)
    return Symbolic(node, ready_to_call = True)
def strip_symbolic(x):
    """Return the underlying Call of a Symbolic; pass anything else through."""
    if not isinstance(x, Symbolic):
        return x
    # reach the name-mangled source attribute directly, bypassing __getattr__
    return x.__dict__["_Symbolic__source"]
The provided code snippet includes necessary dependencies for implementing the `symbolic_dispatch` function. Write a Python function `def symbolic_dispatch(f = None, cls = object)` to solve the following problem:
Return a generic dispatch function with symbolic data implementations. The function dispatches (Call or Symbolic) -> FuncArg. Parameters ---------- cls : A class to dispatch on. f : A function to call if no classes match while dispatching. Examples -------- Here is an example of running separate add functions on integers and strings. >>> @symbolic_dispatch(cls = int) ... def add1(x): return x + 1 >>> @add1.register(str) ... def _add1_str(x): return int(x) + 1 >>> add1(1) 2 >>> add1("1") 2 Note that passing a symbolic causes it to return a symbolic, so you can continue creating expressions. >>> from siuba.siu import _ >>> type(add1(_.a.b) + _.c.d) <class 'siuba.siu.symbolic.Symbolic'> symbolic dispatch raises a NotImplementedError by default if no function ``f`` is passed. However, you can override the default as follows: >>> @symbolic_dispatch ... def my_func(x): raise NotImplementedError("some error message")
Here is the function:
def symbolic_dispatch(f = None, cls = object):
    """Return a generic dispatch function with symbolic data implementations.

    The function dispatches (Call or Symbolic) -> FuncArg.

    Parameters
    ----------
    cls :
        A class to dispatch on.
    f :
        A function to call if no classes match while dispatching.

    Examples
    --------
    Here is an example of running separate add functions on integers and strings.

    >>> @symbolic_dispatch(cls = int)
    ... def add1(x): return x + 1

    >>> @add1.register(str)
    ... def _add1_str(x): return int(x) + 1

    >>> add1(1)
    2

    >>> add1("1")
    2

    Note that passing a symbolic causes it to return a symbolic, so you can continue
    creating expressions.

    >>> from siuba.siu import _
    >>> type(add1(_.a.b) + _.c.d)
    <class 'siuba.siu.symbolic.Symbolic'>

    When ``cls`` is given, the fallback registered for plain objects raises a
    ``TypeError`` for unhandled types. You can override this default behavior
    by passing ``f`` directly:

    >>> @symbolic_dispatch
    ... def my_func(x): raise NotImplementedError("some error message")

    """
    # decorator-with-arguments form: @symbolic_dispatch(cls = ...)
    if f is None:
        return lambda f: symbolic_dispatch(f, cls)

    # TODO: don't use singledispatch if it has already been done
    dispatch_func = singledispatch(f)

    if cls is not object:
        # f handles cls; anything else falls through to a TypeError raiser
        dispatch_func.register(cls, f)
        dispatch_func.register(object, _dispatch_not_impl(dispatch_func.__name__))

    @dispatch_func.register(Symbolic)
    def _dispatch_symbol(__data, *args, **kwargs):
        return create_sym_call(FuncArg(dispatch_func), strip_symbolic(__data), *args, **kwargs)

    @dispatch_func.register(Call)
    def _dispatch_call(__data, *args, **kwargs):
        # TODO: want to just create call, for now use hack of creating a symbolic
        #       call and getting the source off of it...
        return strip_symbolic(create_sym_call(FuncArg(dispatch_func), __data, *args, **kwargs))

    return dispatch_func
158,834 | from functools import singledispatch, update_wrapper, wraps
import inspect
from .calls import Call, FuncArg, MetaArg, Lazy, PipeCall, _Isolate
from .symbolic import Symbolic, create_sym_call, strip_symbolic
from typing import Callable
# Sentinel type: stands in for "the verb was called with no positional args".
class NoArgs: pass
def pipe_no_args(f):
    """Register a concrete function that handles when a verb received no arguments.

    Dispatching on the NoArgs sentinel (see verb_dispatch) yields a pipe that
    re-invokes the verb with the placeholder `_` as its data argument.
    """
    def wrapper(__data, *args, **kwargs):
        # e.g. head() -> Pipeable(_ -> head(_))
        return create_pipe_call(f, MetaArg("_"), *args, **kwargs)

    # NOTE(review): the wrapper was created but never attached to f, making
    # this function a no-op; register it for NoArgs as verb_dispatch expects
    # (it calls `dispatch_func(NoArgs(), ...)` for zero-argument verbs).
    f.register(NoArgs, wrapper)

    return f
def register_pipe(f, cls):
    """Register a concrete function that returns a Pipeable when called."""
    def wrapper(*args, **kwargs):
        return create_pipe_call(f, MetaArg("_"), *args, **kwargs)

    # NOTE(review): the wrapper was created but never registered, leaving
    # this function a no-op; attach it for cls as verb_dispatch expects
    # ("set the default object dispatcher to create a pipe").
    f.register(cls, wrapper)

    return f
def register_pipe_call(f):
    """Register a concrete function that builds a pipe when given a Call."""
    def f_dispatch(__data, *args, **kwargs):
        call = __data
        if isinstance(call, MetaArg):
            # single _ passed as first arg to function
            # e.g. mutate(_, _.id) -> Pipeable(_ -> mutate(_, _.id))
            return create_pipe_call(f, call, *args, **kwargs)
        else:
            # more complex _ expr passed as first arg to function
            # e.g. mutate(_.id) -> Pipeable(_ -> mutate(_, _.id))
            return create_pipe_call(f, MetaArg("_"), call, *args, **kwargs)

    # NOTE(review): f_dispatch was defined but never registered, leaving this
    # function a no-op; attach it for Call as verb_dispatch expects
    # ("register dispatcher for Call, and NoArgs").
    f.register(Call, f_dispatch)

    return f
def strip_symbolic(x):
    # Unwrap a Symbolic to its underlying Call; non-Symbolic values pass through.
    if isinstance(x, Symbolic):
        # access the name-mangled attribute directly, bypassing __getattr__
        return x.__dict__["_Symbolic__source"]

    return x
The provided code snippet includes necessary dependencies for implementing the `verb_dispatch` function. Write a Python function `def verb_dispatch(cls, f = None)` to solve the following problem:
Wrap singledispatch, making sure to keep its attributes on the wrapper. This wrapper has three jobs: 1. strip symbols off of calls 2. pass a NoArgs instance for calls like some_func(), so the dispatcher can handle them 3. return a Pipeable when the first arg of a call is a symbol Parameters ---------- cls : A class to dispatch on. f : A function to call if no classes match while dispatching.
Here is the function:
def verb_dispatch(cls, f = None):
    """Wrap singledispatch. Making sure to keep its attributes on the wrapper.

    This wrapper has three jobs:
        1. strip symbols off of calls
        2. pass NoArgs instance for calls like some_func(), so dispatcher can handle
        3. return a Pipeable when the first arg of a call is a symbol

    Parameters
    ----------
    cls :
        A class (or tuple of classes) to dispatch on.
    f :
        A function to call if no classes match while dispatching.

    Returns
    -------
    A wrapper around the singledispatch function that strips Symbolic
    arguments and dispatches a NoArgs sentinel for zero-argument calls.
    """
    # classic way of allowing args to a decorator
    if f is None:
        return lambda f: verb_dispatch(cls, f)

    # initially registers func for object, so need to change to pd.DataFrame
    dispatch_func = singledispatch(f)

    if isinstance(cls, tuple):
        for c in cls: dispatch_func.register(c, f)
    else:
        dispatch_func.register(cls, f)

    # then, set the default object dispatcher to create a pipe
    register_pipe(dispatch_func, object)

    # register dispatcher for Call, and NoArgs
    register_pipe_call(dispatch_func)
    pipe_no_args(dispatch_func)

    @wraps(dispatch_func)
    def wrapper(*args, **kwargs):
        # symbols are stripped so implementations see plain Calls / data
        strip_args = map(strip_symbolic, args)
        strip_kwargs = {k: strip_symbolic(v) for k, v in kwargs.items()}

        if not args:
            # e.g. head() -- dispatch on the NoArgs sentinel
            return dispatch_func(NoArgs(), **strip_kwargs)

        return dispatch_func(*strip_args, **strip_kwargs)

    return wrapper
158,835 | from functools import singledispatch, update_wrapper, wraps
import inspect
from .calls import Call, FuncArg, MetaArg, Lazy, PipeCall, _Isolate
from .symbolic import Symbolic, create_sym_call, strip_symbolic
from typing import Callable
def call(__func: "Callable | Call | Symbolic", *args, **kwargs):
    """Allow a function call to be used in a call (with >>).

    Parameters
    ----------
    __func:
        A function to be called as part of a call.
    *args:
        Additional position arguments to pass to the function.
    **kwargs:
        Additional keyword arguments to pass to the function.

    Examples
    --------
    The simplest use of the call is passing just the to-be-called function.

    >>> s = "a string"
    >>> s >> call(print)
    a string

    This is equivalent to explicitly passing ``_`` as a placeholder.

    >>> from siuba import _
    >>> s >> call(print, _)
    a string

    The explicit syntax is useful, because it allows us to pass more arguments.
    For example, the code below passes additional arguments to print.

    >>> "a" >> call(print, _, "string", sep=" ")
    a string

    You can transform the input data. For example, the code below passes "shout".upper()
    to print.

    >>> "shout" >> call(print, _.upper())
    SHOUT

    Since ``_`` is just a placeholder for the data on the left-hand-side of >>, you
    can pass it multiple times to the to-be-called function (e.g. print).

    >>> "nice" >> call(print, _, _, sep=" ")
    nice nice

    Alternatively, you can pass a siu expression straight to call.

    >>> "abc" >> call(_[0].upper())
    'A'
    """
    # Case 1: first argument is already a siu expression -- just unwrap it.
    if isinstance(__func, (Symbolic, Call)):
        if args or kwargs:
            raise NotImplementedError(
                "If a siu expression (e.g. _) is the first argument to call, it must "
                "be the only argument. You can pass arguments using the form, "
                "call(_.some_method(1, 2, c = 3))."
            )
        return strip_symbolic(__func)

    # Case 2: explicit arguments supplied along with the function.
    if args or kwargs:
        return create_eager_pipe_call(__func, *args, **kwargs)

    # Case 3: implicit form -- call(some_func) behaves like call(some_func, _).
    return create_eager_pipe_call(__func, MetaArg("_"))
class Call:
    """Represent python operations.

    This class is responsible for representing the pieces of a python expression,
    as a function, along with its args and kwargs.

    For example, "some_object.a" would be represented at the function "__getattr__",
    with the args `some_object`, and `"a"`.

    Parameters
    ----------
    func :
        Name of the function called. Class methods are represented using the names
        they have when defined on the class.
    *args :
        Arguments the function call uses.
    **kwargs :
        Keyword arguments the function call uses.

    Examples
    --------
    >>> Call("__add__", 1, 1)
    (1 + 1)

    See Also
    --------
    siuba.siu.Symbolic : Helper class for creating Calls.
    """

    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __repr__(self):
        """Return a (best guess) python code representation of the Call.

        Note that this is not necessarily valid python code (e.g. if a python
        function is passed as a call argument).
        """
        # TODO: format binary, unary, call, associative
        if self.func in BINARY_OPS:
            op_repr = BINARY_OPS[self.func]
            fmt = "({args[0]} {func} {args[1]})"
        elif self.func in UNARY_OPS:
            op_repr = UNARY_OPS[self.func]
            fmt = "({func}{args[0]})"
        elif self.func == "getattr":
            op_repr = "."
            fmt = "({args[0]}{func}{args[1]})"
        else:
            # generic call-style formatting, e.g. some_func(1, b = 2)
            op_repr, *arg_str = map(repr, self.args)
            kwarg_str = (str(k) + " = " + repr(v) for k, v in self.kwargs.items())

            combined_arg_str = ",".join(itertools.chain(arg_str, kwarg_str))
            return "{}({})".format(op_repr, combined_arg_str)

        return fmt.format(
            func = op_repr or self.func,
            args = self.args,
            kwargs = self.kwargs
            )

    def __call__(self, x):
        """Evaluate a call over some context and return the result.

        Note that subclasses like MetaArg, simply return the context, so that a call
        acts like a unary function, with ``x`` as its argument.

        Parameters
        ----------
        x :
            Object passed down the call tree as context.

        Examples
        --------
        >>> expr = Call("__add__", MetaArg("_"), 2)
        >>> expr(1) # 1 + 2
        3
        """
        args, kwargs = self.map_subcalls(self.evaluate_calls, args = (x,))
        inst, *rest = args

        # TODO: temporary workaround, for when only __get_attribute__ is defined
        if self.func == "__getattr__":
            return getattr(inst, *rest)
        elif self.func == "__getitem__":
            return operator.getitem(inst, *rest)
        elif self.func == "__call__":
            return getattr(inst, self.func)(*rest, **kwargs)

        # in normal case, get method to call, and then call it
        f_op = getattr(operator, self.func)
        return f_op(inst, *rest, **kwargs)

    # TODO: type checks will be very useful here. Will need to import symbolic.
    #       Let's do this once types are in a _typing.py submodule.
    def __rshift__(self, x):
        """Combine this call with another call into a pipe."""
        from .symbolic import strip_symbolic

        stripped = strip_symbolic(x)

        if isinstance(stripped, Call):
            return self._construct_pipe(self, x)

        raise TypeError()

    def __rrshift__(self, x):
        # data >> call evaluates the call with data as context
        from .symbolic import strip_symbolic
        if isinstance(strip_symbolic(x), (Call)):
            # only allow non-calls (i.e. data) on the left.
            raise TypeError()

        return self(x)

    @staticmethod
    def evaluate_calls(arg, x):
        """Evaluate a child call against context x; return non-calls unchanged.

        FIX: must be a staticmethod -- it is invoked as ``f(arg, x)`` via
        map_subcalls and as ``self.evaluate_calls(arg, x)``; as a plain
        instance method those calls received an extra ``self`` argument.
        """
        if isinstance(arg, Call):
            return arg(x)

        return arg

    def copy(self) -> "Call":
        """Return a copy of this call object.

        Note that copies are made of child calls, but not their arguments.
        """
        args, kwargs = self.map_subcalls(lambda child: child.copy())
        return self.__class__(self.func, *args, **kwargs)

    def map_subcalls(self, f, args = tuple(), kwargs = None):
        """Call a function on all child calls.

        Parameters
        ----------
        f :
            A function to call on any child calls.
        args:
            Optional position arguments to pass to ``f``.
        kwargs:
            Optional keyword arguments to pass to ``f``.

        Returns
        -------
        A tuple of (new_args, new_kwargs) that can be used to recreate the original
        Call object with transformed (copies of) child nodes.

        See Also
        --------
        copy : Recursively calls map_subcalls to clone a call tree.
        """
        if kwargs is None:
            kwargs = {}

        new_args = tuple(f(arg, *args, **kwargs) if isinstance(arg, Call) else arg for arg in self.args)
        new_kwargs = {k: f(v, *args, **kwargs) if isinstance(v, Call) else v for k, v in self.kwargs.items()}

        return new_args, new_kwargs

    def map_replace(self, f):
        # rebuild this node, with each child call replaced by f(child)
        args, kwargs = self.map_subcalls(f)
        return self.__class__(self.func, *args, **kwargs)

    def iter_subcalls(self, f):
        # FIX: was `instance(...)`, a NameError; should be isinstance.
        # (the `f` parameter is unused but kept for interface compatibility)
        yield from iter(arg for arg in self.args if isinstance(arg, Call))
        yield from iter(v for k, v in self.kwargs.items() if isinstance(v, Call))

    def op_vars(self, attr_calls = True):
        """Return set of all variable names used in Call

        Args:
            attr_calls: whether to include called attributes (e.g. 'a' from _.a())
        """
        varnames = set()

        op_var = self._get_op_var()
        if op_var is not None:
            varnames.add(op_var)

        if (not attr_calls
            and self.func == "__call__"
            and isinstance(self.args[0], Call)
            and self.args[0].func == "__getattr__"
            ):
            # skip obj, since it fetches an attribute this node is calling
            prev_obj, prev_attr = self.args[0].args
            all_args = itertools.chain([prev_obj], self.args[1:], self.kwargs.values())
        else:
            all_args = itertools.chain(self.args, self.kwargs.values())

        for arg in all_args:
            if isinstance(arg, Call):
                varnames.update(arg.op_vars(attr_calls = attr_calls))

        return varnames

    def _get_op_var(self):
        # getattr/getitem nodes with a string key contribute that key as a name
        if self.func in ("__getattr__", "__getitem__") and isinstance(self.args[1], str):
            return self.args[1]

    def obj_name(self):
        # best-effort name of the object this call operates on, or None
        obj = self.args[0]
        if isinstance(obj, Call):
            if obj.func == "__getattr__":
                return obj.args[0]
        elif hasattr(obj, '__name__'):
            return obj.__name__

        return None

    @staticmethod
    def _construct_pipe(lhs, rhs):
        """Merge lhs and rhs into a single PipeCall, flattening nested pipes.

        FIX: must be a staticmethod -- __rshift__ invokes it as
        ``self._construct_pipe(self, x)``, which previously passed ``self``
        twice to a two-parameter instance method.
        """
        if isinstance(lhs, PipeCall):
            lh_args = lhs.args

            # ensure we don't keep adding MetaArg to the left when
            # combining two pipes
            if lh_args and isinstance(lh_args[0], MetaArg):
                lh_args = lh_args[1:]
        else:
            lh_args = [lhs]

        if isinstance(rhs, PipeCall):
            rh_args = rhs.args

            # similar to above, but for rh args
            if rh_args and isinstance(rh_args[0], MetaArg):
                rh_args = rh_args[1:]
        else:
            rh_args = [rhs]

        return PipeCall(MetaArg("_"), *lh_args, *rh_args)
class MetaArg(Call):
    """Placeholder node (``_``): calling it simply returns the data passed in."""

    def __init__(self, func, *args, **kwargs):
        # all constructor arguments are discarded; a MetaArg is always just "_"
        self.func = "_"
        self.args = ()
        self.kwargs = {}

    def __repr__(self):
        return self.func

    def __call__(self, x):
        # a FormulaContext holds named data; look the placeholder up by name
        if isinstance(x, FormulaContext):
            return x[self.func]

        return x
class PipeCall(Call):
    """Represent a pipeline of calls, each fed the result of the previous one.

    pipe(df, a, b, c)
    pipe(_, a, b, c)

    should options for first arg be only MetaArg or a non-call?
    """

    def __init__(self, func, *args, **kwargs):
        if isinstance(func, str) and func == "__siu_pipe_call__":
            # it was a mistake to make func the first parameter to Call,
            # so catch the marker name when it is passed and keep args as-is
            self.func = func
            self.args = args
        else:
            # normal construction: the first argument is really the first step
            self.func = "__siu_pipe_call__"
            self.args = (func, *args)

        if kwargs:
            raise ValueError("Keyword arguments are not allowed.")

        self.kwargs = {}

    def __call__(self, x=None):
        # Unlike other calls (which pass the same context to every argument),
        # a PipeCall threads each step's result into the next step.
        first, *steps = self.args

        data = first(x) if isinstance(first, MetaArg) else first
        for step in steps:
            data = step(data)

        return data

    def __repr__(self):
        inner = ",".join(map(repr, self.args))
        return f"{type(self).__name__}({inner})"
def strip_symbolic(x):
    """Unwrap a Symbolic to its underlying Call; pass anything else through."""
    # Symbolic overrides __getattr__, so fetch the name-mangled source
    # attribute via __dict__ rather than normal attribute access.
    return x.__dict__["_Symbolic__source"] if isinstance(x, Symbolic) else x
The provided code snippet includes necessary dependencies for implementing the `pipe` function. Write a Python function `def pipe(__data, *args: Callable)` to solve the following problem:
Pipe data through a chain of callables. Return the final result. Examples -------- Case 1: pipe regular functions >>> pipe({"a": 1}, lambda x: x["a"], lambda x: x + 1) 2 Case 2: pipe to siu expressions >>> from siuba import _ >>> pipe({"a": 1}, _["a"], _ + 1) 2 Case 3: call external function on siu expression >>> from siuba.siu import call >>> pipe({"a": 1}, call(isinstance, _["a"], int)) True Case 4: _ as first arg to delay >>> f = pipe(_, lambda x: x["a"]) >>> f PipeCall(...) >>> f({"a": 1}) 1 Example: using with verb >>> from siuba import _, summarize >>> from siuba.data import mtcars >>> pipe(mtcars, summarize(res = _.hp.mean())) res 0 146.6875
Here is the function:
def pipe(__data, *args: Callable):
    """Pipe data through a chain of callables. Return the final result.

    Examples
    --------

    Case 1: pipe regular functions

    >>> pipe({"a": 1}, lambda x: x["a"], lambda x: x + 1)
    2

    Case 2: pipe to siu expressions

    >>> from siuba import _
    >>> pipe({"a": 1}, _["a"], _ + 1)
    2

    Case 3: call external function on siu expression

    >>> from siuba.siu import call
    >>> pipe({"a": 1}, call(isinstance, _["a"], int))
    True

    Case 4: _ as first arg to delay

    >>> f = pipe(_, lambda x: x["a"])
    >>> f
    PipeCall(...)

    >>> f({"a": 1})
    1

    Example: using with verb

    >>> from siuba import _, summarize
    >>> from siuba.data import mtcars
    >>> pipe(mtcars, summarize(res = _.hp.mean()))
            res
    0  146.6875

    """
    data = strip_symbolic(__data)

    # Special case: backwards compatibility with the old pipe() behavior;
    # call() and Call.__rrshift__ now handle this.
    if not args:
        return data if isinstance(data, Call) else call(data)

    steps = [strip_symbolic(step) for step in args]
    pipe_call = PipeCall(data, *steps)

    # when data is the placeholder _, defer execution and return the pipe itself
    if isinstance(data, MetaArg):
        return pipe_call

    return pipe_call()
158,836 | from contextlib import contextmanager, AbstractContextManager
import sys
import copy
The provided code snippet includes necessary dependencies for implementing the `last` function. Write a Python function `def last()` to solve the following problem:
Return last error with suppress context set to False. Note: makes a copy of last error, since interactive shells like ipython do later handling of exceptions, so a context manager is not an option.
Here is the function:
def last():
    """Return a copy of the last error with suppress context set to False.

    Note: makes a copy of the last error, since interactive shells like ipython
    do later handling of exceptions, so a context manager is not an option.

    Returns
    -------
    The copied exception, or None if no exception has been recorded.
    """
    # FIX: sys.last_value only exists after an unhandled exception; previously
    # this raised AttributeError when no error had occurred yet.
    err = getattr(sys, "last_value", None)
    if err is None:
        return None

    # FIX: actually make the copy the docstring promises, so the exception
    # object the shell still holds is not mutated.
    err = copy.copy(err)
    err.__suppress_context__ = False
    return err
158,837 | from functools import singledispatch
from .calls import Call, BinaryOp, BinaryRightOp, MetaArg, UnaryOp, SliceOp, FuncArg
from .format import Formatter
def strip_symbolic(x):
    """Unwrap a Symbolic to its underlying Call; pass anything else through."""
    # Symbolic overrides __getattr__, so fetch the name-mangled source
    # attribute via __dict__ rather than normal attribute access.
    return x.__dict__["_Symbolic__source"] if isinstance(x, Symbolic) else x
class BinaryOp(Call):
    """Represent binary call operations (e.g. the node behind ``_.a + _.b``)."""

    def __repr__(self):
        return self._repr(reverse = False)

    def _repr(self, reverse = False):
        # Render this node as (roughly) the python code that produced it.
        # `reverse` flips the printed operand order; BinaryRightOp uses it so
        # reflected ops (e.g. __radd__) print their operands in source order.
        func_name = self.get_func_name()
        # BINARY_LEVELS is a module-level table of per-operator levels; level 0
        # ops (like ".") are printed without surrounding spaces.
        level = BINARY_LEVELS[func_name]
        spaces = "" if level == 0 else " "

        args = self.args
        arg0 = "({args[0]})" if self.needs_paren(args[0]) else "{args[0]}"
        arg1 = "({args[1]})" if self.needs_paren(args[1]) else "{args[1]}"

        # handle binary ops that are not infix operators
        if self.func == "__getitem__":
            suffix = "]"
        else:
            suffix = ""

        # final, formatting
        fmt = arg0 + "{spaces}{func}{spaces}" + arg1 + suffix

        func = BINARY_OPS[func_name]
        if self.func == "__getattr__":
            # use bare string. eg _.a
            fmt_args = [repr(args[0]), args[1]]
        else:
            fmt_args = list(map(repr, args))

        if reverse:
            fmt_args = list(reversed(fmt_args))

        return fmt.format(func = func, args = fmt_args, spaces = spaces)

    def get_func_name(self):
        # map reflected op names (e.g. __radd__) onto their base name (__add__)
        return BINARY_RIGHT_OPS.get(self.func, self.func)

    def needs_paren(self, x):
        # Parenthesize a child BinaryOp whose (nonzero) BINARY_LEVELS value is
        # greater than this node's -- presumably higher level means looser
        # binding; confirm against the BINARY_LEVELS table.
        if isinstance(x, BinaryOp):
            sub_lvl = BINARY_LEVELS[x.get_func_name()]
            level = BINARY_LEVELS[self.get_func_name()]

            if sub_lvl != 0 and sub_lvl > level:
                return True

        return False
class BinaryRightOp(BinaryOp):
    """Represent right associative binary call operations (e.g. ``1 + _``)."""

    def __call__(self, x):
        # evaluate each child against the context x (non-calls pass through)
        inst, *rest = (self.evaluate_calls(arg, x) for arg in self.args)
        kwargs = {k: self.evaluate_calls(v, x) for k, v in self.kwargs.items()}

        # in normal case, get method to call, and then call it
        # BINARY_RIGHT_OPS maps e.g. "__radd__" -> "__add__", so we apply the
        # base operator with the operands swapped (rest before inst).
        func_name = BINARY_RIGHT_OPS[self.func]
        f_op = getattr(operator, func_name)

        # TODO: in practice rest only has 1 item, but this is not enforced..
        return f_op(*rest, inst, **kwargs)

    def __repr__(self):
        # print operands in reversed (source) order
        return self._repr(reverse = True)
def create_binary_op(op_name, left_op = True):
    """Return a method implementing the binary operator ``op_name``.

    ``left_op`` selects BinaryOp (e.g. __add__) vs BinaryRightOp (e.g. __radd__).
    """
    node_cls = BinaryOp if left_op else BinaryRightOp

    def _binary_op(self, x):
        node = node_cls(op_name, strip_symbolic(self), strip_symbolic(x))
        return self.__class__(node, ready_to_call = True)

    return _binary_op
158,838 | from functools import singledispatch
from .calls import Call, BinaryOp, BinaryRightOp, MetaArg, UnaryOp, SliceOp, FuncArg
from .format import Formatter
def strip_symbolic(x):
    """Unwrap a Symbolic to its underlying Call; pass anything else through."""
    # Symbolic overrides __getattr__, so fetch the name-mangled source
    # attribute via __dict__ rather than normal attribute access.
    return x.__dict__["_Symbolic__source"] if isinstance(x, Symbolic) else x
class UnaryOp(Call):
    """Represent unary call operations (e.g. the node behind ``-_.a``)."""

    def __repr__(self):
        # look up the operator symbol for this dunder name and prefix it
        op_symbol = UNARY_OPS[self.func]
        return "{func}{args[0]}".format(func = op_symbol, args = self.args, kwargs = self.kwargs)
def create_unary_op(op_name):
    """Return a method implementing the unary operator ``op_name``."""
    def _unary_op(self):
        wrapped = UnaryOp(op_name, strip_symbolic(self))
        return self.__class__(wrapped, ready_to_call = True)

    return _unary_op
158,839 | from functools import singledispatch
from .calls import Call, BinaryOp, BinaryRightOp, MetaArg, UnaryOp, SliceOp, FuncArg
from .format import Formatter
def strip_symbolic(x):
    """Unwrap a Symbolic to its underlying Call; pass anything else through."""
    # Symbolic overrides __getattr__, so fetch the name-mangled source
    # attribute via __dict__ rather than normal attribute access.
    return x.__dict__["_Symbolic__source"] if isinstance(x, Symbolic) else x
class SliceOp(ABC):
    """Factory class for representing single and extended slice calls.

    Note that it has SliceOpIndex, and SliceOpExt registered as subclasses, so
    that this class can be used rather than the specific implementations.
    """

    def __new__(cls, func, *args, **kwargs):
        # must be constructed in the same way as __getitem__ sees the indexer.
        # that is, a single argument (that can be a tuple)
        if len(args) != 1:
            raise ValueError("SliceOpIndex allows 1 argument, but received %s" % len(args))
        elif isinstance(args[0], tuple):
            # general case, where calling returns a tuple of indexers.
            # e.g. _['a', ], or _[1:, :, :, :]
            return _SliceOpExt(func, *args[0])
        else:
            # special case, where calling returns a single indexer, rather than
            # a tuple of indexers (e.g. _['a'], or _[1:])
            return _SliceOpIndex(func, args[0])


# Make isinstance(x, SliceOp) match the concrete implementation.
# NOTE(review): only _SliceOpExt is registered here -- presumably
# _SliceOpIndex subclasses it elsewhere; confirm.
SliceOp.register(_SliceOpExt)
def slice_to_call(x):
    """Convert an indexer (possibly holding Symbolics) into a SliceOp call."""
    # TODO: uses similar code to SliceOp. make a walk_slice function?
    def _strip(entry):
        # plain slices may hold symbolics in start/stop/step; strip each piece
        if isinstance(entry, slice):
            return slice(
                strip_symbolic(entry.start),
                strip_symbolic(entry.stop),
                strip_symbolic(entry.step),
            )
        return strip_symbolic(entry)

    stripped = tuple(_strip(el) for el in x) if isinstance(x, tuple) else _strip(x)

    return SliceOp("__siu_slice__", stripped)
158,840 | from functools import singledispatch
from .calls import Call, BinaryOp, BinaryRightOp, MetaArg, UnaryOp, SliceOp, FuncArg
from .format import Formatter
class Symbolic(object):
    """Lazy placeholder for data (the ``_`` object).

    Records attribute access, operators, indexing, and calls as a Call tree
    instead of executing them; the recorded expression can later be evaluated
    against data.
    """

    def __init__(self, source = None, ready_to_call = False):
        # the underlying Call tree; a bare Symbolic starts as the placeholder "_"
        self.__source = MetaArg("_") if source is None else source
        # when True, calling this Symbolic evaluates the expression directly
        self.__ready_to_call = ready_to_call

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """Handle numpy universal functions. E.g. np.sqrt(_)."""
        return array_ufunc(self, ufunc, method, *inputs, **kwargs)

    def __array_function__(self, func, types, args, kwargs):
        # numpy's high-level function protocol (e.g. np.sum(_))
        return array_function(self, func, types, *args, **kwargs)

    # since we override __eq__, we must explicitly set the hash method back to default
    __hash__ = object.__hash__

    # allowed methods ----

    def __getattr__(self, x):
        # record attribute access (_.a) as a __getattr__ node
        # temporary hack working around ipython pretty.py printing
        #if x == "__class__": return Symbolic

        return Symbolic(BinaryOp(
            "__getattr__",
            self.__source,
            strip_symbolic(x)
            ))

    def __call__(self, *args, **kwargs) -> "Symbolic":
        # when the expression is complete (ready_to_call), evaluate it now;
        # otherwise record the call as another node
        if self.__ready_to_call:
            return self.__source(*args, **kwargs)

        return create_sym_call(self.__source, *args, **kwargs)

    def __getitem__(self, x):
        # record indexing (_[...]) with symbolics in the indexer stripped
        return Symbolic(BinaryOp(
            "__getitem__",
            self.__source,
            slice_to_call(x),
            ),
            ready_to_call = True)

    def __invert__(self):
        # double inversion unwraps: ~~expr gives back the inner expression
        # NOTE(review): this branch returns the inner Call, not a Symbolic --
        # confirm that is intended.
        if isinstance(self.__source, Call) and self.__source.func == "__invert__":
            return self.__source.args[0]
        else:
            return self.__op_invert()

    def __op_invert(self):
        # record a ~expr node
        return Symbolic(UnaryOp('__invert__', self.__source), ready_to_call = True)

    def __rshift__(self, x):
        # Note that this and __rrshift__ are copied from Call

        stripped = strip_symbolic(x)

        if isinstance(stripped, Call):
            lhs_call = self.__source
            return self.__class__(Call._construct_pipe(lhs_call, stripped))

        # strip_symbolic(self)(x)
        # x is a symbolic
        raise NotImplementedError("Symbolic may only be used on right-hand side of >> operator.")

    def __rrshift__(self, x):
        # data >> _ : evaluate the recorded expression against the data
        if isinstance(x, (Symbolic, Call)):
            raise NotImplementedError()

        return strip_symbolic(self)(x)

    # banned methods ----

    __contains__ = None
    __iter__ = None

    def __bool__(self):
        raise TypeError("Symbolic objects can not be converted to True/False, or used "
                "with these keywords: not, and, or.")

    # representation ----

    def __repr__(self):
        return Formatter().format(self.__source)

    # unary operators ----
    # note that __invert__ is handled in a custom way above
    __neg__ = create_unary_op("__neg__")
    __pos__ = create_unary_op("__pos__")
    __abs__ = create_unary_op("__abs__")

    # binary operators ----
    __add__ = create_binary_op("__add__")
    __sub__ = create_binary_op("__sub__")
    __mul__ = create_binary_op("__mul__")
    __matmul__ = create_binary_op("__matmul__")
    __truediv__ = create_binary_op("__truediv__")
    __floordiv__ = create_binary_op("__floordiv__")
    __mod__ = create_binary_op("__mod__")
    __divmod__ = create_binary_op("__divmod__")
    __pow__ = create_binary_op("__pow__")
    __lshift__ = create_binary_op("__lshift__")
    __and__ = create_binary_op("__and__")
    __xor__ = create_binary_op("__xor__")
    __or__ = create_binary_op("__or__")
    __gt__ = create_binary_op("__gt__")
    __lt__ = create_binary_op("__lt__")
    __eq__ = create_binary_op("__eq__")
    __ne__ = create_binary_op("__ne__")
    __ge__ = create_binary_op("__ge__")
    __le__ = create_binary_op("__le__")

    # reflected binary operators (e.g. 1 + _) ----
    __radd__ = create_binary_op("__radd__", False)
    __rsub__ = create_binary_op("__rsub__", False)
    __rmul__ = create_binary_op("__rmul__", False)
    __rmatmul__ = create_binary_op("__rmatmul__", False)
    __rtruediv__ = create_binary_op("__rtruediv__", False)
    __rfloordiv__ = create_binary_op("__rfloordiv__", False)
    __rmod__ = create_binary_op("__rmod__", False)
    __rdivmod__ = create_binary_op("__rdivmod__", False)
    __rpow__ = create_binary_op("__rpow__", False)
    __rlshift__ = create_binary_op("__rlshift__", False)
    __rand__ = create_binary_op("__rand__", False)
    __rxor__ = create_binary_op("__rxor__", False)
    __ror__ = create_binary_op("__ror__", False)
def strip_symbolic(x):
    """Unwrap a Symbolic to its underlying Call; pass anything else through."""
    # Symbolic overrides __getattr__, so fetch the name-mangled source
    # attribute via __dict__ rather than normal attribute access.
    return x.__dict__["_Symbolic__source"] if isinstance(x, Symbolic) else x
The provided code snippet includes necessary dependencies for implementing the `explain` function. Write a Python function `def explain(symbol)` to solve the following problem:
Return a string representation that resembles the code used to create the symbol.
Here is the function:
def explain(symbol):
    """Return a representation resembling the code used to create ``symbol``."""
    target = strip_symbolic(symbol) if isinstance(symbol, Symbolic) else symbol
    return str(target)
158,841 | from functools import singledispatch
from .calls import Call, BinaryOp, BinaryRightOp, MetaArg, UnaryOp, SliceOp, FuncArg
from .format import Formatter
def array_function(self, func, types, *args, **kwargs):
    """Apply ``func`` to the arguments (numpy __array_function__ protocol).

    ``self`` and ``types`` come from numpy's protocol and are unused here.
    """
    return func(*args, **kwargs)
class Call:
    """Represent python operations.

    This class is responsible for representing the pieces of a python expression,
    as a function, along with its args and kwargs.

    For example, "some_object.a" would be represented at the function "__getattr__",
    with the args `some_object`, and `"a"`.

    Parameters
    ----------
    func :
        Name of the function called. Class methods are represented using the names
        they have when defined on the class.
    *args :
        Arguments the function call uses.
    **kwargs :
        Keyword arguments the function call uses.

    Examples
    --------
    >>> Call("__add__", 1, 1)
    (1 + 1)

    See Also
    --------
    siuba.siu.Symbolic : Helper class for creating Calls.
    """

    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __repr__(self):
        """Return a (best guess) python code representation of the Call.

        Note that this is not necessarily valid python code (e.g. if a python
        function is passed as a call argument).
        """
        # TODO: format binary, unary, call, associative
        if self.func in BINARY_OPS:
            op_repr = BINARY_OPS[self.func]
            fmt = "({args[0]} {func} {args[1]})"
        elif self.func in UNARY_OPS:
            op_repr = UNARY_OPS[self.func]
            fmt = "({func}{args[0]})"
        elif self.func == "getattr":
            op_repr = "."
            fmt = "({args[0]}{func}{args[1]})"
        else:
            # generic call-style formatting, e.g. some_func(1, b = 2)
            op_repr, *arg_str = map(repr, self.args)
            kwarg_str = (str(k) + " = " + repr(v) for k, v in self.kwargs.items())

            combined_arg_str = ",".join(itertools.chain(arg_str, kwarg_str))
            return "{}({})".format(op_repr, combined_arg_str)

        return fmt.format(
            func = op_repr or self.func,
            args = self.args,
            kwargs = self.kwargs
            )

    def __call__(self, x):
        """Evaluate a call over some context and return the result.

        Note that subclasses like MetaArg, simply return the context, so that a call
        acts like a unary function, with ``x`` as its argument.

        Parameters
        ----------
        x :
            Object passed down the call tree as context.

        Examples
        --------
        >>> expr = Call("__add__", MetaArg("_"), 2)
        >>> expr(1) # 1 + 2
        3
        """
        args, kwargs = self.map_subcalls(self.evaluate_calls, args = (x,))
        inst, *rest = args

        # TODO: temporary workaround, for when only __get_attribute__ is defined
        if self.func == "__getattr__":
            return getattr(inst, *rest)
        elif self.func == "__getitem__":
            return operator.getitem(inst, *rest)
        elif self.func == "__call__":
            return getattr(inst, self.func)(*rest, **kwargs)

        # in normal case, get method to call, and then call it
        f_op = getattr(operator, self.func)
        return f_op(inst, *rest, **kwargs)

    # TODO: type checks will be very useful here. Will need to import symbolic.
    #       Let's do this once types are in a _typing.py submodule.
    def __rshift__(self, x):
        """Combine this call with another call into a pipe."""
        from .symbolic import strip_symbolic

        stripped = strip_symbolic(x)

        if isinstance(stripped, Call):
            return self._construct_pipe(self, x)

        raise TypeError()

    def __rrshift__(self, x):
        # data >> call evaluates the call with data as context
        from .symbolic import strip_symbolic
        if isinstance(strip_symbolic(x), (Call)):
            # only allow non-calls (i.e. data) on the left.
            raise TypeError()

        return self(x)

    @staticmethod
    def evaluate_calls(arg, x):
        """Evaluate a child call against context x; return non-calls unchanged.

        FIX: must be a staticmethod -- it is invoked as ``f(arg, x)`` via
        map_subcalls and as ``self.evaluate_calls(arg, x)``; as a plain
        instance method those calls received an extra ``self`` argument.
        """
        if isinstance(arg, Call):
            return arg(x)

        return arg

    def copy(self) -> "Call":
        """Return a copy of this call object.

        Note that copies are made of child calls, but not their arguments.
        """
        args, kwargs = self.map_subcalls(lambda child: child.copy())
        return self.__class__(self.func, *args, **kwargs)

    def map_subcalls(self, f, args = tuple(), kwargs = None):
        """Call a function on all child calls.

        Parameters
        ----------
        f :
            A function to call on any child calls.
        args:
            Optional position arguments to pass to ``f``.
        kwargs:
            Optional keyword arguments to pass to ``f``.

        Returns
        -------
        A tuple of (new_args, new_kwargs) that can be used to recreate the original
        Call object with transformed (copies of) child nodes.

        See Also
        --------
        copy : Recursively calls map_subcalls to clone a call tree.
        """
        if kwargs is None:
            kwargs = {}

        new_args = tuple(f(arg, *args, **kwargs) if isinstance(arg, Call) else arg for arg in self.args)
        new_kwargs = {k: f(v, *args, **kwargs) if isinstance(v, Call) else v for k, v in self.kwargs.items()}

        return new_args, new_kwargs

    def map_replace(self, f):
        # rebuild this node, with each child call replaced by f(child)
        args, kwargs = self.map_subcalls(f)
        return self.__class__(self.func, *args, **kwargs)

    def iter_subcalls(self, f):
        # FIX: was `instance(...)`, a NameError; should be isinstance.
        # (the `f` parameter is unused but kept for interface compatibility)
        yield from iter(arg for arg in self.args if isinstance(arg, Call))
        yield from iter(v for k, v in self.kwargs.items() if isinstance(v, Call))

    def op_vars(self, attr_calls = True):
        """Return set of all variable names used in Call

        Args:
            attr_calls: whether to include called attributes (e.g. 'a' from _.a())
        """
        varnames = set()

        op_var = self._get_op_var()
        if op_var is not None:
            varnames.add(op_var)

        if (not attr_calls
            and self.func == "__call__"
            and isinstance(self.args[0], Call)
            and self.args[0].func == "__getattr__"
            ):
            # skip obj, since it fetches an attribute this node is calling
            prev_obj, prev_attr = self.args[0].args
            all_args = itertools.chain([prev_obj], self.args[1:], self.kwargs.values())
        else:
            all_args = itertools.chain(self.args, self.kwargs.values())

        for arg in all_args:
            if isinstance(arg, Call):
                varnames.update(arg.op_vars(attr_calls = attr_calls))

        return varnames

    def _get_op_var(self):
        # getattr/getitem nodes with a string key contribute that key as a name
        if self.func in ("__getattr__", "__getitem__") and isinstance(self.args[1], str):
            return self.args[1]

    def obj_name(self):
        # best-effort name of the object this call operates on, or None
        obj = self.args[0]
        if isinstance(obj, Call):
            if obj.func == "__getattr__":
                return obj.args[0]
        elif hasattr(obj, '__name__'):
            return obj.__name__

        return None

    @staticmethod
    def _construct_pipe(lhs, rhs):
        """Merge lhs and rhs into a single PipeCall, flattening nested pipes.

        FIX: must be a staticmethod -- __rshift__ invokes it as
        ``self._construct_pipe(self, x)``, which previously passed ``self``
        twice to a two-parameter instance method.
        """
        if isinstance(lhs, PipeCall):
            lh_args = lhs.args

            # ensure we don't keep adding MetaArg to the left when
            # combining two pipes
            if lh_args and isinstance(lh_args[0], MetaArg):
                lh_args = lh_args[1:]
        else:
            lh_args = [lhs]

        if isinstance(rhs, PipeCall):
            rh_args = rhs.args

            # similar to above, but for rh args
            if rh_args and isinstance(rh_args[0], MetaArg):
                rh_args = rh_args[1:]
        else:
            rh_args = [rhs]

        return PipeCall(MetaArg("_"), *lh_args, *rh_args)
class FuncArg(Call):
    """Wrap a raw function so it can sit inside a call tree.

    Evaluating this node yields the wrapped function itself (it does not call
    it); a surrounding "__call__" node is what actually invokes it.
    """

    def __init__(self, func, *args, **kwargs):
        self.func = '__custom_func__'
        # when reconstructed generically (e.g. via map_replace), the marker
        # name arrives as func and the real function arrives in args
        if func == '__custom_func__':
            func = args[0]

        self.args = (func,)
        self.kwargs = {}

    def __repr__(self):
        return repr(self.args[0])

    def __call__(self, x):
        # evaluation returns the wrapped function, ignoring the context x
        return self.args[0]
def _array_function_call(self, func, types, *args, **kwargs):
    """Build a deferred Call node invoking numpy's array_function protocol."""
    wrapped = FuncArg(array_function)
    return Call("__call__", wrapped, self, func, types, *args, **kwargs)
158,842 | from functools import singledispatch
from .calls import Call, BinaryOp, BinaryRightOp, MetaArg, UnaryOp, SliceOp, FuncArg
from .format import Formatter
class Symbolic(object):
    """Lazy placeholder for data (the ``_`` object).

    Records attribute access, operators, indexing, and calls as a Call tree
    instead of executing them; the recorded expression can later be evaluated
    against data.
    """

    def __init__(self, source = None, ready_to_call = False):
        # the underlying Call tree; a bare Symbolic starts as the placeholder "_"
        self.__source = MetaArg("_") if source is None else source
        # when True, calling this Symbolic evaluates the expression directly
        self.__ready_to_call = ready_to_call

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """Handle numpy universal functions. E.g. np.sqrt(_)."""
        return array_ufunc(self, ufunc, method, *inputs, **kwargs)

    def __array_function__(self, func, types, args, kwargs):
        # numpy's high-level function protocol (e.g. np.sum(_))
        return array_function(self, func, types, *args, **kwargs)

    # since we override __eq__, we must explicitly set the hash method back to default
    __hash__ = object.__hash__

    # allowed methods ----

    def __getattr__(self, x):
        # record attribute access (_.a) as a __getattr__ node
        # temporary hack working around ipython pretty.py printing
        #if x == "__class__": return Symbolic

        return Symbolic(BinaryOp(
            "__getattr__",
            self.__source,
            strip_symbolic(x)
            ))

    def __call__(self, *args, **kwargs) -> "Symbolic":
        # when the expression is complete (ready_to_call), evaluate it now;
        # otherwise record the call as another node
        if self.__ready_to_call:
            return self.__source(*args, **kwargs)

        return create_sym_call(self.__source, *args, **kwargs)

    def __getitem__(self, x):
        # record indexing (_[...]) with symbolics in the indexer stripped
        return Symbolic(BinaryOp(
            "__getitem__",
            self.__source,
            slice_to_call(x),
            ),
            ready_to_call = True)

    def __invert__(self):
        # double inversion unwraps: ~~expr gives back the inner expression
        # NOTE(review): this branch returns the inner Call, not a Symbolic --
        # confirm that is intended.
        if isinstance(self.__source, Call) and self.__source.func == "__invert__":
            return self.__source.args[0]
        else:
            return self.__op_invert()

    def __op_invert(self):
        # record a ~expr node
        return Symbolic(UnaryOp('__invert__', self.__source), ready_to_call = True)

    def __rshift__(self, x):
        # Note that this and __rrshift__ are copied from Call

        stripped = strip_symbolic(x)

        if isinstance(stripped, Call):
            lhs_call = self.__source
            return self.__class__(Call._construct_pipe(lhs_call, stripped))

        # strip_symbolic(self)(x)
        # x is a symbolic
        raise NotImplementedError("Symbolic may only be used on right-hand side of >> operator.")

    def __rrshift__(self, x):
        # data >> _ : evaluate the recorded expression against the data
        if isinstance(x, (Symbolic, Call)):
            raise NotImplementedError()

        return strip_symbolic(self)(x)

    # banned methods ----

    __contains__ = None
    __iter__ = None

    def __bool__(self):
        raise TypeError("Symbolic objects can not be converted to True/False, or used "
                "with these keywords: not, and, or.")

    # representation ----

    def __repr__(self):
        return Formatter().format(self.__source)

    # unary operators ----
    # note that __invert__ is handled in a custom way above
    __neg__ = create_unary_op("__neg__")
    __pos__ = create_unary_op("__pos__")
    __abs__ = create_unary_op("__abs__")

    # binary operators ----
    __add__ = create_binary_op("__add__")
    __sub__ = create_binary_op("__sub__")
    __mul__ = create_binary_op("__mul__")
    __matmul__ = create_binary_op("__matmul__")
    __truediv__ = create_binary_op("__truediv__")
    __floordiv__ = create_binary_op("__floordiv__")
    __mod__ = create_binary_op("__mod__")
    __divmod__ = create_binary_op("__divmod__")
    __pow__ = create_binary_op("__pow__")
    __lshift__ = create_binary_op("__lshift__")
    __and__ = create_binary_op("__and__")
    __xor__ = create_binary_op("__xor__")
    __or__ = create_binary_op("__or__")
    __gt__ = create_binary_op("__gt__")
    __lt__ = create_binary_op("__lt__")
    __eq__ = create_binary_op("__eq__")
    __ne__ = create_binary_op("__ne__")
    __ge__ = create_binary_op("__ge__")
    __le__ = create_binary_op("__le__")

    # reflected binary operators (e.g. 1 + _) ----
    __radd__ = create_binary_op("__radd__", False)
    __rsub__ = create_binary_op("__rsub__", False)
    __rmul__ = create_binary_op("__rmul__", False)
    __rmatmul__ = create_binary_op("__rmatmul__", False)
    __rtruediv__ = create_binary_op("__rtruediv__", False)
    __rfloordiv__ = create_binary_op("__rfloordiv__", False)
    __rmod__ = create_binary_op("__rmod__", False)
    __rdivmod__ = create_binary_op("__rdivmod__", False)
    __rpow__ = create_binary_op("__rpow__", False)
    __rlshift__ = create_binary_op("__rlshift__", False)
    __rand__ = create_binary_op("__rand__", False)
    __rxor__ = create_binary_op("__rxor__", False)
    __ror__ = create_binary_op("__ror__", False)
def strip_symbolic(x):
if isinstance(x, Symbolic):
return x.__dict__["_Symbolic__source"]
return x
def array_function(self, func, types, *args, **kwargs):
return func(*args, **kwargs)
class Call:
"""Represent python operations.
This class is responsible for representing the pieces of a python expression,
as a function, along with its args and kwargs.
For example, "some_object.a" would be represented at the function "__getattr__",
with the args `some_object`, and `"a"`.
Parameters
----------
func :
Name of the function called. Class methods are represented using the names
they have when defined on the class.
*args :
Arguments the function call uses.
**kwargs :
Keyword arguments the function call uses.
Examples
--------
>>> Call("__add__", 1, 1)
(1 + 1)
See Also
--------
siuba.siu.Symbolic : Helper class for creating Calls.
"""
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __repr__(self):
"""Return a (best guess) python code representation of the Call.
Note that this is not necessarily valid python code (e.g. if a python
function is passed as a call argument).
"""
# TODO: format binary, unary, call, associative
if self.func in BINARY_OPS:
op_repr = BINARY_OPS[self.func]
fmt = "({args[0]} {func} {args[1]})"
elif self.func in UNARY_OPS:
op_repr = UNARY_OPS[self.func]
fmt = "({func}{args[0]})"
elif self.func == "getattr":
op_repr = "."
fmt = "({args[0]}{func}{args[1]})"
else:
op_repr, *arg_str = map(repr, self.args)
kwarg_str = (str(k) + " = " + repr(v) for k,v in self.kwargs.items())
combined_arg_str = ",".join(itertools.chain(arg_str, kwarg_str))
fmt = "{}({})".format(op_repr, combined_arg_str)
return fmt
return fmt.format(
func = op_repr or self.func,
args = self.args,
kwargs = self.kwargs
)
def __call__(self, x):
"""Evaluate a call over some context and return the result.
Note that subclasses like MetaArg, simply return the context, so that a call
acts like a unary function, with ``x`` as its argument.
Parameters
----------
x :
Object passed down the call tree as context.
Examples
--------
>>> expr = Call("__add__", MetaArg("_"), 2)
>>> expr(1) # 1 + 2
3
"""
args, kwargs = self.map_subcalls(self.evaluate_calls, args = (x,))
inst, *rest = args
#inst, *rest = (self.evaluate_calls(arg, x) for arg in self.args)
#kwargs = {k: self.evaluate_calls(v, x) for k, v in self.kwargs.items()}
# TODO: temporary workaround, for when only __get_attribute__ is defined
if self.func == "__getattr__":
return getattr(inst, *rest)
elif self.func == "__getitem__":
return operator.getitem(inst, *rest)
elif self.func == "__call__":
return getattr(inst, self.func)(*rest, **kwargs)
# in normal case, get method to call, and then call it
f_op = getattr(operator, self.func)
return f_op(inst, *rest, **kwargs)
# TODO: type checks will be very useful here. Will need to import symbolic.
# Let's do this once types are in a _typing.py submodule.
def __rshift__(self, x):
"""Create a"""
from .symbolic import strip_symbolic
stripped = strip_symbolic(x)
if isinstance(stripped, Call):
return self._construct_pipe(self, x)
raise TypeError()
def __rrshift__(self, x):
from .symbolic import strip_symbolic
if isinstance(strip_symbolic(x), (Call)):
# only allow non-calls (i.e. data) on the left.
raise TypeError()
return self(x)
def evaluate_calls(arg, x):
if isinstance(arg, Call): return arg(x)
return arg
def copy(self) -> "Call":
"""Return a copy of this call object.
Note that copies are made of child calls, but not their arguments.
"""
args, kwargs = self.map_subcalls(lambda child: child.copy())
return self.__class__(self.func, *args, **kwargs)
def map_subcalls(self, f, args = tuple(), kwargs = None):
"""Call a function on all child calls.
Parameters
----------
f :
A function to call on any child calls.
args:
Optional position arguments to pass to ``f``.
kwargs:
Optional keyword arguments to pass to ``f``.
Returns
-------
A tuple of (new_args, new_kwargs) that can be used to recreate the original
Call object with transformed (copies of) child nodes.
See Also
--------
copy : Recursively calls map_subcalls to clone a call tree.
"""
if kwargs is None: kwargs = {}
new_args = tuple(f(arg, *args, **kwargs) if isinstance(arg, Call) else arg for arg in self.args)
new_kwargs = {k: f(v, *args, **kwargs) if isinstance(v, Call) else v for k,v in self.kwargs.items()}
return new_args, new_kwargs
def map_replace(self, f):
args, kwargs = self.map_subcalls(f)
return self.__class__(self.func, *args, **kwargs)
def iter_subcalls(self, f):
yield from iter(arg for arg in self.args if instance(arg, Call))
yield from iter(v for k,v in self.kwargs.items() if isinstance(v, Call))
def op_vars(self, attr_calls = True):
"""Return set of all variable names used in Call
Args:
attr_calls: whether to include called attributes (e.g. 'a' from _.a())
"""
varnames = set()
op_var = self._get_op_var()
if op_var is not None:
varnames.add(op_var)
if (not attr_calls
and self.func == "__call__"
and isinstance(self.args[0], Call)
and self.args[0].func == "__getattr__"
):
# skip obj, since it fetches an attribute this node is calling
prev_obj, prev_attr = self.args[0].args
all_args = itertools.chain([prev_obj], self.args[1:], self.kwargs.values())
else:
all_args = itertools.chain(self.args, self.kwargs.values())
for arg in all_args:
if isinstance(arg, Call):
varnames.update(arg.op_vars(attr_calls = attr_calls))
return varnames
def _get_op_var(self):
if self.func in ("__getattr__", "__getitem__") and isinstance(self.args[1], str):
return self.args[1]
def obj_name(self):
obj = self.args[0]
if isinstance(obj, Call):
if obj.func == "__getattr__":
return obj.args[0]
elif hasattr(obj, '__name__'):
return obj.__name__
return None
def _construct_pipe(lhs, rhs):
if isinstance(lhs, PipeCall):
lh_args = lhs.args
# ensure we don't keep adding MetaArg to the left when
# combining two pipes
if lh_args and isinstance(lh_args[0], MetaArg):
lh_args = lh_args[1:]
else:
lh_args = [lhs]
if isinstance(rhs, PipeCall):
rh_args = rhs.args
# similar to above, but for rh args
if rh_args and isinstance(rh_args[0], MetaArg):
rh_args = rh_args[1:]
else:
rh_args = [rhs]
return PipeCall(MetaArg("_"), *lh_args, *rh_args)
def _array_function_sym(self, func, types, *args, **kwargs):
f_concrete = array_function.dispatch(Call)
call = f_concrete(
strip_symbolic(self),
func,
types,
*map(strip_symbolic, args),
**{k: strip_symbolic(v) for k, v in kwargs.items()}
)
return Symbolic(call) | null |
158,843 | from functools import singledispatch
from .calls import Call, BinaryOp, BinaryRightOp, MetaArg, UnaryOp, SliceOp, FuncArg
from .format import Formatter
def array_ufunc(self, ufunc, method, *inputs, **kwargs):
class Call:
def __init__(self, func, *args, **kwargs):
def __repr__(self):
def __call__(self, x):
def __rshift__(self, x):
def __rrshift__(self, x):
def evaluate_calls(arg, x):
def copy(self) -> "Call":
def map_subcalls(self, f, args = tuple(), kwargs = None):
def map_replace(self, f):
def iter_subcalls(self, f):
def op_vars(self, attr_calls = True):
def _get_op_var(self):
def obj_name(self):
def _construct_pipe(lhs, rhs):
class FuncArg(Call):
def __init__(self, func, *args, **kwargs):
def __repr__(self):
def __call__(self, x):
def _array_ufunc_call(self, ufunc, method, *inputs, **kwargs):
return Call("__call__", FuncArg(array_ufunc), self, ufunc, method, *inputs, **kwargs) | null |
158,844 | from functools import singledispatch
from .calls import Call, BinaryOp, BinaryRightOp, MetaArg, UnaryOp, SliceOp, FuncArg
from .format import Formatter
class Symbolic(object):
def __init__(self, source = None, ready_to_call = False):
self.__source = MetaArg("_") if source is None else source
self.__ready_to_call = ready_to_call
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
"""Handle numpy universal functions. E.g. np.sqrt(_)."""
return array_ufunc(self, ufunc, method, *inputs, **kwargs)
def __array_function__(self, func, types, args, kwargs):
return array_function(self, func, types, *args, **kwargs)
# since we override __eq__, we must explicitly set the hash method back to default
__hash__ = object.__hash__
# allowed methods ----
def __getattr__(self, x):
# temporary hack working around ipython pretty.py printing
#if x == "__class__": return Symbolic
return Symbolic(BinaryOp(
"__getattr__",
self.__source,
strip_symbolic(x)
))
def __call__(self, *args, **kwargs) -> "Symbolic":
if self.__ready_to_call:
return self.__source(*args, **kwargs)
return create_sym_call(self.__source, *args, **kwargs)
def __getitem__(self, x):
return Symbolic(BinaryOp(
"__getitem__",
self.__source,
slice_to_call(x),
),
ready_to_call = True)
def __invert__(self):
if isinstance(self.__source, Call) and self.__source.func == "__invert__":
return self.__source.args[0]
else:
return self.__op_invert()
def __op_invert(self):
return Symbolic(UnaryOp('__invert__', self.__source), ready_to_call = True)
def __rshift__(self, x):
# Note that this and __rrshift__ are copied from Call
stripped = strip_symbolic(x)
if isinstance(stripped, Call):
lhs_call = self.__source
return self.__class__(Call._construct_pipe(lhs_call, stripped))
# strip_symbolic(self)(x)
# x is a symbolic
raise NotImplementedError("Symbolic may only be used on right-hand side of >> operator.")
def __rrshift__(self, x):
if isinstance(x, (Symbolic, Call)):
raise NotImplementedError()
return strip_symbolic(self)(x)
# banned methods ----
__contains__ = None
__iter__ = None
def __bool__(self):
raise TypeError("Symbolic objects can not be converted to True/False, or used "
"with these keywords: not, and, or.")
# representation ----
def __repr__(self):
return Formatter().format(self.__source)
# unary operators ----
# note that __invert__ is handled in a custom way above
__neg__ = create_unary_op("__neg__")
__pos__ = create_unary_op("__pos__")
__abs__ = create_unary_op("__abs__")
# binary operators ----
__add__ = create_binary_op("__add__")
__sub__ = create_binary_op("__sub__")
__mul__ = create_binary_op("__mul__")
__matmul__ = create_binary_op("__matmul__")
__truediv__ = create_binary_op("__truediv__")
__floordiv__ = create_binary_op("__floordiv__")
__mod__ = create_binary_op("__mod__")
__divmod__ = create_binary_op("__divmod__")
__pow__ = create_binary_op("__pow__")
__lshift__ = create_binary_op("__lshift__")
__and__ = create_binary_op("__and__")
__xor__ = create_binary_op("__xor__")
__or__ = create_binary_op("__or__")
__gt__ = create_binary_op("__gt__")
__lt__ = create_binary_op("__lt__")
__eq__ = create_binary_op("__eq__")
__ne__ = create_binary_op("__ne__")
__ge__ = create_binary_op("__ge__")
__le__ = create_binary_op("__le__")
__radd__ = create_binary_op("__radd__", False)
__rsub__ = create_binary_op("__rsub__", False)
__rmul__ = create_binary_op("__rmul__", False)
__rmatmul__ = create_binary_op("__rmatmul__", False)
__rtruediv__ = create_binary_op("__rtruediv__", False)
__rfloordiv__ = create_binary_op("__rfloordiv__", False)
__rmod__ = create_binary_op("__rmod__", False)
__rdivmod__ = create_binary_op("__rdivmod__", False)
__rpow__ = create_binary_op("__rpow__", False)
__rlshift__ = create_binary_op("__rlshift__", False)
__rand__ = create_binary_op("__rand__", False)
__rxor__ = create_binary_op("__rxor__", False)
__ror__ = create_binary_op("__ror__", False)
def strip_symbolic(x):
if isinstance(x, Symbolic):
return x.__dict__["_Symbolic__source"]
return x
def array_ufunc(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*inputs, **kwargs)
class Call:
"""Represent python operations.
This class is responsible for representing the pieces of a python expression,
as a function, along with its args and kwargs.
For example, "some_object.a" would be represented at the function "__getattr__",
with the args `some_object`, and `"a"`.
Parameters
----------
func :
Name of the function called. Class methods are represented using the names
they have when defined on the class.
*args :
Arguments the function call uses.
**kwargs :
Keyword arguments the function call uses.
Examples
--------
>>> Call("__add__", 1, 1)
(1 + 1)
See Also
--------
siuba.siu.Symbolic : Helper class for creating Calls.
"""
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __repr__(self):
"""Return a (best guess) python code representation of the Call.
Note that this is not necessarily valid python code (e.g. if a python
function is passed as a call argument).
"""
# TODO: format binary, unary, call, associative
if self.func in BINARY_OPS:
op_repr = BINARY_OPS[self.func]
fmt = "({args[0]} {func} {args[1]})"
elif self.func in UNARY_OPS:
op_repr = UNARY_OPS[self.func]
fmt = "({func}{args[0]})"
elif self.func == "getattr":
op_repr = "."
fmt = "({args[0]}{func}{args[1]})"
else:
op_repr, *arg_str = map(repr, self.args)
kwarg_str = (str(k) + " = " + repr(v) for k,v in self.kwargs.items())
combined_arg_str = ",".join(itertools.chain(arg_str, kwarg_str))
fmt = "{}({})".format(op_repr, combined_arg_str)
return fmt
return fmt.format(
func = op_repr or self.func,
args = self.args,
kwargs = self.kwargs
)
def __call__(self, x):
"""Evaluate a call over some context and return the result.
Note that subclasses like MetaArg, simply return the context, so that a call
acts like a unary function, with ``x`` as its argument.
Parameters
----------
x :
Object passed down the call tree as context.
Examples
--------
>>> expr = Call("__add__", MetaArg("_"), 2)
>>> expr(1) # 1 + 2
3
"""
args, kwargs = self.map_subcalls(self.evaluate_calls, args = (x,))
inst, *rest = args
#inst, *rest = (self.evaluate_calls(arg, x) for arg in self.args)
#kwargs = {k: self.evaluate_calls(v, x) for k, v in self.kwargs.items()}
# TODO: temporary workaround, for when only __get_attribute__ is defined
if self.func == "__getattr__":
return getattr(inst, *rest)
elif self.func == "__getitem__":
return operator.getitem(inst, *rest)
elif self.func == "__call__":
return getattr(inst, self.func)(*rest, **kwargs)
# in normal case, get method to call, and then call it
f_op = getattr(operator, self.func)
return f_op(inst, *rest, **kwargs)
# TODO: type checks will be very useful here. Will need to import symbolic.
# Let's do this once types are in a _typing.py submodule.
def __rshift__(self, x):
"""Create a"""
from .symbolic import strip_symbolic
stripped = strip_symbolic(x)
if isinstance(stripped, Call):
return self._construct_pipe(self, x)
raise TypeError()
def __rrshift__(self, x):
from .symbolic import strip_symbolic
if isinstance(strip_symbolic(x), (Call)):
# only allow non-calls (i.e. data) on the left.
raise TypeError()
return self(x)
def evaluate_calls(arg, x):
if isinstance(arg, Call): return arg(x)
return arg
def copy(self) -> "Call":
"""Return a copy of this call object.
Note that copies are made of child calls, but not their arguments.
"""
args, kwargs = self.map_subcalls(lambda child: child.copy())
return self.__class__(self.func, *args, **kwargs)
def map_subcalls(self, f, args = tuple(), kwargs = None):
"""Call a function on all child calls.
Parameters
----------
f :
A function to call on any child calls.
args:
Optional position arguments to pass to ``f``.
kwargs:
Optional keyword arguments to pass to ``f``.
Returns
-------
A tuple of (new_args, new_kwargs) that can be used to recreate the original
Call object with transformed (copies of) child nodes.
See Also
--------
copy : Recursively calls map_subcalls to clone a call tree.
"""
if kwargs is None: kwargs = {}
new_args = tuple(f(arg, *args, **kwargs) if isinstance(arg, Call) else arg for arg in self.args)
new_kwargs = {k: f(v, *args, **kwargs) if isinstance(v, Call) else v for k,v in self.kwargs.items()}
return new_args, new_kwargs
def map_replace(self, f):
args, kwargs = self.map_subcalls(f)
return self.__class__(self.func, *args, **kwargs)
def iter_subcalls(self, f):
yield from iter(arg for arg in self.args if instance(arg, Call))
yield from iter(v for k,v in self.kwargs.items() if isinstance(v, Call))
def op_vars(self, attr_calls = True):
"""Return set of all variable names used in Call
Args:
attr_calls: whether to include called attributes (e.g. 'a' from _.a())
"""
varnames = set()
op_var = self._get_op_var()
if op_var is not None:
varnames.add(op_var)
if (not attr_calls
and self.func == "__call__"
and isinstance(self.args[0], Call)
and self.args[0].func == "__getattr__"
):
# skip obj, since it fetches an attribute this node is calling
prev_obj, prev_attr = self.args[0].args
all_args = itertools.chain([prev_obj], self.args[1:], self.kwargs.values())
else:
all_args = itertools.chain(self.args, self.kwargs.values())
for arg in all_args:
if isinstance(arg, Call):
varnames.update(arg.op_vars(attr_calls = attr_calls))
return varnames
def _get_op_var(self):
if self.func in ("__getattr__", "__getitem__") and isinstance(self.args[1], str):
return self.args[1]
def obj_name(self):
obj = self.args[0]
if isinstance(obj, Call):
if obj.func == "__getattr__":
return obj.args[0]
elif hasattr(obj, '__name__'):
return obj.__name__
return None
def _construct_pipe(lhs, rhs):
if isinstance(lhs, PipeCall):
lh_args = lhs.args
# ensure we don't keep adding MetaArg to the left when
# combining two pipes
if lh_args and isinstance(lh_args[0], MetaArg):
lh_args = lh_args[1:]
else:
lh_args = [lhs]
if isinstance(rhs, PipeCall):
rh_args = rhs.args
# similar to above, but for rh args
if rh_args and isinstance(rh_args[0], MetaArg):
rh_args = rh_args[1:]
else:
rh_args = [rhs]
return PipeCall(MetaArg("_"), *lh_args, *rh_args)
def _array_ufunc_sym(self, ufunc, method, *inputs, **kwargs):
f_concrete = array_ufunc.dispatch(Call)
call = f_concrete(
strip_symbolic(self),
ufunc,
method,
*map(strip_symbolic, inputs),
**{k: strip_symbolic(v) for k, v in kwargs.items()}
)
return Symbolic(call) | null |
158,845 | from importlib.abc import Loader, MetaPathFinder
from importlib.machinery import ModuleSpec
from importlib.util import find_spec
import importlib
from types import ModuleType
import sys
import warnings
from functools import wraps
import pdb
import readline
from .siu import Symbolic, Call
def lazy_func(f):
@wraps(f)
def wrapper(*args, **kwargs):
return Symbolic(source = f)(*args, **kwargs)
return wrapper | null |
158,846 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base,
SqlTranslator,
sql_not_impl,
win_cumul,
win_agg,
annotate,
wrap_annotate
)
from ..utils import SQLA_VERSION
from .base import base_nowin
from . import _dt_generics as _dt
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
class SqliteColumn(SqlColumn): pass
class SqlColumn(SqlBase): pass
def wrap_annotate(f, **kwargs):
from functools import wraps
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
wrapper.operation = kwargs
return wrapper
def returns_float(func_names):
# TODO: MC-NOTE - shift all translations to directly register
# TODO: MC-NOTE - make an AliasAnnotated class or something, that signals
# it is using another method, but w/ an updated annotation.
from siuba.ops import ALL_OPS
for name in func_names:
generic = ALL_OPS[name]
f_concrete = generic.dispatch(SqlColumn)
f_annotated = wrap_annotate(f_concrete, result_type="float")
generic.register(SqliteColumn, f_annotated) | null |
158,847 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base,
SqlTranslator,
sql_not_impl,
win_cumul,
win_agg,
annotate,
wrap_annotate
)
from ..utils import SQLA_VERSION
from .base import base_nowin
from . import _dt_generics as _dt
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
def sql_extract(name):
if name == "quarter":
# division in sqlite automatically rounds down
# so for jan, 1 + 2 = 3, and 3 / 1 is Q1
return lambda _, col: (fn.strftime("%m", col) + 2) / 3
return lambda _, col: fn.extract(name, col) | null |
158,848 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base,
SqlTranslator,
sql_not_impl,
win_cumul,
win_agg,
annotate,
wrap_annotate
)
from ..utils import SQLA_VERSION
from .base import base_nowin
from . import _dt_generics as _dt
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
class SqliteColumn(SqlColumn): pass
def _sql_is_last_day_of(codata: SqliteColumn, col, period):
valid_periods = {"month", "year"}
if period not in valid_periods:
raise ValueError(f"Period must be one of {valid_periods}")
incr = f"+1 {period}"
target_date = fn.date(col, f'start of {period}', incr, "-1 day")
return col == target_date | null |
158,849 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base,
SqlTranslator,
sql_not_impl,
win_cumul,
win_agg,
annotate,
wrap_annotate
)
from ..utils import SQLA_VERSION
from .base import base_nowin
from . import _dt_generics as _dt
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
class SqliteColumn(SqlColumn): pass
def _sql_is_first_day_of(codata: SqliteColumn, col, period):
valid_periods = {"month", "year"}
if period not in valid_periods:
raise ValueError(f"Period must be one of {valid_periods}")
target_date = fn.date(col, f'start of {period}')
return fn.date(col) == target_date | null |
158,850 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base,
SqlTranslator,
sql_not_impl,
win_cumul,
win_agg,
annotate,
wrap_annotate
)
from ..utils import SQLA_VERSION
from .base import base_nowin
from . import _dt_generics as _dt
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
def sql_days_in_month(_, col):
date_last_day = fn.date(col, 'start of month', '+1 month', '-1 day')
return fn.strftime("%d", date_last_day).cast(sa_types.Integer()) | null |
158,851 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base,
SqlTranslator,
sql_not_impl,
win_cumul,
win_agg,
annotate,
wrap_annotate
)
from ..utils import SQLA_VERSION
from .base import base_nowin
from . import _dt_generics as _dt
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
SQLA_VERSION=tuple(map(int, re.match(RE_VERSION, sqlalchemy.__version__).groups()))
def sql_week_of_year(_, col):
# convert sqlite week to ISO week
# adapted from: https://stackoverflow.com/a/15511864
iso_dow = (fn.strftime("%j", fn.date(col, "-3 days", "weekday 4")) - 1)
if SQLA_VERSION >= (2, 0, 0):
# in v2, regular division will cause sqlalchemy to coerce the 7 to a float
return (iso_dow // 7) + 1
return (iso_dow / 7) + 1 | null |
158,852 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base,
SqlTranslator,
sql_not_impl,
win_cumul,
win_agg,
annotate,
wrap_annotate
)
from ..utils import SQLA_VERSION
from .base import base_nowin
from . import _dt_generics as _dt
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
def sql_round(_, col, n):
return sql.func.round(col, n) | null |
158,853 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base,
SqlTranslator,
sql_not_impl,
win_cumul,
win_agg,
annotate,
wrap_annotate
)
from ..utils import SQLA_VERSION
from .base import base_nowin
from . import _dt_generics as _dt
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
def sql_func_truediv(_, x, y):
return sql.cast(x, sa_types.Float()) / y | null |
158,854 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base,
SqlTranslator,
sql_not_impl,
win_cumul,
win_agg,
annotate,
wrap_annotate
)
from ..utils import SQLA_VERSION
from .base import base_nowin
from . import _dt_generics as _dt
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
def sql_str_capitalize(_, col):
# capitalize first letter, then concatenate with lowercased rest
first_upper = fn.upper(fn.substr(col, 1, 1))
rest_lower = fn.lower(fn.substr(col, 2))
return first_upper.concat(rest_lower) | null |
158,855 | from sqlalchemy.sql import func as fn
from sqlalchemy import sql
from ..translate import (
SqlTranslator,
extend_base,
sql_scalar,
sql_agg,
win_agg,
win_cumul,
annotate
)
from .base import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
class SnowflakeColumn(SqlColumn): pass
def sql_func_last_day_in_period(codata: SnowflakeColumn, col, period):
return _dt.date_trunc(codata, col, period) \
+ sql.text("interval '1 %s'" % period) \
- sql.text("interval '1 day'") | null |
158,856 | from siuba.siu import symbolic_dispatch
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from sqlalchemy.sql import func as fn
from sqlalchemy import types as sa_types
from sqlalchemy import sql
def date_trunc(_, col, period):
def sql_is_first_day_of(codata, col, period):
return date_trunc(codata, col, "day") == date_trunc(codata, col, period) | null |
158,857 | from siuba.siu import symbolic_dispatch
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from sqlalchemy.sql import func as fn
from sqlalchemy import types as sa_types
from sqlalchemy import sql
def date_trunc(_, col, period):
return fn.date_trunc(period, col)
def sql_func_last_day_in_period(codata, col, period):
return date_trunc(codata, col, period) + sql.text("INTERVAL '1 %s' - INTERVAL '1 day'" % period)
def sql_is_last_day_of(codata, col, period):
last_day = sql_func_last_day_in_period(codata, col, period)
return date_trunc(codata, col, 'day') == last_day | null |
158,858 | from siuba.siu import symbolic_dispatch
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from sqlalchemy.sql import func as fn
from sqlalchemy import types as sa_types
from sqlalchemy import sql
def sql_is_date_offset(period, is_start = True):
# will check against one day in the past for is_start, v.v. otherwise
fn_add = fn.date_sub if is_start else fn.date_add
def f(_, col):
get_period = lambda col: fn.extract(period, col)
src_per = get_period(col)
incr_per = get_period(fn_add(col, sql.text("INTERVAL 1 DAY")))
return src_per != incr_per
return f | null |
158,859 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator, sql_not_impl, win_absent
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_str_strip(left = True, right = True):
def f(_, col):
# see https://stackoverflow.com/a/6858168/1144523
lstrip = "^[[:space:]]+" if left else ""
rstrip = "[[:space:]]+$" if right else ""
or_op = "|" if lstrip and rstrip else ""
regex = "(" + lstrip + or_op + rstrip + ")"
return fn.regexp_replace(col, regex, "")
return f | null |
158,860 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator, sql_not_impl, win_absent
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_func_extract_dow_monday(_, col):
# MYSQL: sunday starts, equals 1 (an int)
# pandas: monday starts, equals 0 (also an int)
raw_dow = fn.dayofweek(col)
# monday is 2 in MYSQL, so use monday + 5 % 7
return (raw_dow + 5) % 7 | null |
158,861 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator, sql_not_impl, win_absent
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_is_first_day_of(_, col, period):
src_per = fn.extract(period, col)
incr_per = fn.extract(period, fn.date_sub(col, sql.text("INTERVAL 1 DAY")))
return src_per != incr_per | null |
158,862 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator, sql_not_impl, win_absent
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_is_last_day_of(_, col, period):
src_per = fn.extract(period, col)
incr_per = fn.extract(period, fn.date_add(col, sql.text("INTERVAL 1 DAY")))
return src_per != incr_per | null |
158,863 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator, sql_not_impl, win_absent
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_func_truediv(_, x, y):
return sql.cast(x, sa_types.Numeric()) / y | null |
158,864 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator, sql_not_impl, win_absent
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_func_floordiv(_, x, y):
return x.op("DIV")(y) | null |
158,865 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator, sql_not_impl, win_absent
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_func_between(_, col, left, right, inclusive=True):
if not inclusive:
raise NotImplementedError("between must be inclusive")
# TODO: should figure out how sqlalchemy prefers to set types, rather
# than setting manually on this expression
expr = col.between(left, right)
expr.type = sa_types.Boolean()
return expr | null |
158,866 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
class CumlOver(CustomOverClause):
    """Over clause for cumulative versions of functions like sum, min, max.

    Note that this class is also currently used for aggregates that might require
    ordering, like nth, first, etc..

    e.g. cumsum(x) -> SUM(x) OVER (partition by <group vars> order by <order vars>)
    e.g. nth(0) -> NTH_VALUE(1) OVER (partition by <group vars> order by <order vars>)
    """

    def set_over(self, group_by, order_by):
        # grouping columns become the window's PARTITION BY
        self.partition_by = group_by

        # do not override order by if it was set by the user. this might happen
        # in functions like nth, which gives the option to set it.
        if self.order_by is None or not len(self.order_by):
            if not len(order_by):
                # NOTE(review): missing space between "engine" and "does" in this
                # implicitly concatenated warning message — likely unintended.
                warnings.warn(
                    "No order by columns explicitly set in window function. SQL engine"
                    "does not guarantee a row ordering. Recommend using an arrange beforehand.",
                    RuntimeWarning
                )
            self.order_by = order_by

        return self

    # NOTE(review): presumably a @classmethod upstream (first parameter is cls);
    # the decorator appears lost in extraction — confirm against the original file.
    def func(cls, name, rows=(None, 0)):
        sa_func = getattr(sql.func, name)

        def f(codata, col, *args, **kwargs) -> CumlOver:
            return cls(sa_func(col, *args, **kwargs), rows = rows)

        return f
def sql_func_diff(_, col, periods = 1):
    """Translate .diff(): difference with the row `periods` positions away (LAG/LEAD)."""
    if periods == 0:
        raise ValueError("periods argument to sql diff cannot be 0")

    if periods > 0:
        shifted = sql.func.lag(col, periods)
    else:
        shifted = sql.func.lead(col, -periods)

    return CumlOver(col - shifted)
158,867 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
# Parse the installed sqlalchemy version (via RE_VERSION) into an int tuple,
# e.g. "1.4.39" -> (1, 4, 39), so it can be compared with tuple literals.
SQLA_VERSION=tuple(map(int, re.match(RE_VERSION, sqlalchemy.__version__).groups()))
def sql_func_floordiv(_, x, y):
    """Translate floor division; sqlalchemy >= 2 has a native // operator."""
    if SQLA_VERSION >= (2, 0, 0):
        quotient = x // y
    else:
        # older sqlalchemy: plain "/" plus an integer cast emulates floor division
        quotient = x / y
    return sql.cast(quotient, sa_types.Integer())
158,868 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
class RankOver(CustomOverClause):
    """Over clause for ranking functions.

    Note that in python we might call rank(col), but in SQL the ranking column
    is defined using order by.

    E.g. rank(y) -> rank() OVER (partition by <group vars> order by y)
    """

    def set_over(self, group_by, order_by = None):
        # extend any partition set at construction time with the grouping columns;
        # order_by is left alone (it holds the ranked column).
        crnt_partition = getattr(self.partition_by, 'clauses', tuple())
        self.partition_by = sql.elements.ClauseList(*crnt_partition, *group_by.clauses)
        return self

    # NOTE(review): presumably a @classmethod upstream (first parameter is cls);
    # the decorator appears lost in extraction — confirm against the original file.
    def func(cls, name):
        sa_func = getattr(sql.func, name)

        def f(codata, col) -> RankOver:
            return cls(sa_func(), order_by = col)

        return f
def sql_func_rank(_, col):
    """Rank with ties averaged (pandas' default method="average").

    See https://stackoverflow.com/a/36823637/1144523
    """
    # average rank = minimum rank + (number of tied rows - 1) / 2
    tie_adjust = (RankOver(sql.func.count(), partition_by = col) - 1) / 2.0
    return RankOver(sql.func.rank(), order_by = col) + tie_adjust
158,869 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
The provided code snippet includes necessary dependencies for implementing the `sql_extract` function. Write a Python function `def sql_extract(name)` to solve the following problem:
Return function that extracts named component from column. E.g. to produce EXTRACT(MONTH, some_column)
Here is the function:
def sql_extract(name):
    """Return function that extracts named component from column.

    E.g. to produce EXTRACT(MONTH, some_column)
    """
    def translator(_, col):
        return fn.extract(name, col)

    return translator
158,870 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
def sql_func_days_in_month(codata, col):
    """Days in col's month: the day component of the last date of that month."""
    month_end = _dt.sql_func_last_day_in_period(codata, col, 'month')
    return fn.extract('day', month_end)
158,871 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
def sql_func_extract_dow_monday(_, col):
    """Day of week with Monday = 0 (postgres extract('dow') uses Sunday = 0)."""
    shifted = sql.func.extract('dow', col) + 6
    monday_zero = sql.cast(shifted, sa_types.Integer) % 7
    # extract('dow') yields numeric, so keep the result numeric as well
    return sql.cast(monday_zero, sa_types.Numeric)
158,872 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
def sql_func_floor_date(_, col, unit):
    """Truncate a date/timestamp column down to `unit` using DATE_TRUNC.

    Valid units (see postgres docs): microseconds, milliseconds, second,
    minute, hour, day, week, month, quarter, year, decade, century, millennium.
    """
    # TODO: implement in siuba.dply.lubridate
    truncated = fn.date_trunc(unit, col)
    return truncated
158,873 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
def sql_str_strip(name):
    """Return a translator calling the named trim function (e.g. ltrim, rtrim, btrim)."""
    trim_func = getattr(fn, name)

    def translator(_, col, to_strip = " \t\n\v\f\r"):
        return trim_func(col, to_strip)

    return translator
158,874 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
def sql_func_capitalize(_, col):
    """Uppercase the first character, leaving the remainder unchanged.

    NOTE(review): pandas .str.capitalize() also lowercases the remainder;
    this translation does not — confirm whether that difference is intended.
    """
    head = fn.upper(fn.left(col, 1))
    tail = fn.right(col, fn.length(col) - 1)
    return sql.functions.concat(head, tail)
158,875 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
def sql_str_cat(_, col, others=None, sep=None, na_rep=None, join=None):
    """Concatenate col with one other column; only the plain two-column form is supported."""
    unsupported = {"sep": sep, "na_rep": na_rep, "join": join}
    for arg_name, value in unsupported.items():
        if value is not None:
            raise NotImplementedError(f"{arg_name} argument not supported for sql cat")

    if isinstance(others, (list, tuple)):
        raise NotImplementedError("others argument must be a single column for sql cat")

    return sql.functions.concat(col, others)
158,876 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
def sql_func_astype(_, col, _type):
    """Translate .astype() to CAST for the basic python scalar types.

    Parameters
    ----------
    col :
        Column expression to cast.
    _type :
        One of str, int, float, bool — as a type object or its name.

    Raises
    ------
    ValueError
        When `_type` is not a supported type.
    """
    mappings = {
        str: sa_types.Text,
        'str': sa_types.Text,
        int: sa_types.Integer,
        'int': sa_types.Integer,
        float: sa_types.Float,
        'float': sa_types.Float,
        bool: sa_types.Boolean,
        'bool': sa_types.Boolean
    }

    try:
        sa_type = mappings[_type]
    except KeyError:
        # bug fix: name the offending type in the message; drop the KeyError
        # context since it is internal noise.
        raise ValueError(
            "sql astype currently only supports type objects: str, int, float, bool"
            " (received: %r)" % (_type,)
        ) from None

    return sql.cast(col, sa_type)
158,877 | from functools import partial
from sqlalchemy import sql
from sqlalchemy import types as sa_types
from sqlalchemy.sql import func as fn
from sqlalchemy.sql.elements import ColumnClause
from siuba.sql.translate import (
extend_base,
win_absent,
win_over,
win_cumul,
win_agg,
sql_agg,
sql_scalar,
sql_colmeth,
sql_ordered_set,
sql_not_impl,
annotate,
RankOver,
CumlOver,
SqlTranslator,
FunctionLookupBound
)
from siuba.sql.utils import SQLA_VERSION
from siuba.sql.translate import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
from siuba.siu.symbolic import array_ufunc, array_function
def annotate(f = None, **kwargs):
    """Attach an `operation` metadata dict to f; usable directly or as a decorator."""
    if f is None:
        # called with only keyword arguments: act as a decorator factory
        return lambda func: annotate(func, **kwargs)

    if hasattr(f, "operation"):
        raise ValueError("function already has an operation attribute")

    f.operation = kwargs
    return f
def req_bool(f):
    """Mark translation *f* as requiring boolean input (see `annotate`)."""
    return annotate(f, input_type = "bool")
158,878 | from sqlalchemy.sql import func as fn
from sqlalchemy import sql
from ..translate import (
# data types
SqlColumn, SqlColumnAgg,
AggOver,
# transformations
wrap_annotate,
sql_agg,
win_agg,
win_cumul,
sql_not_impl,
# wiring up translator
extend_base,
SqlTranslator,
convert_literal
)
from .postgresql import (
PostgresqlColumn,
PostgresqlColumnAgg,
)
from .base import sql_func_rank
class DuckdbColumn(PostgresqlColumn): pass  # duckdb reuses the postgres scalar translations
def returns_int(func_names):
    """Re-register each op's SqlColumn translation for duckdb, annotated as returning int."""
    # TODO: MC-NOTE - shift all translations to directly register
    # TODO: MC-NOTE - make an AliasAnnotated class or something, that signals
    # it is using another method, but w/ an updated annotation.
    from siuba.ops import ALL_OPS

    for op_name in func_names:
        dispatcher = ALL_OPS[op_name]
        base_impl = dispatcher.dispatch(SqlColumn)
        dispatcher.register(DuckdbColumn, wrap_annotate(base_impl, result_type="int"))
158,879 | from sqlalchemy.sql import func as fn
from sqlalchemy import sql
from ..translate import (
# data types
SqlColumn, SqlColumnAgg,
AggOver,
# transformations
wrap_annotate,
sql_agg,
win_agg,
win_cumul,
sql_not_impl,
# wiring up translator
extend_base,
SqlTranslator,
convert_literal
)
from .postgresql import (
PostgresqlColumn,
PostgresqlColumnAgg,
)
from .base import sql_func_rank
# NOTE(review): garbled extraction — _cl_duckdb is presumably a module-level
# literal converter registered for DuckdbColumn (its first parameter is
# annotated with the class itself), not an actual method. Confirm upstream.
class DuckdbColumn(PostgresqlColumn):
    def _cl_duckdb(codata: DuckdbColumn, lit):
        """Convert a python literal; duckdb renders python lists as SQL arrays."""
        from sqlalchemy.dialects.postgresql import array

        if isinstance(lit, list):
            return array(lit)

        return sql.literal(lit)
158,880 | from sqlalchemy.sql import func as fn
from sqlalchemy import sql
from ..translate import (
# data types
SqlColumn, SqlColumnAgg,
AggOver,
# transformations
wrap_annotate,
sql_agg,
win_agg,
win_cumul,
sql_not_impl,
# wiring up translator
extend_base,
SqlTranslator,
convert_literal
)
from .postgresql import (
PostgresqlColumn,
PostgresqlColumnAgg,
)
from .base import sql_func_rank
# NOTE(review): garbled extraction — the bodies of these methods were stripped,
# so the class as shown is not valid Python; see the full AggOver definition.
class AggOver(CustomOverClause):
    def set_over(self, group_by, order_by = None):
    def func(cls, name):
        def f(codata, col, *args, **kwargs) -> AggOver:
def sql_quantile(is_analytic=False):
    """Build a quantile translation using percentile_cont (ordered-set aggregate).

    Parameters
    ----------
    is_analytic :
        When True, wrap the aggregate in an OVER clause (window form).
    """
    # Ordered and theoretical set aggregates
    sa_func = getattr(sql.func, "percentile_cont")

    def f_quantile(codata, col, q, *args):
        if args:
            raise NotImplementedError("Quantile only supports the q argument.")
        if not isinstance(q, (int, float)):
            raise TypeError("q argument must be int or float, but received: %s" %type(q))

        # as far as I can tell, there's no easy way to tell sqlalchemy to render
        # the exact text a dialect would render for a literal (except maybe using
        # literal_column), so use the classic sql.text.
        q_text = sql.text(str(q))

        if is_analytic:
            # bug fix: q_text was wrapped in sql.text() a second time here,
            # but it is already a TextClause.
            return AggOver(sa_func(q_text).within_group(col))

        return sa_func(q_text).within_group(col)

    return f_quantile
158,881 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator,
annotate, sql_scalar, sql_agg, win_cumul, sql_not_impl,
AggOver, RankOver, Over,
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_floordiv(_, x, y):
    """Translate floor division as FLOOR(x / y)."""
    quotient = x / y
    return sql.func.floor(quotient)
158,882 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator,
annotate, sql_scalar, sql_agg, win_cumul, sql_not_impl,
AggOver, RankOver, Over,
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
class BigqueryColumn(SqlColumn): pass  # marker class for bigquery-specific dispatch
def _date_trunc(_: BigqueryColumn, col, name):
    """BigQuery DATETIME_TRUNC; the unit must be rendered as a raw keyword."""
    unit = sql.text(name)
    return fn.datetime_trunc(col, unit)
def sql_func_last_day_in_period(_: BigqueryColumn, col, period):
    """BigQuery LAST_DAY(col, period), with period rendered as a raw keyword."""
    period_kw = sql.text(period)
    return fn.last_day(col, period_kw)
def sql_is_last_day_of(codata: BigqueryColumn, col, period):
    """True when col falls on the final day of its period (e.g. month, quarter)."""
    period_end = sql_func_last_day_in_period(codata, col, period)
    day = _date_trunc(codata, col, "DAY")
    return day == period_end
158,883 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator,
annotate, sql_scalar, sql_agg, win_cumul, sql_not_impl,
AggOver, RankOver, Over,
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_extract(field):
    """Return a translation that EXTRACTs the given datetime field from a column."""
    def translator(_, col):
        return fn.extract(field, col)

    return translator
158,884 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator,
annotate, sql_scalar, sql_agg, win_cumul, sql_not_impl,
AggOver, RankOver, Over,
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_str_replace(_, col, pat, repl, n=-1, case=None, flags=0, regex=True):
    """Translate .str.replace(); only pat, repl and regex are supported."""
    if n != -1 or case is not None or flags != 0:
        raise NotImplementedError("only pat and repl arguments supported in sql")

    if not regex:
        return fn.replace(col, pat, repl)

    return fn.regexp_replace(col, pat, repl)
158,885 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator,
annotate, sql_scalar, sql_agg, win_cumul, sql_not_impl,
AggOver, RankOver, Over,
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
def sql_str_contains(_, col, pat, case=None, flags=0, na=None, regex=True):
    """Translate .str.contains() for BigQuery (REGEXP_CONTAINS or plain LIKE).

    Only `pat` and `regex` are supported; `case`, `flags` and `na` raise
    NotImplementedError when given non-default values.
    """
    # bug fix: the old message referred to a "repl" argument copied from
    # str.replace — contains has no repl. Also reject a non-default `na`,
    # which used to be silently ignored (consistent with the postgres version).
    if case is not None or flags != 0 or na is not None:
        raise NotImplementedError("only the pat and regex arguments are supported in sql")

    if regex:
        return fn.regexp_contains(col, pat)

    return col.contains(pat)
158,886 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator,
annotate, sql_scalar, sql_agg, win_cumul, sql_not_impl,
AggOver, RankOver, Over,
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
class AggOver(CustomOverClause):
    """Over clause for uses of functions min, max, avg, that return one value.

    Note that this class does not set order by, which is how these functions
    generally become their cumulative versions.

    E.g. mean(x) -> AVG(x) OVER (partition_by <group vars>)
    """

    def set_over(self, group_by, order_by = None):
        # only partitioning is needed; order_by is deliberately ignored
        self.partition_by = group_by
        return self

    # NOTE(review): presumably a @classmethod upstream (first parameter is cls);
    # the decorator appears lost in extraction — confirm against the original file.
    def func(cls, name):
        sa_func = getattr(sql.func, name)

        def f(codata, col, *args, **kwargs) -> AggOver:
            return cls(sa_func(col, *args, **kwargs))

        return f
def annotate(f = None, **kwargs):
    """Attach an `operation` metadata dict to f; works directly or as a decorator."""
    if f is None:
        # keyword-only call: return a decorator
        return lambda func: annotate(func, **kwargs)

    if hasattr(f, "operation"):
        raise ValueError("function already has an operation attribute")

    f.operation = kwargs
    return f
def sql_any(window = False):
    """Build an `any` translation: some row is truthy ⇔ SUM(CAST(col AS INT)) != 0."""
    wrap = AggOver if window else (lambda x: x)

    @annotate(input_type="bool")
    def f(_, col):
        total = fn.sum(fn.cast(col, sa_types.Integer()))
        return wrap(total) != 0

    return f
158,887 | from ..translate import (
SqlColumn, SqlColumnAgg, extend_base, win_agg,
SqlTranslator,
annotate, sql_scalar, sql_agg, win_cumul, sql_not_impl,
AggOver, RankOver, Over,
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
from . import _dt_generics as _dt
# NOTE(review): garbled extraction — the bodies of these defs were stripped,
# so this stub is not valid Python; see the full AggOver / annotate definitions.
class AggOver(CustomOverClause):
    def set_over(self, group_by, order_by = None):
    def func(cls, name):
        def f(codata, col, *args, **kwargs) -> AggOver:
    def annotate(f = None, **kwargs):
def sql_all(window = False):
    """Build an `all` translation: every row truthy ⇔ SUM(CAST(NOT col AS INT)) == 0."""
    wrap = AggOver if window else (lambda x: x)

    @annotate(input_type="bool")
    def f(_, col):
        # similar to any, but counts the negated column instead
        inverted = fn.cast(~col, sa_types.Integer())
        return wrap(fn.sum(inverted)) == 0

    return f
158,888 | from ..translate import (
win_agg, win_over, win_cumul, sql_scalar, sql_agg, win_absent,
RankOver,
wrap_annotate, annotate,
extend_base,
SqlTranslator,
)
from .base import (
SqlColumn, SqlColumnAgg
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
def sql_is_quarter_end(_, col):
    """True when col falls on the last day of its quarter."""
    # start of quarter + 3 months - 1 day == final day of the quarter
    quarter_last = fn.date_trunc("quarter", col) + sql.text("INTERVAL '3 month' - INTERVAL '1 day'")
    return fn.date_trunc("day", col) == quarter_last
158,889 | from ..translate import (
win_agg, win_over, win_cumul, sql_scalar, sql_agg, win_absent,
RankOver,
wrap_annotate, annotate,
extend_base,
SqlTranslator,
)
from .base import (
SqlColumn, SqlColumnAgg
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
class PostgresqlColumn(SqlColumn): pass  # marker class for postgres-specific dispatch
def wrap_annotate(f, **kwargs):
    """Return a wrapper around f whose `operation` metadata is set to kwargs.

    Unlike mutating f directly, this lets a shared implementation be
    re-registered with different annotations.
    """
    from functools import wraps

    # bug fix: `wraps` was imported but never applied, so the wrapper lost
    # f's __name__/__doc__.
    @wraps(f)
    def wrapper(*args, **kw):
        return f(*args, **kw)

    # set after @wraps so metadata copied from f cannot clobber it
    wrapper.operation = kwargs
    return wrapper
def returns_float(func_names):
    """Re-register each op's SqlColumn translation for postgres, annotated as returning float."""
    # TODO: MC-NOTE - shift all translations to directly register
    # TODO: MC-NOTE - make an AliasAnnotated class or something, that signals
    # it is using another method, but w/ an updated annotation.
    from siuba.ops import ALL_OPS

    for op_name in func_names:
        dispatcher = ALL_OPS[op_name]
        base_impl = dispatcher.dispatch(SqlColumn)
        dispatcher.register(PostgresqlColumn, wrap_annotate(base_impl, result_type="float"))
158,890 | from ..translate import (
win_agg, win_over, win_cumul, sql_scalar, sql_agg, win_absent,
RankOver,
wrap_annotate, annotate,
extend_base,
SqlTranslator,
)
from .base import (
SqlColumn, SqlColumnAgg
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
def sql_log(_, col, base = None):
    """Translate log: natural log when base is None (numpy/pandas default).

    Bug fix: `base` used to be silently ignored — any non-None value produced
    LOG(col), i.e. base 10. Now base 10 keeps LOG and any other base uses the
    change-of-base identity ln(x)/ln(b).
    """
    if base is None:
        return sql.func.ln(col)
    if base == 10:
        return sql.func.log(col)
    return sql.func.ln(col) / sql.func.ln(base)
158,891 | from ..translate import (
win_agg, win_over, win_cumul, sql_scalar, sql_agg, win_absent,
RankOver,
wrap_annotate, annotate,
extend_base,
SqlTranslator,
)
from .base import (
SqlColumn, SqlColumnAgg
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
def sql_round(_, col, n):
    """Translate .round(n) to ROUND(col, n)."""
    return fn.round(col, n)
158,892 | from ..translate import (
win_agg, win_over, win_cumul, sql_scalar, sql_agg, win_absent,
RankOver,
wrap_annotate, annotate,
extend_base,
SqlTranslator,
)
from .base import (
SqlColumn, SqlColumnAgg
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
def sql_func_contains(_, col, pat, case = True, flags = 0, na = None, regex = True):
    """Translate .str.contains() using postgres `~` / `~*` regex operators.

    NOTE: python and postgres regex syntax differ in places.
    """
    # TODO: warn there differences in regex for python and sql?
    if not isinstance(pat, str):
        raise TypeError("pat argument must be a string")
    if flags != 0 or na is not None:
        raise NotImplementedError("flags and na options not supported")

    if regex:
        regex_op = "~" if case else "~*"
        return col.op(regex_op)(pat)

    target = col if case else col.lower()
    return target.contains(pat, autoescape = True)
158,893 | from ..translate import (
win_agg, win_over, win_cumul, sql_scalar, sql_agg, win_absent,
RankOver,
wrap_annotate, annotate,
extend_base,
SqlTranslator,
)
from .base import (
SqlColumn, SqlColumnAgg
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
from sqlalchemy.sql import func as fn
def sql_func_truediv(_, x, y):
    """Translate true division: cast to FLOAT so integer "/" cannot truncate."""
    numer = sql.cast(x, sa_types.Float())
    return numer / y
158,894 | import importlib
from contextlib import contextmanager
from pandas.io import sql as _pd_sql
import re
import sqlalchemy
def get_dialect_funcs(name):
    """Import the dialect module for `name` and return its `funcs` mapping."""
    module_path = 'siuba.sql.dialects.{}'.format(name)
    dialect_mod = importlib.import_module(module_path)
    return dialect_mod.funcs
158,895 | import importlib
from contextlib import contextmanager
from pandas.io import sql as _pd_sql
import re
import sqlalchemy
def get_sql_classes(name):
    """Return the dialect's window and aggregate column classes by naming convention."""
    mod = importlib.import_module('siuba.sql.dialects.{}'.format(name))
    prefix = name.title()
    return {
        'window': getattr(mod, prefix + "Column"),
        'aggregate': getattr(mod, prefix + "ColumnAgg")
    }
158,896 | import importlib
from contextlib import contextmanager
try:
# once we drop sqlalchemy 1.2, can use create_mock_engine function
from sqlalchemy.engine.mock import MockConnection
except ImportError:
from sqlalchemy.engine.strategies import MockEngineStrategy
MockConnection = MockEngineStrategy.MockConnection
from pandas.io import sql as _pd_sql
import re
import sqlalchemy
def is_sqla_12():
    """True when the installed sqlalchemy is a 1.2.x release."""
    major_minor = SQLA_VERSION[:-1]
    return (1, 2) == major_minor
def is_sqla_13():
    """True when the installed sqlalchemy is a 1.3.x release."""
    major_minor = SQLA_VERSION[:-1]
    return (1, 3) == major_minor
The provided code snippet includes necessary dependencies for implementing the `mock_sqlalchemy_engine` function. Write a Python function `def mock_sqlalchemy_engine(dialect)` to solve the following problem:
Create a sqlalchemy.engine.Engine without it connecting to a database. Examples -------- :: from siuba.sql import LazyTbl from siuba import _, mutate, show_query engine = mock_sqlalchemy_engine('postgresql') tbl = LazyTbl(engine, 'some_table', ['x']) query = mutate(tbl, y = _.x + _.x) show_query(query)
Here is the function:
def mock_sqlalchemy_engine(dialect):
    """
    Create a sqlalchemy.engine.Engine without it connecting to a database.

    Parameters
    ----------
    dialect :
        Name of a dialect registered with sqlalchemy (e.g. "postgresql").

    Examples
    --------
    ::

        from siuba.sql import LazyTbl
        from siuba import _, mutate, show_query

        engine = mock_sqlalchemy_engine('postgresql')
        tbl = LazyTbl(engine, 'some_table', ['x'])
        query = mutate(tbl, y = _.x + _.x)
        show_query(query)
    """
    # fix: removed unused `from sqlalchemy.engine import Engine`
    from sqlalchemy.dialects import registry
    from types import ModuleType

    # TODO: can be removed once v1.3.18 support dropped
    from sqlalchemy.engine.url import URL

    dialect_cls = registry.load(dialect)

    # there is probably a better way to do this, but for some reason duckdb
    # returns a module, rather than the dialect class itself. By convention,
    # dialect modules expose a variable named dialect, so we grab that.
    if isinstance(dialect_cls, ModuleType):
        dialect_cls = dialect_cls.dialect

    conn = MockConnection(dialect_cls(), lambda *args, **kwargs: None)

    # set a url on it, so that LazyTbl can read the backend name.
    if is_sqla_12() or is_sqla_13():
        url = URL(drivername=dialect)
    else:
        url = URL.create(drivername=dialect)

    conn.url = url

    return conn
158,897 | import warnings
from collections.abc import Mapping
from sqlalchemy import sql
from siuba.dply.verbs import join, left_join, right_join, inner_join, semi_join, anti_join
from ..backend import LazyTbl
from ..utils import _sql_select
def _joined_cols(left_cols, right_cols, on_keys, how, suffix = ("_x", "_y")):
    """Return labeled columns, according to selection rules for joins.

    Rules:
      1. For join keys, keep the left table's column.
      2. When labels occur on both sides, add the _x / _y suffixes.
    """
    # TODO: remove sets, so uses stable ordering
    # right-hand columns that are not join keys survive into the output
    keep_right = set(right_cols.keys()) - set(on_keys.values())
    shared_labs = set(left_cols.keys()).intersection(keep_right)

    right_cols_no_keys = {lab: right_cols[lab] for lab in keep_right}

    # work on a shallow copy, so the caller's mapping is untouched
    left_cols = {**left_cols}
    if how == "full":
        # outer join: key columns coalesce values from both sides
        for lk, rk in on_keys.items():
            left_cols[lk] = sql.functions.coalesce(left_cols[lk], right_cols[rk]).label(lk)
    elif how == "right":
        for lk, rk in on_keys.items():
            # Make left key columns actually be right ones (which contain left + extra)
            left_cols[lk] = right_cols[rk].label(lk)

    # create labels ----
    l_labs = _relabeled_cols(left_cols, shared_labs, suffix[0])
    r_labs = _relabeled_cols(right_cols_no_keys, shared_labs, suffix[1])

    return l_labs + r_labs
def _raise_if_args(args):
if len(args):
raise NotImplemented("*args is reserved for future arguments (e.g. suffix)")
def _validate_join_arg_on(on, sql_on = None, lhs = None, rhs = None):
# handle sql on case
if sql_on is not None:
if on is not None:
raise ValueError("Cannot specify both on and sql_on")
return sql_on
# handle general cases
if on is None:
# TODO: currently, we check for lhs and rhs tables to indicate whether
# a verb supports inferring columns. Otherwise, raise an error.
if lhs is not None and rhs is not None:
# TODO: consolidate with duplicate logic in pandas verb code
warnings.warn(
"No on column passed to join. "
"Inferring join columns instead using shared column names."
)
on_cols = list(set(lhs.columns.keys()).intersection(set(rhs.columns.keys())))
if not on_cols:
raise ValueError(
"No join column specified, or shared column names in join."
)
# trivial dict mapping shared names to themselves
warnings.warn("Detected shared columns: %s" % on_cols)
on = dict(zip(on_cols, on_cols))
else:
raise NotImplementedError("on arg currently cannot be None (default) for SQL")
elif isinstance(on, str):
on = {on: on}
elif isinstance(on, (list, tuple)):
on = dict(zip(on, on))
if not isinstance(on, Mapping):
raise TypeError("on must be a Mapping (e.g. dict)")
return on
def _validate_join_arg_how(how):
how_options = ("inner", "left", "right", "full")
if how not in how_options:
raise ValueError("how argument needs to be one of %s" %how_options)
return how
def _create_join_conds(left_sel, right_sel, on):
    """Build the join's ON clause: via a user callable (sql_on) or key-equality pairs."""
    left_cols = left_sel.columns   # lift_inner_cols(left_sel)
    right_cols = right_sel.columns # lift_inner_cols(right_sel)

    if callable(on):
        # sql_on style: the callable receives both column collections
        return sql.and_(on(left_cols, right_cols))

    # dict-like of form {left: right} -> one equality condition per pair
    pairs = [left_cols[l_name] == right_cols[r_name] for l_name, r_name in on.items()]
    return sql.and_(*pairs)
def join(left, right, on = None, how = None, *args, by = None, **kwargs):
    """Join two tables together, by matching on specified columns.

    The functions inner_join, left_join, right_join, and full_join are provided
    as wrappers around join, and are used in the examples.

    Parameters
    ----------
    left :
        The left-hand table.
    right :
        The right-hand table.
    on :
        How to match them. Note that the keyword "by" can also be used for this
        parameter, in order to support compatibility with dplyr.
    how :
        The type of join to perform (inner, full, left, right).
    *args:
        Additional postition arguments. Currently not supported.
    **kwargs:
        Additional keyword arguments. Currently not supported.

    Returns
    -------
    pd.DataFrame

    Examples
    --------
    >>> from siuba import _, inner_join, left_join, full_join, right_join
    >>> from siuba.data import band_members, band_instruments, band_instruments2

    >>> band_members
       name     band
    0  Mick   Stones
    1  John  Beatles
    2  Paul  Beatles

    >>> band_instruments
        name   plays
    0   John  guitar
    1   Paul    bass
    2  Keith  guitar

    Notice that above, only John and Paul have entries for band instruments.
    This means that they will be the only two rows in the inner_join result:

    >>> band_members >> inner_join(_, band_instruments)
       name     band   plays
    0  John  Beatles  guitar
    1  Paul  Beatles    bass

    A left join ensures all original rows of the left hand data are included.

    >>> band_members >> left_join(_, band_instruments)
       name     band   plays
    0  Mick   Stones     NaN
    1  John  Beatles  guitar
    2  Paul  Beatles    bass

    A full join is similar, but ensures all rows of both data are included.

    >>> band_members >> full_join(_, band_instruments)
        name     band   plays
    0   Mick   Stones     NaN
    1   John  Beatles  guitar
    2   Paul  Beatles    bass
    3  Keith      NaN  guitar

    You can explicilty specify columns to join on using the "by" argument:

    >>> band_members >> inner_join(_, band_instruments, by = "name")
       n...

    Use a dictionary for the by argument, to match up columns with different names:

    >>> band_members >> full_join(_, band_instruments2, {"name": "artist"})
        n...

    Joins create a new row for each pair of matches. For example, the value 1
    is in two rows on the left, and 2 rows on the right so 4 rows will be created.

    >>> df1 = pd.DataFrame({"x": [1, 1, 3]})
    >>> df2 = pd.DataFrame({"x": [1, 1, 2], "y": ["first", "second", "third"]})
    >>> df1 >> left_join(_, df2)
       x       y
    0  1   first
    1  1  second
    2  1   first
    3  1  second
    4  3     NaN

    Missing values count as matches to eachother by default:

    >>> df3 = pd.DataFrame({"x": [1, None], "y": 2})
    >>> df4 = pd.DataFrame({"x": [1, None], "z": 3})
    >>> left_join(df3, df4)
         x  y  z
    0  1.0  2  3
    1  NaN  2  3

    """
    if isinstance(right, DataFrameGroupBy):
        right = right.obj

    # bug fix: these used to raise bare Exception; TypeError / ValueError are
    # more precise and remain catchable by any `except Exception` handler.
    if not isinstance(right, DataFrame):
        raise TypeError("right hand table must be a DataFrame")
    if how is None:
        raise ValueError("Must specify how argument")

    if len(args) or len(kwargs):
        raise NotImplementedError("extra arguments to pandas join not currently supported")

    if on is None and by is not None:
        on = by

    # pandas uses outer, but dplyr uses term full
    if how == "full":
        how = "outer"

    if isinstance(on, Mapping):
        left_on, right_on = zip(*on.items())
        return left.merge(right, how = how, left_on = left_on, right_on = right_on)

    return left.merge(right, how = how, on = on)
def _sql_select(columns, *args, **kwargs):
    """Version-compatible sqlalchemy select over a list of columns.

    sqlalchemy 1.2/1.3 take the columns as a single list argument, while
    newer versions unpack them as positional arguments.
    """
    from sqlalchemy import sql
    if is_sqla_12() or is_sqla_13():
        # use old syntax, where columns are passed as a list
        return sql.select(columns, *args, **kwargs)
    return sql.select(*columns, *args, **kwargs)
def _join(left, right, on = None, *args, by = None, how = "inner", sql_on = None):
    """SQL implementation of the mutating join verbs (inner/left/right/full).

    Builds a sqlalchemy JOIN between the two tables' last operations, then
    selects the combined columns (equality-join keys are consolidated into a
    single column; other shared names are suffixed by _joined_cols).
    """
    _raise_if_args(args)
    # `by` is an alias for `on`
    if on is None and by is not None:
        on = by
    # Needs to be on the table, not the select
    left_sel = left.last_op.alias()
    right_sel = right.last_op.alias()
    # handle arguments ----
    on = _validate_join_arg_on(on, sql_on)
    how = _validate_join_arg_how(how)
    # for equality join used to combine keys into single column
    consolidate_keys = on if sql_on is None else {}
    if how == "right":
        # switch joins, since sqlalchemy doesn't have right join arg
        # see https://stackoverflow.com/q/11400307/1144523
        left_sel, right_sel = right_sel, left_sel
        on = {v:k for k,v in on.items()}
    # create join conditions ----
    bool_clause = _create_join_conds(left_sel, right_sel, on)
    # create join ----
    join = left_sel.join(
        right_sel,
        onclause = bool_clause,
        isouter = how != "inner",
        full = how == "full"
    )
    # if right join, set selects back
    if how == "right":
        left_sel, right_sel = right_sel, left_sel
        on = {v:k for k,v in on.items()}
    # note, shared_keys assumes on is a mapping...
    # TODO: shared_keys appears to be for when on is not specified, but was unused
    #shared_keys = [k for k,v in on.items() if k == v]
    labeled_cols = _joined_cols(
        left_sel.columns,
        right_sel.columns,
        on_keys = consolidate_keys,
        how = how
    )
    sel = _sql_select(labeled_cols).select_from(join)
    return left.append_op(sel, order_by = tuple())
158,898 | import warnings
from collections.abc import Mapping
from sqlalchemy import sql
from siuba.dply.verbs import join, left_join, right_join, inner_join, semi_join, anti_join
from ..backend import LazyTbl
from ..utils import _sql_select
def _raise_if_args(args):
def _validate_join_arg_on(on, sql_on = None, lhs = None, rhs = None):
def _create_join_conds(left_sel, right_sel, on):
def _sql_select(columns, *args, **kwargs):
def _semi_join(left, right = None, on = None, *args, by = None, sql_on = None):
    """Keep rows of `left` that have at least one match in `right` (semi join).

    Implemented as WHERE EXISTS (SELECT 1 FROM right WHERE <join conds>);
    only left-hand columns are returned.
    """
    # `by` is an alias for `on`
    if on is None and by is not None:
        on = by
    _raise_if_args(args)
    left_sel = left.last_op.alias()
    right_sel = right.last_op.alias()
    # handle arguments ----
    # note: the aliased selects are passed so shared column names can be inferred
    on = _validate_join_arg_on(on, sql_on, left_sel, right_sel)
    # create join conditions ----
    bool_clause = _create_join_conds(left_sel, right_sel, on)
    # create inner join ----
    exists_clause = _sql_select([sql.literal(1)]) \
        .select_from(right_sel) \
        .where(bool_clause)
    # only keep left hand select's columns ----
    sel = _sql_select(left_sel.columns) \
        .select_from(left_sel) \
        .where(sql.exists(exists_clause))
    return left.append_op(sel, order_by = tuple())
158,899 | import warnings
from collections.abc import Mapping
from sqlalchemy import sql
from siuba.dply.verbs import join, left_join, right_join, inner_join, semi_join, anti_join
from ..backend import LazyTbl
from ..utils import _sql_select
def _raise_if_args(args):
if len(args):
raise NotImplemented("*args is reserved for future arguments (e.g. suffix)")
def _validate_join_arg_on(on, sql_on = None, lhs = None, rhs = None):
# handle sql on case
if sql_on is not None:
if on is not None:
raise ValueError("Cannot specify both on and sql_on")
return sql_on
# handle general cases
if on is None:
# TODO: currently, we check for lhs and rhs tables to indicate whether
# a verb supports inferring columns. Otherwise, raise an error.
if lhs is not None and rhs is not None:
# TODO: consolidate with duplicate logic in pandas verb code
warnings.warn(
"No on column passed to join. "
"Inferring join columns instead using shared column names."
)
on_cols = list(set(lhs.columns.keys()).intersection(set(rhs.columns.keys())))
if not on_cols:
raise ValueError(
"No join column specified, or shared column names in join."
)
# trivial dict mapping shared names to themselves
warnings.warn("Detected shared columns: %s" % on_cols)
on = dict(zip(on_cols, on_cols))
else:
raise NotImplementedError("on arg currently cannot be None (default) for SQL")
elif isinstance(on, str):
on = {on: on}
elif isinstance(on, (list, tuple)):
on = dict(zip(on, on))
if not isinstance(on, Mapping):
raise TypeError("on must be a Mapping (e.g. dict)")
return on
def _create_join_conds(left_sel, right_sel, on):
    """Build the boolean ON clause for a join.

    `on` may be a callable (the sql_on case) receiving both column
    collections, or a {left_name: right_name} mapping of equality conditions.
    """
    left_cols = left_sel.columns #lift_inner_cols(left_sel)
    right_cols = right_sel.columns #lift_inner_cols(right_sel)
    if callable(on):
        # callable, like with sql_on arg: delegate condition building
        return sql.and_(on(left_cols, right_cols))
    # dict-like of form {left: right}: one equality condition per pair
    pairs = [left_cols[lname] == right_cols[rname] for lname, rname in on.items()]
    return sql.and_(*pairs)
def _sql_select(columns, *args, **kwargs):
    """Version-compatible sqlalchemy select over a list of columns.

    sqlalchemy 1.2/1.3 take the columns as a single list argument, while
    newer versions unpack them as positional arguments.
    """
    from sqlalchemy import sql
    if is_sqla_12() or is_sqla_13():
        # use old syntax, where columns are passed as a list
        return sql.select(columns, *args, **kwargs)
    return sql.select(*columns, *args, **kwargs)
def _anti_join(left, right = None, on = None, *args, by = None, sql_on = None):
    """Keep rows of `left` that have no match in `right` (anti join).

    Implemented as WHERE NOT EXISTS (SELECT 1 FROM right WHERE <join conds>).
    """
    # `by` is an alias for `on`
    if on is None and by is not None:
        on = by
    _raise_if_args(args)
    left_sel = left.last_op.alias()
    right_sel = right.last_op.alias()
    # handle arguments ----
    # FIX: pass the aliased selects (not the LazyTbls) so that shared column
    # names can be inferred from `.columns` when `on` is omitted -- consistent
    # with _semi_join.
    on = _validate_join_arg_on(on, sql_on, left_sel, right_sel)
    # create join conditions ----
    bool_clause = _create_join_conds(left_sel, right_sel, on)
    # create inner join ----
    #not_exists = ~sql.exists([1], from_obj = right_sel).where(bool_clause)
    exists_clause = _sql_select([sql.literal(1)]) \
        .select_from(right_sel) \
        .where(bool_clause)
    # keep only left rows with no matching right row
    sel = left_sel.select().where(~sql.exists(exists_clause))
    return left.append_op(sel, order_by = tuple())
from contextlib import contextmanager

from sqlalchemy import sql

from siuba.dply.across import _set_data_context
from siuba.dply.verbs import filter
from siuba.siu import Call

from ..backend import LazyTbl
from ..translate import ColumnCollection
from ..utils import _sql_select
def _sql_select(columns, *args, **kwargs):
    """Version-compatible sqlalchemy select over a list of columns.

    sqlalchemy 1.2/1.3 take the columns as a single list argument, while
    newer versions unpack them as positional arguments.
    """
    from sqlalchemy import sql
    if is_sqla_12() or is_sqla_13():
        # use old syntax, where columns are passed as a list
        return sql.select(columns, *args, **kwargs)
    return sql.select(*columns, *args, **kwargs)
@contextmanager
def _set_data_context(ctx, window):
    """Context manager binding the verb's data/window context variables.

    Sets ctx_verb_data and ctx_verb_window for the duration of the `with`
    block, restoring both on exit.

    FIX: this generator is used via `with _set_data_context(...)` (see
    _filter below), so it must be decorated with @contextmanager -- a bare
    generator has no __enter__/__exit__.
    """
    try:
        token = ctx_verb_data.set(ctx)
        token_win = ctx_verb_window.set(window)
        yield
    finally:
        ctx_verb_data.reset(token)
        ctx_verb_window.reset(token_win)
def _filter(__data, *args):
    """SQL filter: keep rows satisfying all of the given conditions.

    Window expressions are first materialized as extra columns in a CTE,
    then the WHERE clause is applied in an outer select.
    """
    # Note: currently always produces 2 additional select statements,
    # 1 for window/aggs, and 1 for the where clause
    sel = __data.last_op.alias() # original select
    win_sel = sel.select()
    conds = []
    windows = []
    with _set_data_context(__data, window=True):
        for ii, arg in enumerate(args):
            if isinstance(arg, Call):
                new_call = __data.shape_call(arg, verb_name = "Filter", arg_name = ii)
                #var_cols = new_call.op_vars(attr_calls = False)
                # note that a new win_sel is returned, w/ window columns appended
                col_expr, win_cols, win_sel = __data.track_call_windows(
                    new_call,
                    sel.columns,
                    window_cte = win_sel
                )
                if isinstance(col_expr, ColumnCollection):
                    # e.g. across() produced several conditions at once
                    conds.extend(col_expr)
                else:
                    conds.append(col_expr)
                windows.extend(win_cols)
            else:
                # already a sqlalchemy expression (or literal)
                conds.append(arg)
    bool_clause = sql.and_(*conds)
    # first cte, windows ----
    if len(windows):
        win_alias = win_sel.alias()
        # move non-window functions to refer to win_sel clause (not the innermost) ---
        bool_clause = sql.util.ClauseAdapter(win_alias) \
            .traverse(bool_clause)
        orig_cols = [win_alias.columns[k] for k in sel.columns.keys()]
    else:
        orig_cols = [sel]
    # create second cte ----
    filt_sel = _sql_select(orig_cols).where(bool_clause)
    return __data.append_op(filt_sel)
158,901 | from siuba.dply.verbs import group_by, ungroup
from ..backend import LazyTbl, ordered_union
from ..utils import lift_inner_cols
from .mutate import _mutate_cols
def group_by(__data, *args, add = False, **kwargs):
def ordered_union(x, y):
def lift_inner_cols(tbl):
def _mutate_cols(__data, args, kwargs, verb_name):
def _group_by(__data, *args, add = False, **kwargs):
    """SQL group_by: record the grouping columns on the LazyTbl.

    New expressions are computed via the mutate machinery; when every
    requested column already exists on the current select, no new select
    statement is added.
    """
    if not (args or kwargs):
        # nothing to group by: return an identical copy
        return __data.copy()
    group_names, sel = _mutate_cols(__data, args, kwargs, "Group by")
    if None in group_names:
        raise NotImplementedError("Complex, unnamed expressions not supported in sql group_by")
    # check whether we can just use underlying table ----
    new_cols = lift_inner_cols(sel)
    if set(new_cols).issubset(set(__data.last_op.columns)):
        sel = __data.last_op
    if add:
        # keep existing groupings, appending any new ones
        group_names = ordered_union(__data.group_by, group_names)
    return __data.append_op(sel, group_by = tuple(group_names))
158,902 | from siuba.dply.verbs import group_by, ungroup
from ..backend import LazyTbl, ordered_union
from ..utils import lift_inner_cols
from .mutate import _mutate_cols
def group_by(__data, *args, add = False, **kwargs):
    """Return a grouped DataFrame, using columns or expressions to define groups.

    Any operations (e.g. summarize, mutate, filter) performed on grouped data
    will be performed "by group". Use `ungroup()` to remove the groupings.

    Parameters
    ----------
    __data:
        The data being grouped.
    *args:
        Lazy expressions used to select the grouping columns. Currently, each
        arg must refer to a single columns (e.g. _.cyl, _.mpg).
    add: bool
        If the data is already grouped, whether to add these groupings on top of those.
    **kwargs:
        Keyword arguments define new columns used to group the data.

    Examples
    --------
    >>> from siuba import _, group_by, summarize, filter, mutate, head
    >>> from siuba.data import cars
    >>> by_cyl = cars >> group_by(_.cyl)
    >>> by_cyl >> summarize(max_mpg = _.mpg.max(), max_hp = _.hp.max())
       cyl  max_mpg  max_hp
    0    4     33.9     113
    1    6     21.4     175
    2    8     19.2     335

    >>> by_cyl >> filter(_.mpg == _.mpg.max())
    (grouped data frame)
        cyl   mpg   hp
    3     6  21.4  110
    19    4  33.9   65
    24    8  19.2  175

    >>> cars >> group_by(cyl2 = _.cyl + 1) >> head(2)
    (grouped data frame)
       cyl   mpg   hp  cyl2
    0    6  21.0  110     7
    1    6  21.0  110     7

    Note that creating the new grouping column is always performed on ungrouped data.
    Use an explicit mutate on the grouped data perform the operation within groups.
    For example, the code below calls pd.cut on the mpg column, within each cyl group.

    >>> from siuba.siu import call
    >>> (cars
    ...     >> group_by(_.cyl)
    ...     >> mutate(mpg_bin = call(pd.cut, _.mpg, 3))
    ...     >> group_by(_.mpg_bin, add=True)
    ...     >> head(2)
    ...     )
    (grouped data frame)
       cyl   mpg   hp       mpg_bin
    0    6  21.0  110  (20.2, 21.4]
    1    6  21.0  110  (20.2, 21.4]
    """
    # work on a plain (ungrouped) copy of the underlying frame
    if isinstance(__data, DataFrameGroupBy):
        tmp_df = __data.obj.copy()
    else:
        tmp_df = __data.copy()
    # TODO: super inefficient, since it makes multiple copies of data
    # need way to get the by_vars and apply (grouped) computation
    computed = transmute(tmp_df, *args, **kwargs)
    by_vars = list(computed.columns)
    for k in by_vars:
        tmp_df[k] = computed[k]
    if isinstance(__data, DataFrameGroupBy) and add:
        # merge new groupings on top of the existing ones
        groupings = {el.name: el for el in __data.grouper.groupings}
        for varname in by_vars:
            # ensures group levels are recalculated if varname was in transmute
            groupings[varname] = varname
        return tmp_df.groupby(list(groupings.values()), dropna=False, group_keys=True)
    return tmp_df.groupby(by = by_vars, dropna=False, group_keys=True)
def _ungroup(__data):
    """SQL ungroup: return a copy of the LazyTbl with all groupings removed."""
    return __data.copy(group_by = tuple())
158,903 | from siuba.dply.verbs import (
simple_varname,
mutate,
transmute,
)
from ..backend import LazyTbl, SqlLabelReplacer
from ..translate import ColumnCollection
from ..utils import (
_sql_with_only_columns,
lift_inner_cols
)
from sqlalchemy import sql
from siuba.dply.across import _require_across, _eval_with_context
def _mutate_cols(__data, args, kwargs, verb_name):
    """Evaluate mutate-style expressions, returning (result names, select).

    Positional args may produce a single column or a ColumnCollection (e.g.
    from across()); kwargs each produce one labeled column. Each result is
    folded into the running select, inserting a subquery where required.
    """
    result_names = {} # used as ordered set
    sel = __data.last_select
    for ii, func in enumerate(args):
        cols_result = _eval_expr_arg(__data, sel, func, verb_name)
        # replace any labels that require a subquery ----
        sel = _select_mutate_result(sel, cols_result)
        if isinstance(cols_result, ColumnCollection):
            result_names.update({k: True for k in cols_result.keys()})
        else:
            result_names[cols_result.name] = True
    for new_name, func in kwargs.items():
        labeled = _eval_expr_kwarg(__data, sel, func, new_name, verb_name)
        sel = _select_mutate_result(sel, labeled)
        result_names[new_name] = True
    return list(result_names), sel
def _mutate(__data, *args, **kwargs):
    """SQL mutate: append computed columns to the query.

    With no expressions, simply re-appends the last operation unchanged.
    """
    # TODO: verify it can follow a renaming select
    # track labeled columns in set
    if not args and not kwargs:
        return __data.append_op(__data.last_op)
    _, sel_out = _mutate_cols(__data, args, kwargs, "Mutate")
    return __data.append_op(sel_out)
158,904 | from siuba.dply.verbs import (
simple_varname,
mutate,
transmute,
)
from ..backend import LazyTbl, SqlLabelReplacer
from ..translate import ColumnCollection
from ..utils import (
_sql_with_only_columns,
lift_inner_cols
)
from sqlalchemy import sql
from siuba.dply.across import _require_across, _eval_with_context
def _mutate_cols(__data, args, kwargs, verb_name):
def _sql_with_only_columns(select, columns):
def lift_inner_cols(tbl):
def _transmute(__data, *args, **kwargs):
    """SQL transmute: mutate, then keep only grouping + newly created columns."""
    # will use mutate, then select some cols
    result_names, sel = _mutate_cols(__data, args, kwargs, "Transmute")
    # transmute keeps grouping cols, and any defined in kwargs
    missing = [x for x in __data.group_by if x not in result_names]
    cols_to_keep = [*missing, *result_names]
    columns = lift_inner_cols(sel)
    sel_stripped = _sql_with_only_columns(sel, [columns[k] for k in cols_to_keep])
    return __data.append_op(sel_stripped)
158,905 | from siuba.dply.verbs import distinct, mutate
from ..backend import LazyTbl, ordered_union
from ..utils import _sql_select, _sql_with_only_columns, lift_inner_cols
from .mutate import _mutate_cols
def distinct(__data, *args, _keep_all = False, **kwargs):
    """Keep only distinct (unique) rows from a table.

    Parameters
    ----------
    __data:
        The input data.
    *args:
        Columns to use when determining which rows are unique.
    _keep_all:
        Whether to keep all columns of the original data, not just *args.
    **kwargs:
        If specified, arguments passed to the verb mutate(), and then being used
        in distinct().

    See Also
    --------
    count : keep distinct rows, and count their number of observations.

    Examples
    --------
    >>> from siuba import _, distinct, select
    >>> from siuba.data import penguins
    >>> penguins >> distinct(_.species, _.island)
         species     island
    0     Adelie  Torgersen
    1     Adelie     Biscoe
    2     Adelie      Dream
    3     Gentoo     Biscoe
    4  Chinstrap      Dream

    Use _keep_all=True, to keep all columns in each distinct row. This lets you
    peak at the values of the first unique row.

    >>> small_penguins = penguins >> select(_[:4])
    >>> small_penguins >> distinct(_.species, _keep_all = True)
         species     island  bill_length_mm  bill_depth_mm
    0     Adelie  Torgersen            39.1           18.7
    1     Gentoo     Biscoe            46.1           13.2
    2  Chinstrap      Dream            46.5           17.9
    """
    if not (args or kwargs):
        # no columns given: distinct over all columns
        return __data.drop_duplicates().reset_index(drop=True)
    # NOTE(review): assumes a 3-argument pandas _mutate_cols helper is in scope
    # here (a 4-argument SQL variant exists elsewhere in this file) -- confirm.
    new_names, df_res = _mutate_cols(__data, args, kwargs)
    tmp_data = df_res.drop_duplicates(new_names).reset_index(drop=True)
    if not _keep_all:
        return tmp_data[new_names]
    return tmp_data
def ordered_union(x, y):
    """Return a tuple of the items of x then y, de-duplicated, order preserved."""
    seen = dict.fromkeys(x)
    seen.update(dict.fromkeys(y))
    return tuple(seen)
def _sql_select(columns, *args, **kwargs):
    """Version-compatible sqlalchemy select over a list of columns.

    sqlalchemy 1.2/1.3 take the columns as a single list argument, while
    newer versions unpack them as positional arguments.
    """
    from sqlalchemy import sql
    if is_sqla_12() or is_sqla_13():
        # use old syntax, where columns are passed as a list
        return sql.select(columns, *args, **kwargs)
    return sql.select(*columns, *args, **kwargs)
def _sql_with_only_columns(select, columns):
    """Version-compatible with_only_columns, preserving the FROM clause.

    sqlalchemy 1.2/1.3 take the columns as a list; newer versions unpack them.
    """
    if is_sqla_12() or is_sqla_13():
        out = select.with_only_columns(columns)
    else:
        out = select.with_only_columns(*columns)
    # ensure removing all columns doesn't remove from clause table reference
    for _from in select.froms:
        out = out.select_from(_from)
    return out
def lift_inner_cols(tbl):
    """Return the select's inner columns as a sqlalchemy column collection."""
    cols = list(tbl.inner_columns)
    return _sql_column_collection(cols)
def _mutate_cols(__data, args, kwargs, verb_name):
    """Evaluate mutate-style expressions, returning (result names, select).

    Positional args may produce a single column or a ColumnCollection (e.g.
    from across()); kwargs each produce one labeled column. Each result is
    folded into the running select, inserting a subquery where required.
    """
    result_names = {} # used as ordered set
    sel = __data.last_select
    for ii, func in enumerate(args):
        cols_result = _eval_expr_arg(__data, sel, func, verb_name)
        # replace any labels that require a subquery ----
        sel = _select_mutate_result(sel, cols_result)
        if isinstance(cols_result, ColumnCollection):
            result_names.update({k: True for k in cols_result.keys()})
        else:
            result_names[cols_result.name] = True
    for new_name, func in kwargs.items():
        labeled = _eval_expr_kwarg(__data, sel, func, new_name, verb_name)
        sel = _select_mutate_result(sel, labeled)
        result_names[new_name] = True
    return list(result_names), sel
def _distinct(__data, *args, _keep_all = False, **kwargs):
    """SQL distinct: SELECT DISTINCT over the chosen (or all) columns."""
    if (args or kwargs) and _keep_all:
        raise NotImplementedError("Distinct with variables specified in sql requires _keep_all = False")
    result_names, inner_sel = _mutate_cols(__data, args, kwargs, "Distinct")
    # create list of final column names ----
    missing = [name for name in __data.group_by if name not in result_names]
    if not result_names:
        # use all columns if none passed to distinct
        all_names = list(lift_inner_cols(inner_sel).keys())
        final_names = ordered_union(missing, all_names)
    else:
        final_names = ordered_union(missing, result_names)
    if not (len(inner_sel._order_by_clause) or len(inner_sel._group_by_clause)):
        # select distinct has to include any columns in the order by clause,
        # so can only safely modify existing statement when there's no order by
        sel_cols = lift_inner_cols(inner_sel)
        distinct_cols = [sel_cols[k] for k in final_names]
        sel = _sql_with_only_columns(inner_sel, distinct_cols).distinct()
    else:
        # fallback to cte
        cte = inner_sel.alias()
        distinct_cols = [cte.columns[k] for k in final_names]
        sel = _sql_select(distinct_cols).select_from(cte).distinct()
    return __data.append_op(sel)
158,906 | import warnings
from sqlalchemy import sql
from siuba.dply.verbs import case_when, if_else
from siuba.siu import Call
from ..utils import _sql_case
from ..backend import LazyTbl
from ..translate import ColumnCollection
def _sql_case(whens, **kwargs):
    """Version-compatible sqlalchemy CASE: v1.2/1.3 take whens as a list."""
    from sqlalchemy import sql
    if is_sqla_12() or is_sqla_13():
        return sql.case(whens, **kwargs)
    return sql.case(*whens, **kwargs)
def _case_when(__data, cases):
    """Translate a case_when mapping into a SQL CASE expression.

    Parameters
    ----------
    __data:
        The object expressions are evaluated against (e.g. a column collection).
    cases:
        A mapping (or a Call producing one) of condition -> value. Conditions
        and values may be callables (siu expressions), which are evaluated
        against __data first. A literal `True` condition acts as the final
        catch-all WHEN; no explicit ELSE is ever emitted.
    """
    # TODO: will need listener to enter case statements, to handle when they use windows
    if isinstance(cases, Call):
        cases = cases(__data)
    whens = []
    # cleanup: removed unused `case_items`/`n_items` locals and the dead
    # commented-out else-branch logic that referenced them
    else_val = None
    for expr, val in cases.items():
        # handle where val is a column expr
        if callable(val):
            val = val(__data)
        if expr is True:
            # note: only sqlalchemy v1.3 requires wrapping in literal
            whens.append((sql.literal(expr), val))
        elif callable(expr):
            whens.append((expr(__data), val))
        else:
            whens.append((expr, val))
    return _sql_case(whens, else_ = else_val)
158,907 | import warnings
from sqlalchemy import sql
from siuba.dply.verbs import case_when, if_else
from siuba.siu import Call
from ..utils import _sql_case
from ..backend import LazyTbl
from ..translate import ColumnCollection
def _case_when(__data, cases):
raise NotImplementedError(
"`case_when()` must be used inside a verb like `mutate()`, when using a "
"SQL backend."
) | null |
158,908 | import warnings
from sqlalchemy import sql
from siuba.dply.verbs import case_when, if_else
from siuba.siu import Call
from ..utils import _sql_case
from ..backend import LazyTbl
from ..translate import ColumnCollection
def _sql_case(whens, **kwargs):
def _if_else(cond, true_vals, false_vals):
    """Translate if_else to SQL: CASE WHEN cond THEN true_vals ELSE false_vals."""
    return _sql_case([(cond, true_vals)], else_ = false_vals)
158,909 | from siuba.dply.verbs import collect
from ..backend import LazyTbl
from ..utils import _FixedSqlDatabase, _is_dialect_duckdb, MockConnection
class _FixedSqlDatabase(_pd_sql.SQLDatabase):
    """Shim over pandas' SQLDatabase whose execute works across pandas versions.

    pandas renamed the attribute holding the connection, so try `connectable`
    first and fall back to `con`.
    """
    def execute(self, *args, **kwargs):
        if hasattr(self, "connectable"):
            return self.connectable.execute(*args, **kwargs)
        return self.con.execute(*args, **kwargs)
def _is_dialect_duckdb(engine):
return engine.url.get_backend_name() == "duckdb"
def _collect(__data, as_df = True):
    """Execute the LazyTbl's query.

    Parameters
    ----------
    __data:
        The LazyTbl to execute.
    as_df:
        Whether to return a pandas DataFrame (otherwise the raw result).
    """
    # TODO: maybe remove as_df options, always return dataframe
    if isinstance(__data.source, MockConnection):
        # a mock sqlalchemy is being used to show_query, and echo queries.
        # it doesn't return a result object or have a context handler, so
        # we need to bail out early
        return
    # compile query ----
    if _is_dialect_duckdb(__data.source):
        # TODO: can be removed once next release of duckdb fixes:
        # https://github.com/duckdb/duckdb/issues/2972
        query = __data.last_select
        compiled = query.compile(
            dialect = __data.source.dialect,
            compile_kwargs = {"literal_binds": True}
        )
    else:
        compiled = __data.last_select
    # execute query ----
    with __data.source.connect() as conn:
        if as_df:
            sql_db = _FixedSqlDatabase(conn)
            if _is_dialect_duckdb(__data.source):
                # TODO: pandas read_sql is very slow with duckdb.
                # see https://github.com/pandas-dev/pandas/issues/45678
                # going to handle here for now. address once LazyTbl gets
                # subclassed per backend.
                duckdb_con = conn.connection.c
                return duckdb_con.query(str(compiled)).to_df()
            else:
                #
                return sql_db.read_sql(compiled)
        return conn.execute(compiled)
158,910 | from sqlalchemy import sql
from siuba.dply.verbs import count, add_count, inner_join, _check_name
from ..utils import _sql_select, _sql_add_columns, lift_inner_cols
from ..backend import LazyTbl, ordered_union
from ..translate import AggOver
from .mutate import _mutate_cols
def count(__data, *args, wt = None, sort = False, name=None, **kwargs):
    """Summarize data with the number of rows for each grouping of data.

    Parameters
    ----------
    __data:
        A DataFrame.
    *args:
        The names of columns to be used for grouping. Passed to group_by.
    wt:
        The name of a column to use as a weighted for each row.
    sort:
        Whether to sort the results in descending order.
    **kwargs:
        Creates a new named column, and uses for grouping. Passed to group_by.

    Examples
    --------
    >>> from siuba import _, count, group_by, summarize, arrange
    >>> from siuba.data import mtcars
    >>> count(mtcars, _.cyl, high_mpg = _.mpg > 30)
       cyl  high_mpg   n
    0    4     False   7
    1    4      True   4
    2    6     False   7
    3    8     False  14

    Use sort to order results by number of observations (in descending order).

    >>> count(mtcars, _.cyl, sort=True)
       cyl   n
    0    8  14
    1    4  11
    2    6   7

    count is equivalent to doing a grouped summarize:

    >>> mtcars >> group_by(_.cyl) >> summarize(n = _.shape[0]) >> arrange(-_.n)
       cyl   n
    2    8  14
    0    4  11
    1    6   7
    """
    no_grouping_vars = not args and not kwargs and isinstance(__data, pd.DataFrame)
    if wt is None:
        if no_grouping_vars:
            # no groups, just use number of rows
            counts = pd.DataFrame({'tmp': [__data.shape[0]]})
        else:
            # tally rows for each group
            counts = group_by(__data, *args, add = True, **kwargs).size().reset_index()
    else:
        wt_col = simple_varname(wt)
        if wt_col is None:
            raise Exception("wt argument has to be simple column name")
        if no_grouping_vars:
            # no groups, sum weights
            counts = pd.DataFrame({'tmp': [__data[wt_col].sum()]})
        else:
            # do weighted tally
            counts = group_by(__data, *args, add = True, **kwargs)[wt_col].sum().reset_index()
    # count col named, n. If that col already exists, add more "n"s...
    out_col = _check_name(name, set(counts.columns))
    # rename the tally column to correct name
    counts.rename(columns = {counts.columns[-1]: out_col}, inplace = True)
    if sort:
        return counts.sort_values(out_col, ascending = False).reset_index(drop = True)
    return counts
def _check_name(name, columns):
if name is None:
name = "n"
while name in columns:
name = name + "n"
elif name != "n" and name in columns:
raise ValueError(
f"Column name `{name}` specified for count name, but is already present in data."
)
elif not isinstance(name, str):
raise TypeError("`name` must be a single string.")
return name
def _sql_select(columns, *args, **kwargs):
    """Version-compatible sqlalchemy select over a list of columns.

    sqlalchemy 1.2/1.3 take the columns as a single list argument, while
    newer versions unpack them as positional arguments.
    """
    from sqlalchemy import sql
    if is_sqla_12() or is_sqla_13():
        # use old syntax, where columns are passed as a list
        return sql.select(columns, *args, **kwargs)
    return sql.select(*columns, *args, **kwargs)
def lift_inner_cols(tbl):
    """Return the select's inner columns as a sqlalchemy column collection."""
    cols = list(tbl.inner_columns)
    return _sql_column_collection(cols)
def ordered_union(x, y):
    """Return a tuple of the items of x then y, de-duplicated, order preserved."""
    # dicts are used as ordered sets; merging keeps first-seen order
    dx = {el: True for el in x}
    dy = {el: True for el in y}
    return tuple({**dx, **dy})
def _mutate_cols(__data, args, kwargs, verb_name):
    """Evaluate mutate-style expressions, returning (result names, select).

    Positional args may produce a single column or a ColumnCollection (e.g.
    from across()); kwargs each produce one labeled column. Each result is
    folded into the running select, inserting a subquery where required.
    """
    result_names = {} # used as ordered set
    sel = __data.last_select
    for ii, func in enumerate(args):
        cols_result = _eval_expr_arg(__data, sel, func, verb_name)
        # replace any labels that require a subquery ----
        sel = _select_mutate_result(sel, cols_result)
        if isinstance(cols_result, ColumnCollection):
            result_names.update({k: True for k in cols_result.keys()})
        else:
            result_names[cols_result.name] = True
    for new_name, func in kwargs.items():
        labeled = _eval_expr_kwarg(__data, sel, func, new_name, verb_name)
        sel = _select_mutate_result(sel, labeled)
        result_names[new_name] = True
    return list(result_names), sel
def _count(__data, *args, sort = False, wt = None, name=None, **kwargs):
    """SQL count: group by the given columns and tally rows per group."""
    if wt is not None:
        raise NotImplementedError("wt argument is currently not implemented")
    result_names, sel_inner = _mutate_cols(__data, args, kwargs, "Count")
    # remove unnecessary select, if we're operating on a table ----
    if set(lift_inner_cols(sel_inner)) == set(lift_inner_cols(__data.last_select)):
        sel_inner = __data.last_op
    # create outer select ----
    # holds selected columns and tally (n)
    sel_inner_cte = sel_inner.alias()
    inner_cols = sel_inner_cte.columns
    # apply any group vars from a group_by verb call first
    # NOTE(review): `missing` is computed but never used below
    missing = [k for k in __data.group_by if k not in result_names]
    all_group_names = ordered_union(__data.group_by, result_names)
    outer_group_cols = [inner_cols[k] for k in all_group_names]
    # holds the actual count (e.g. n)
    label_n = _check_name(name, set(inner_cols.keys()))
    count_col = sql.functions.count().label(label_n)
    sel_outer = _sql_select([*outer_group_cols, count_col]) \
        .select_from(sel_inner_cte) \
        .group_by(*outer_group_cols)
    # count is like summarize, so removes order_by
    return __data.append_op(
        sel_outer.order_by(count_col.desc()),
        order_by = tuple()
    )
158,911 | from sqlalchemy import sql
from siuba.dply.verbs import count, add_count, inner_join, _check_name
from ..utils import _sql_select, _sql_add_columns, lift_inner_cols
from ..backend import LazyTbl, ordered_union
from ..translate import AggOver
from .mutate import _mutate_cols
def count(__data, *args, wt = None, sort = False, name=None, **kwargs):
    """Summarize data with the number of rows for each grouping of data.

    Parameters
    ----------
    __data:
        A DataFrame.
    *args:
        The names of columns to be used for grouping. Passed to group_by.
    wt:
        The name of a column to use as a weighted for each row.
    sort:
        Whether to sort the results in descending order.
    **kwargs:
        Creates a new named column, and uses for grouping. Passed to group_by.

    Examples
    --------
    >>> from siuba import _, count, group_by, summarize, arrange
    >>> from siuba.data import mtcars
    >>> count(mtcars, _.cyl, high_mpg = _.mpg > 30)
       cyl  high_mpg   n
    0    4     False   7
    1    4      True   4
    2    6     False   7
    3    8     False  14

    Use sort to order results by number of observations (in descending order).

    >>> count(mtcars, _.cyl, sort=True)
       cyl   n
    0    8  14
    1    4  11
    2    6   7

    count is equivalent to doing a grouped summarize:

    >>> mtcars >> group_by(_.cyl) >> summarize(n = _.shape[0]) >> arrange(-_.n)
       cyl   n
    2    8  14
    0    4  11
    1    6   7
    """
    no_grouping_vars = not args and not kwargs and isinstance(__data, pd.DataFrame)
    if wt is None:
        if no_grouping_vars:
            # no groups, just use number of rows
            counts = pd.DataFrame({'tmp': [__data.shape[0]]})
        else:
            # tally rows for each group
            counts = group_by(__data, *args, add = True, **kwargs).size().reset_index()
    else:
        wt_col = simple_varname(wt)
        if wt_col is None:
            raise Exception("wt argument has to be simple column name")
        if no_grouping_vars:
            # no groups, sum weights
            counts = pd.DataFrame({'tmp': [__data[wt_col].sum()]})
        else:
            # do weighted tally
            counts = group_by(__data, *args, add = True, **kwargs)[wt_col].sum().reset_index()
    # count col named, n. If that col already exists, add more "n"s...
    out_col = _check_name(name, set(counts.columns))
    # rename the tally column to correct name
    counts.rename(columns = {counts.columns[-1]: out_col}, inplace = True)
    if sort:
        return counts.sort_values(out_col, ascending = False).reset_index(drop = True)
    return counts
def _check_name(name, columns):
    """Validate or generate the output count column name.

    When name is None, uses "n", appending "n"s until the generated name is
    not already a column. An explicit name must be a string that does not
    collide with an existing column (unless it is exactly "n").
    """
    if name is None:
        name = "n"
        while name in columns:
            name = name + "n"
    elif name != "n" and name in columns:
        raise ValueError(
            f"Column name `{name}` specified for count name, but is already present in data."
        )
    elif not isinstance(name, str):
        raise TypeError("`name` must be a single string.")
    return name
def _sql_add_columns(select, columns):
    """Version-compatible append of columns to a sqlalchemy select."""
    if is_sqla_12() or is_sqla_13():
        # old api: one .column() call per column
        for column in columns:
            select = select.column(column)
        return select
    return select.add_columns(*columns)
def lift_inner_cols(tbl):
    """Return the select's inner columns as a sqlalchemy column collection."""
    cols = list(tbl.inner_columns)
    return _sql_column_collection(cols)
def ordered_union(x, y):
    """Return a tuple of the items of x then y, de-duplicated, order preserved."""
    # dicts are used as ordered sets; merging keeps first-seen order
    dx = {el: True for el in x}
    dy = {el: True for el in y}
    return tuple({**dx, **dy})
class AggOver(CustomOverClause):
    """Over clause for uses of functions min, max, avg, that return one value.
    Note that this class does not set order by, which is how these functions
    generally become their cumulative versions.
    E.g. mean(x) -> AVG(x) OVER (partition_by <group vars>)
    """
    def set_over(self, group_by, order_by = None):
        # aggregates only partition; order_by is accepted but ignored here
        self.partition_by = group_by
        return self
    # NOTE(review): takes `cls` -- presumably a @classmethod whose decorator
    # was lost in extraction; confirm against upstream source.
    def func(cls, name):
        # look up the sqlalchemy function by name and wrap it in this clause
        sa_func = getattr(sql.func, name)
        def f(codata, col, *args, **kwargs) -> AggOver:
            return cls(sa_func(col, *args, **kwargs))
        return f
def _mutate_cols(__data, args, kwargs, verb_name):
    """Evaluate mutate-style expressions, returning (result names, select).

    Positional args may produce a single column or a ColumnCollection (e.g.
    from across()); kwargs each produce one labeled column. Each result is
    folded into the running select, inserting a subquery where required.
    """
    result_names = {} # used as ordered set
    sel = __data.last_select
    for ii, func in enumerate(args):
        cols_result = _eval_expr_arg(__data, sel, func, verb_name)
        # replace any labels that require a subquery ----
        sel = _select_mutate_result(sel, cols_result)
        if isinstance(cols_result, ColumnCollection):
            result_names.update({k: True for k in cols_result.keys()})
        else:
            result_names[cols_result.name] = True
    for new_name, func in kwargs.items():
        labeled = _eval_expr_kwarg(__data, sel, func, new_name, verb_name)
        sel = _select_mutate_result(sel, labeled)
        result_names[new_name] = True
    return list(result_names), sel
def _add_count(__data, *args, wt = None, sort = False, name=None, **kwargs):
    """SQL add_count: append a windowed count column, keeping all rows."""
    if wt is not None:
        raise NotImplementedError("wt argument is currently not implemented")
    result_names, sel_inner = _mutate_cols(__data, args, kwargs, "Count")
    # TODO: if clause copied from count
    # remove unnecessary select, if we're operating on a table ----
    if set(lift_inner_cols(sel_inner)) == set(lift_inner_cols(__data.last_select)):
        sel_inner = __data.last_select
    inner_cols = lift_inner_cols(sel_inner)
    # TODO: this code to append groups to columns copied a lot inside verbs
    # apply any group vars from a group_by verb call first
    missing = [k for k in __data.group_by if k not in result_names]
    all_group_names = ordered_union(__data.group_by, result_names)
    outer_group_cols = [inner_cols[k] for k in all_group_names]
    # COUNT(*) OVER (PARTITION BY <groups>), so every row keeps its tally
    count_col = AggOver(sql.functions.count(), partition_by=outer_group_cols)
    label_n = _check_name(name, set(inner_cols.keys()))
    sel_appended = _sql_add_columns(sel_inner, [count_col.label(label_n)])
    return __data.append_op(sel_appended)
158,912 | from sqlalchemy import sql
from siuba.dply.verbs import summarize
from .mutate import _sql_upsert_columns, _eval_expr_arg, _eval_expr_kwarg
from ..utils import lift_inner_cols, _sql_with_only_columns
from ..backend import LazyTbl, get_single_from
def _collapse_select(outer_sel, inner_alias):
def _aggregate_cols(__data, subquery, args, kwargs, verb_name):
def get_single_from(sel):
def _summarize(__data, *args, **kwargs):
    """SQL summarize: aggregate over current groups, clearing group/order state."""
    # https://stackoverflow.com/questions/14754994/why-is-sqlalchemy-count-much-slower-than-the-raw-query
    # get query with correct from clause, and maybe unneeded subquery
    safe_from = __data.last_select.alias()
    result_names, sel = _aggregate_cols(__data, safe_from, args, kwargs, "Summarize")
    # see if we can remove subquery
    out_sel = _collapse_select(sel, safe_from)
    from_tbl = get_single_from(out_sel)
    group_cols = [from_tbl.columns[k] for k in __data.group_by]
    final_sel = out_sel.group_by(*group_cols)
    # summarize consumes groupings and any ordering
    new_data = __data.append_op(final_sel, group_by = tuple(), order_by = tuple())
    return new_data
158,913 | from siuba.dply.verbs import select, rename, _select_group_renames
from siuba.dply.tidyselect import VarList, var_select
from siuba.dply.verbs import simple_varname
from pandas import Series
from ..backend import LazyTbl, _warn_missing
from ..utils import lift_inner_cols, _sql_with_only_columns
def _select_group_renames(selection: dict, group_cols):
"""Returns a 2-tuple: groups missing in the select, new group keys."""
renamed = {k: v for k,v in selection.items() if v is not None}
sel_groups = [
renamed[colname] or colname for colname in group_cols if colname in renamed
]
missing_groups = [colname for colname in group_cols if colname not in selection]
return missing_groups, (*missing_groups, *sel_groups)
class VarList:
    """Proxy whose attribute/item access produces Var selection objects.

    Attribute access (``vl.some_col``) yields a single Var; indexing with a
    tuple wraps each element and combines them into a VarAnd.
    """

    def __getattr__(self, name):
        # any (missing) attribute access becomes a column reference
        return Var(name)

    def __getitem__(self, key):
        if isinstance(key, tuple):
            # wrap each entry (unless already wrapped) and combine them
            wrapped = tuple(el if isinstance(el, Var) else Var(el) for el in key)
            return VarAnd(wrapped)

        # single entry: pass Vars through untouched, wrap anything else
        return key if isinstance(key, Var) else Var(key)
def var_select(colnames, *args, data=None):
    """Resolve tidyselect-style arguments to an ordered {name: rename} dict.

    Values are None when a column keeps its original name (pandas.rename
    style); entries are ordered by first selection.
    """
    # TODO: don't erase named column if included again
    if not isinstance(colnames, pd.Series):
        colnames = pd.Series(colnames)

    cols = OrderedDict()

    #flat_args = var_flatten(args)
    flat_vars = chain(*map(flatten_var, args))

    # Add entries in pandas.rename style {"orig_name": "new_name"}
    for ix, entry in enumerate(flat_vars):
        if isinstance(entry, str):
            # bare strings are included directly
            cols[entry] = None
        elif isinstance(entry, int):
            # bare integers select the column at that position
            cols[colnames.iloc[entry]] = None
        elif isinstance(entry, Var):
            # remove negated Vars, otherwise include them
            if ix == 0 and entry.negated:
                # negation as the first arg implies an implicit everything()
                cols.update((name, None) for name in colnames)

            target = entry.name
            if isinstance(target, slice):
                # slicing can refer to a single column, or a range of columns
                lo, hi = var_slice(colnames, target)
                for pos in range(lo, hi):
                    var_put_cols(colnames[pos], entry, cols)
            elif callable(target):
                # TODO: not sure if this is a good idea...
                # proxies to pandas str methods (they must return a bool array)
                mask = target(colnames.str)
                var_put_cols(colnames[mask].tolist(), entry, cols)
                #cols.update((x, None) for x in set(colnames[mask]) - set(cols))
            elif isinstance(target, int):
                var_put_cols(colnames.iloc[target], entry, cols)
            else:
                var_put_cols(target, entry, cols)
        elif callable(entry) and data is not None:
            # TODO: call on the data
            keep_mask = colwise_eval(data, entry)
            for name in colnames[keep_mask]:
                cols[name] = None
        else:
            raise Exception("variable must be either a string or Var instance")

    return cols
def _warn_missing(missing_groups):
warnings.warn(f"Adding missing grouping variables: {missing_groups}")
def _sql_with_only_columns(select, columns):
    """Replace a select's column list while preserving its from clause.

    Wraps sqlalchemy's with_only_columns, whose signature changed after 1.3.
    """
    # sqlalchemy <= 1.3 takes a list; newer versions take *columns
    if is_sqla_12() or is_sqla_13():
        new_sel = select.with_only_columns(columns)
    else:
        new_sel = select.with_only_columns(*columns)

    # replacing every column would otherwise drop the from clause reference
    for source in select.froms:
        new_sel = new_sel.select_from(source)

    return new_sel
def _select(__data, *args, **kwargs):
    """Keep (and optionally rename) a subset of columns, tidyselect-style."""
    # see https://stackoverflow.com/questions/25914329/rearrange-columns-in-sqlalchemy-select-object
    if kwargs:
        raise NotImplementedError(
            "Using kwargs in select not currently supported. "
            "Use _.newname == _.oldname instead"
        )

    last_sel = __data.last_select
    columns = {c.key: c for c in last_sel.inner_columns}

    # resolve selection args the same way the DataFrame implementation does
    colnames = Series(list(columns))
    var_list = VarList()
    resolved = (arg(var_list) if callable(arg) else arg for arg in args)
    selection = var_select(colnames, *resolved)

    # grouping columns must survive a select; re-add them and warn when dropped
    missing_groups, group_keys = _select_group_renames(selection, __data.group_by)
    if missing_groups:
        _warn_missing(missing_groups)

    full_selection = {**{name: None for name in missing_groups}, **selection}
    col_list = [
        columns[name] if new_name is None else columns[name].label(new_name)
        for name, new_name in full_selection.items()
    ]

    return __data.append_op(
        _sql_with_only_columns(last_sel, col_list),
        group_by = group_keys
    )
from siuba.dply.verbs import select, rename, _select_group_renames
from siuba.dply.tidyselect import VarList, var_select
from siuba.dply.verbs import simple_varname
from pandas import Series
from ..backend import LazyTbl, _warn_missing
from ..utils import lift_inner_cols, _sql_with_only_columns
def simple_varname(call):
    """Return the column name for a simple expression, or None.

    Accepts plain strings, plus calls of the form _.some_var or _["some_var"].
    Anything more complex returns None.
    """
    if isinstance(call, str):
        return call

    # check for expr like _.some_var or _["some_var"]
    is_simple_access = (
        isinstance(call, Call)
        and call.func in {"__getitem__", "__getattr__"}
        and isinstance(call.args[0], MetaArg)
        and isinstance(call.args[1], (str, _SliceOpIndex))
    )

    if is_simple_access:
        name = call.args[1]
        if isinstance(name, str):
            return name
        # _SliceOpIndex wraps the underlying string in its first arg
        return name.args[0]

    return None
def _select_group_renames(selection: dict, group_cols):
"""Returns a 2-tuple: groups missing in the select, new group keys."""
renamed = {k: v for k,v in selection.items() if v is not None}
sel_groups = [
renamed[colname] or colname for colname in group_cols if colname in renamed
]
missing_groups = [colname for colname in group_cols if colname not in selection]
return missing_groups, (*missing_groups, *sel_groups)
def _sql_with_only_columns(select, columns):
    """Replace a select's column list while preserving its from clause.

    Wraps sqlalchemy's with_only_columns, whose signature changed after 1.3.
    """
    # sqlalchemy <= 1.3 takes a list; newer versions take *columns
    if is_sqla_12() or is_sqla_13():
        new_sel = select.with_only_columns(columns)
    else:
        new_sel = select.with_only_columns(*columns)

    # replacing every column would otherwise drop the from clause reference
    for source in select.froms:
        new_sel = new_sel.select_from(source)

    return new_sel
def lift_inner_cols(tbl):
    """Return the select's inner columns as a column collection."""
    return _sql_column_collection(list(tbl.inner_columns))
def _rename(__data, **kwargs):
    """Rename columns via new_name=_.old_name keyword arguments."""
    sel = __data.last_select
    columns = lift_inner_cols(sel)

    # invert kwargs (new=old expression) into {"old_name": "new_name"};
    # dict doubles as an ordered set of the old names
    old_to_new = {simple_varname(expr): new_name for new_name, expr in kwargs.items()}

    # simple_varname returns None for anything but _.col / _["col"] expressions
    if None in old_to_new:
        raise KeyError("positional arguments must be simple column, "
                        "e.g. _.colname or _['colname']"
                )

    relabeled = [
        col.label(old_to_new[name]) if name in old_to_new else col
        for name, col in columns.items()
    ]

    new_sel = _sql_with_only_columns(sel, relabeled)

    # renamed grouping columns keep their grouping under the new names
    missing_groups, group_keys = _select_group_renames(old_to_new, __data.group_by)

    return __data.append_op(new_sel, group_by=group_keys)
from siuba.dply.verbs import head
from ..backend import LazyTbl
def _head(__data, n = 5):
    """Limit the query to its first n rows (default 5)."""
    return __data.append_op(__data.last_select.limit(n))
from siuba.dply.verbs import arrange, _call_strip_ascending
from siuba.dply.across import _set_data_context
from ..utils import lift_inner_cols
from ..backend import LazyTbl
from ..translate import ColumnCollection
def _eval_arrange_args(__data, args, cols):
    """Evaluate arrange() expressions into sqlalchemy ordering clauses.

    Expressions wrapped in a leading negation sort descending (``.desc()``
    applied). Raises NotImplementedError when an expression produces
    multiple columns.
    """
    sort_cols = []
    for pos, expr in enumerate(args):
        shaped = __data.shape_call(
            expr, window = False, str_accessors = True,
            verb_name = "Arrange", arg_name = pos,
        )

        # strip a leading negation, which flags a descending sort
        stripped, ascending = _call_strip_ascending(shaped)

        with _set_data_context(__data, window=True):
            result = stripped(cols)

        if isinstance(result, ColumnCollection):
            raise NotImplementedError(
                f"`arrange()` expression {pos} of {len(args)} returned multiple columns, "
                "which is currently unsupported."
            )

        sort_cols.append(result if ascending else result.desc())

    return sort_cols
def lift_inner_cols(tbl):
    """Return the select's inner columns as a column collection."""
    return _sql_column_collection(list(tbl.inner_columns))
def _arrange(__data, *args):
    """Sort the query's rows; replaces any previous arrange ordering."""
    # Note that SQL databases often do not subquery order by clauses. Arrange
    # sets order_by on the backend, so it can set order by in over elements,
    # and handle when new columns are named the same as order by vars.
    # see: https://dba.stackexchange.com/q/82930
    last_sel = __data.last_select
    inner_cols = lift_inner_cols(last_sel)

    # TODO: implement across in arrange
    sort_cols = _eval_arrange_args(__data, args, inner_cols)

    # order_by(None) clears ordering set by any previous arrange call
    new_sel = last_sel.order_by(None).order_by(*sort_cols)
    return __data.append_op(new_sel, order_by = tuple(args))
from ..backend import LazyTbl
from ..utils import _sql_simplify_select
from siuba.dply.verbs import show_query
def _sql_simplify_select(select):
    """Return a clone of select with redundant nested column lists replaced by "*".

    When a select pulls every column of its single child table, its explicit
    column list is rewritten as "*" plus any extra computed columns. The
    original select is left untouched; the clone is mutated in place.
    """
    from sqlalchemy import sql
    from sqlalchemy.sql.visitors import traverse, cloned_traverse

    def simplify_sel(sel):
        # only flatten plain selects drawing from exactly one child
        if not (isinstance(sel, sql.Select) and len(sel.froms) == 1):
            return

        child = sel.froms[0]

        # technically should be an ordered set
        crnt_cols = set(sel.inner_columns)
        child_cols = set(child.columns)

        # every child column is selected -> the explicit list is redundant
        if not (child_cols - crnt_cols):
            remaining = list(crnt_cols - child_cols)
            star = sql.text("*")
            star._from_objects = (child,)
            sel._raw_columns = [star, *remaining]
            _sql_refresh(sel)

    # TODO: find simpler way to clone an element. We cannot use the visitors
    # argument of cloned_traverse, since it visits the inner-most element first.
    clone_el = select._clone()

    # modify each nested select of the clone in-place
    traverse(clone_el, {}, {"select": simplify_sel})

    return clone_el
def _show_query(tbl, simplify = False, return_table = True):
    """Print (or return) the SQL this lazy table would execute.

    With simplify=True, unnecessary subqueries and labels are stripped for
    readability. Returns tbl when return_table is True (after printing),
    otherwise returns the compiled SQL string.
    """
    def compile_query(query):
        return query.compile(
            dialect = tbl.source.dialect,
            compile_kwargs = {"literal_binds": True}
        )

    if simplify:
        # try to strip table names and labels where unnecessary
        compiled = compile_query(_sql_simplify_select(tbl.last_select))
    else:
        # use a much more verbose query
        compiled = compile_query(tbl.last_select)

    if return_table:
        print(str(compiled))
        return tbl

    return str(compiled)
from sqlalchemy import sql
from siuba.siu import FunctionLookupBound, FunctionLookupError
import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql import ColumnCollection
from sqlalchemy.sql.elements import Over
from siuba.ops.translate import create_pandas_translator
from functools import singledispatch
def sql_agg(name):
    """Return a translator applying the named SQL aggregate to a column."""
    sa_func = getattr(sql.func, name)

    def translate(codata, col):
        return sa_func(col)

    return translate
from sqlalchemy import sql
from siuba.siu import FunctionLookupBound, FunctionLookupError
import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql import ColumnCollection
from sqlalchemy.sql.elements import Over
from siuba.ops.translate import create_pandas_translator
from functools import singledispatch
def sql_scalar(name):
    """Return a translator applying the named scalar SQL function."""
    sa_func = getattr(sql.func, name)

    def translate(codata, col, *args):
        return sa_func(col, *args)

    return translate
from sqlalchemy import sql
from siuba.siu import FunctionLookupBound, FunctionLookupError
import warnings
from sqlalchemy.sql.elements import ClauseElement
class SqlColumn(SqlBase): pass
from sqlalchemy.sql import ColumnCollection
from sqlalchemy.sql.elements import Over
from siuba.ops.translate import create_pandas_translator
from functools import singledispatch
def sql_colmeth(meth, *outerargs):
    """Return a translator that calls the named method on the column.

    Any outerargs are passed ahead of call-time args, e.g.
    sql_colmeth("like") yields a function doing col.like(*args).
    """
    def f(codata, col, *args) -> SqlColumn:
        bound = getattr(col, meth)
        return bound(*outerargs, *args)
    return f
# NOTE(review): trailing "Subsets and Splits" dataset-viewer footer text removed — it was UI residue, not source code.