code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 13:12:28 2021
@author: <NAME> -workshop-LA-UP_IIT
"""
import geopandas as gpd
import fiona,io
from tqdm import tqdm
import pyproj
# pd.set_option('display.max_columns', None)
# EPSG:32650 is WGS 84 / UTM zone 50N, the metric projection covering Nanjing.
nanjing_epsg=32650 #Nanjing
# Registry of all input datasets used by the pipeline below; keys identify the
# dataset, values are local file/folder paths (KML, SHP, CSV, DB, XLS, SAFE).
data_dic={
    'road_network':r'.\data\GIS\road Network Data OF Nanjing On 20190716.kml',
    'qingliangMountain_boundary':r'./data/GIS/QingliangMountain_boundary.kml',
    'building_footprint':r'./data/GIS/Nanjing Building footprint Data/NanjingBuildingfootprintData.shp',
    'bus_routes':r'./data/GIS/SHP data of Nanjing bus route and stations in December 2020/busRouteStations_20201218135814.shp',
    'bus_stations':r'./data/GIS/SHP data of Nanjing bus route and stations in December 2020/busRouteStations_20201218135812.shp',
    'subway_lines':r'./data/GIS/SHP of Nanjing subway station and line on 2020/SHP of Nanjing subway station and line on 2020 (2).shp',
    'subway_stations':r'./data/GIS/SHP of Nanjing subway station and line on 2020/SHP of Nanjing subway station and line on 2020.shp',
    'population':r'./data/GIS/SHP of population distribution in Nanjing in 2020/SHP of population distribution in Nanjing in 2020.shp',
    'taxi':r'./data/GIS/Nanjing taxi data in 2016',
    'POI':r"./data/GIS/Nanjing POI 201912.csv",
    'microblog':r'./data/GIS/Najing Metro Weibo publish.db',
    'bike_sharing':r'./data/GIS/One hundred thousand shared bikes.xls',
    'sentinel_2':r'C:\Users\richi\omen_richiebao\omen_IIIT\workshop_LA_UP_iit\data\RS\S2B_MSIL2A_20200819T024549_N0214_R132_T50SPA_20200819T045147.SAFE',
    'comprehensive_park':r'./data/GIS/NanjingParks.kml',
    }
class SQLite_handle():
    """Small helper that verifies connectivity to a SQLite database file."""

    def __init__(self, db_file):
        # Path of the SQLite database file to connect to.
        self.db_file = db_file

    def create_connection(self):
        """Open a connection to the database, report the result, then close it.

        Prints the SQLite version on success or the driver error on failure;
        always releases the connection. Returns None.
        """
        import sqlite3
        from sqlite3 import Error
        connection = None
        try:
            connection = sqlite3.connect(self.db_file)
            print('connected.', "SQLite version:%s" % sqlite3.version,)
        except Error as err:
            print(err)
        finally:
            # Close only if the connection was actually established.
            if connection:
                connection.close()
def boundary_angularPts(bottom_left_lon, bottom_left_lat, top_right_lon, top_right_lat):
    """Return the four corner coordinates of an axis-aligned rectangle.

    Corners are listed counter-clockwise starting at the bottom-left:
    bottom-left, bottom-right, top-right, top-left.

    Args:
        bottom_left_lon, bottom_left_lat - lower-left corner coordinates
        top_right_lon, top_right_lat - upper-right corner coordinates
    Returns:
        list of four (lon, lat) tuples describing the rectangle
    """
    return [
        (bottom_left_lon, bottom_left_lat),
        (top_right_lon, bottom_left_lat),
        (top_right_lon, top_right_lat),
        (bottom_left_lon, top_right_lat),
    ]
def boundary_buffer_centroidCircle(kml_extent,proj_epsg,bounadry_type='buffer_circle',buffer_distance=1000):
    """Build an analysis boundary from a KML extent polygon.

    Two modes (note: the 'bounadry_type' parameter name is a typo kept for
    backward compatibility with existing callers):
      - 'buffer_circle': a circle of 'buffer_distance' units around the
        polygon centroid; saved to './data/GIS/b_centroid_buffer.shp' and the
        centroid is also pushed to PostgreSQL via gpd2postSQL.
      - 'buffer_offset': the ring between the polygon and its outward offset
        of 'buffer_distance' units; saved to './data/GIS/LR_buffer.shp'.

    Args:
        kml_extent - path to the KML file holding the boundary polygon
        proj_epsg - EPSG code of the metric projection used for buffering
        bounadry_type - 'buffer_circle' (default) or 'buffer_offset'
        buffer_distance - buffer radius/offset in projection units
    Returns:
        shapely geometry of the buffer circle or the offset ring, in the
        projected CRS (returns None for any other bounadry_type value)
    """
    import pyproj
    from shapely.ops import transform
    from shapely.geometry import Point,LinearRing,Polygon
    import geopandas as gpd
    # Enable fiona's KML driver so geopandas can read the extent file.
    gpd.io.file.fiona.drvsupport.supported_drivers['KML'] = 'rw'
    boundary_gdf=gpd.read_file(kml_extent,driver='KML')
    # print(boundary_gdf)
    # Reproject from WGS84 into the metric CRS so buffering works in metres.
    wgs84=pyproj.CRS('EPSG:4326')
    utm=pyproj.CRS('EPSG:{}'.format(proj_epsg))
    project=pyproj.Transformer.from_crs(wgs84, utm, always_xy=True).transform
    boundary_proj=transform(project,boundary_gdf.geometry.values[0])
    if bounadry_type=='buffer_circle':
        b_centroid=boundary_proj.centroid
        b_centroid_buffer=b_centroid.buffer(buffer_distance)
        c_area=[b_centroid_buffer.area]
        # Persist the circle (converted back to WGS84) as a shapefile.
        gpd.GeoDataFrame({'area': c_area,'geometry':b_centroid_buffer},crs=utm).to_crs(wgs84).to_file('./data/GIS/b_centroid_buffer.shp')
        b_centroid_gpd=gpd.GeoDataFrame({'x':[b_centroid.x],'y':[b_centroid.y],'geometry':[b_centroid]},crs=utm)# .to_crs(wgs84)
        # NOTE(review): '<PASSWORD>' looks like a redacted credential — it must
        # be replaced with the real password before this write can succeed.
        gpd2postSQL(b_centroid_gpd,table_name='b_centroid',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
        return b_centroid_buffer
    elif bounadry_type=='buffer_offset':
        boundary_=Polygon(boundary_proj.exterior.coords)
        # Outward offset ring: buffered polygon minus the original polygon.
        LR_buffer=boundary_.buffer(buffer_distance,join_style=1).difference(boundary_) #LinearRing
        # LR_buffer=Polygon(boundary_proj.exterior.coords)
        LR_area=[LR_buffer.area]
        gpd.GeoDataFrame({'area': LR_area,'geometry':LR_buffer},crs=utm).to_crs(wgs84).to_file('./data/GIS/LR_buffer.shp')
        return LR_buffer
def kml2gdf(fn, epsg=None, boundary=None):
    """Read every layer of a KML file into one GeoDataFrame.

    Each layer is round-tripped through an in-memory KML buffer, keeping only
    features whose geometry has more than one coordinate, and tagged with a
    'group' column holding the layer name.

    Args:
        fn - path to the .kml file
        epsg - target EPSG code; if None the frame is returned in its native
            CRS (previously this raised a NameError — fixed)
        boundary - optional shapely geometry; when given (and epsg is set),
            rows outside it are dropped via a 'mask' column
    Returns:
        GeoDataFrame of all layers, projected/clipped as requested
    """
    import pandas as pd
    import geopandas as gpd
    import fiona, io
    # Enable fiona's KML driver (read/write).
    gpd.io.file.fiona.drvsupport.supported_drivers['KML'] = 'rw'
    layer_frames = []
    for layer in tqdm(fiona.listlayers(fn)):
        src = fiona.open(fn, layer=layer)
        meta = src.meta
        meta['driver'] = 'KML'
        with io.BytesIO() as buffer:
            with fiona.open(buffer, 'w', **meta) as dst:
                for i, feature in enumerate(src):
                    # Keep only features with more than one coordinate.
                    if len(feature['geometry']['coordinates']) > 1:
                        dst.write(feature)
            buffer.seek(0)
            one_layer = gpd.read_file(buffer, driver='KML')
        one_layer['group'] = layer
        layer_frames.append(one_layer)
    # pd.concat replaces the DataFrame.append call removed in pandas 2.0.
    kml_gdf = pd.concat(layer_frames, ignore_index=True) if layer_frames else gpd.GeoDataFrame()
    if epsg is None:
        # Bug fix: the original returned an undefined name in this case.
        return kml_gdf
    kml_gdf_proj = kml_gdf.to_crs(epsg=epsg)
    if boundary:
        kml_gdf_proj['mask'] = kml_gdf_proj.geometry.apply(lambda row: row.within(boundary))
        kml_gdf_proj.query('mask', inplace=True)
    return kml_gdf_proj
def kml2gdf_folder(fn, epsg=None, boundary=None):
    """Read every layer of a KML file (folder-structured) into one GeoDataFrame.

    Like kml2gdf, but the multi-coordinate check inspects the first element of
    the coordinate list (nested polygon/folder geometries).

    Args:
        fn - path to the .kml file
        epsg - target EPSG code; if None the frame is returned in its native
            CRS (previously this raised a NameError — fixed)
        boundary - optional shapely geometry; when given (and epsg is set),
            rows outside it are dropped via a 'mask' column
    Returns:
        GeoDataFrame of all layers, projected/clipped as requested
    """
    import pandas as pd
    import geopandas as gpd
    import fiona, io
    # Enable fiona's KML driver (read/write).
    gpd.io.file.fiona.drvsupport.supported_drivers['KML'] = 'rw'
    layer_frames = []
    for layer in tqdm(fiona.listlayers(fn)):
        src = fiona.open(fn, layer=layer)
        meta = src.meta
        meta['driver'] = 'KML'
        with io.BytesIO() as buffer:
            with fiona.open(buffer, 'w', **meta) as dst:
                for i, feature in enumerate(src):
                    # Keep only features whose first coordinate ring has more
                    # than one point.
                    if len(feature['geometry']['coordinates'][0]) > 1:
                        dst.write(feature)
            buffer.seek(0)
            one_layer = gpd.read_file(buffer, driver='KML')
        one_layer['group'] = layer
        layer_frames.append(one_layer)
    # pd.concat replaces the DataFrame.append call removed in pandas 2.0.
    kml_gdf = pd.concat(layer_frames, ignore_index=True) if layer_frames else gpd.GeoDataFrame()
    if epsg is None:
        # Bug fix: the original returned an undefined name in this case.
        return kml_gdf
    kml_gdf_proj = kml_gdf.to_crs(epsg=epsg)
    if boundary:
        kml_gdf_proj['mask'] = kml_gdf_proj.geometry.apply(lambda row: row.within(boundary))
        kml_gdf_proj.query('mask', inplace=True)
    return kml_gdf_proj
def shp2gdf(fn, epsg=None, boundary=None, encoding='utf-8'):
    """Read a shapefile, drop empty columns/rows, and optionally reproject/clip.

    Args:
        fn - path to the .shp file
        epsg - target EPSG code; if None the cleaned frame is returned in its
            native CRS (previously this raised a NameError — fixed)
        boundary - optional shapely geometry; when given (and epsg is set),
            rows outside it are dropped via a 'mask' column
        encoding - attribute-table text encoding (e.g. 'GBK' for Chinese data)
    Returns:
        cleaned GeoDataFrame, projected/clipped as requested
    """
    import geopandas as gpd
    shp_gdf = gpd.read_file(fn, encoding=encoding)
    print('original data info:{}'.format(shp_gdf.shape))
    # Drop columns that are entirely NaN, then rows with any NaN left.
    shp_gdf.dropna(how='all', axis=1, inplace=True)
    print('dropna-how=all,result:{}'.format(shp_gdf.shape))
    shp_gdf.dropna(inplace=True)
    print('dropna-several rows,result:{}'.format(shp_gdf.shape))
    if epsg is None:
        # Bug fix: the original returned an undefined name in this case.
        return shp_gdf
    shp_gdf_proj = shp_gdf.to_crs(epsg=epsg)
    if boundary:
        shp_gdf_proj['mask'] = shp_gdf_proj.geometry.apply(lambda row: row.within(boundary))
        shp_gdf_proj.query('mask', inplace=True)
    return shp_gdf_proj
def csv2gdf_A_taxi(data_root,epsg=None,boundary=None,): #
    """Load the Nanjing taxi CSV files into GeoDataFrames, one per attribute.

    File names are expected to look like '<info>_<date>_*.csv'; files are
    grouped by their <info> prefix and concatenated into one frame per group.

    Args:
        data_root - folder holding the taxi CSV files
        epsg - target EPSG code. NOTE(review): if None, 'g_gdf_proj' below is
            never assigned and a NameError is raised — confirm intended usage.
        boundary - optional shapely geometry used to clip the points
    Returns:
        g_df_dict - {'value_<info>': GeoDataFrame} of projected (and
            optionally clipped) taxi points
    """
    import glob
    from pathlib import Path
    import geopandas as gpd
    import pandas as pd
    import datetime
    # from functools import reduce
    from tqdm import tqdm
    suffix='csv'
    fns=glob.glob(data_root+"/*.{}".format(suffix))
    # Index files by (info, date) taken from the first two '_'-separated
    # tokens of the file stem.
    fns_stem_df=pd.DataFrame([Path(fn).stem.split('_')[:2]+[fn] for fn in fns],columns=['info','date','file_path']).set_index(['info','date'])
    g_df_dict={}
    # i=0
    for info,g in tqdm(fns_stem_df.groupby(level=0)):
        # NOTE(review): the nested 'for fn ... for idx ...' builds the cross
        # product of files and index rows, so each file is read once per index
        # entry — presumably a pairwise zip was intended; confirm against data.
        g_df=pd.concat([pd.read_csv(fn).assign(date=idx[1]) for fn in g.file_path for idx in g.index]).rename({'value':'value_{}'.format(g.index[0][0])},axis=1)
        # Combine the date stamp with the per-row hour into a datetime object.
        g_df['time']=g_df.apply(lambda row:datetime.datetime.strptime(row.date+' {}:0:0'.format(row.hour), '%Y.%m.%d %H:%S:%f'),axis=1)
        g_gdf=gpd.GeoDataFrame(g_df,geometry=gpd.points_from_xy(g_df.longitude,g_df.latitude,),crs='epsg:4326')
        # print(g_gdf)
        if epsg is not None:
            g_gdf_proj=g_gdf.to_crs(epsg=epsg)
        if boundary:
            g_gdf_proj['mask']=g_gdf_proj.geometry.apply(lambda row:row.within(boundary))
            g_gdf_proj.query('mask',inplace=True)
        g_df_dict['value_{}'.format(g.index[0][0])]=g_gdf_proj
        # if i==1:
        #     break
        # i+=1
    return g_df_dict
def csv2gdf_A_POI(fn, epsg=None, boundary=None, encoding='utf-8'):
    """Read a POI CSV export into a GeoDataFrame with a 'superclass' column.

    Args:
        fn - path to the POI CSV file (with '经度'/'纬度' lon/lat columns and a
            'POI类型' type column)
        epsg - target EPSG code; if None the WGS84 frame is returned
            (previously this raised a NameError — fixed)
        boundary - optional shapely geometry; when given (and epsg is set),
            rows outside it are dropped via a 'mask' column
        encoding - CSV text encoding (e.g. 'GBK' for Chinese exports)
    Returns:
        GeoDataFrame of POI points, projected/clipped as requested
    """
    import geopandas as gpd
    import pandas as pd
    csv_df = pd.read_csv(fn, encoding=encoding)
    # First ';'-separated token of the POI type string is the super class.
    csv_df['superclass'] = csv_df['POI类型'].apply(lambda row: row.split(';')[0])
    csv_gdf = gpd.GeoDataFrame(csv_df, geometry=gpd.points_from_xy(csv_df['经度'], csv_df['纬度']), crs='epsg:4326')
    if epsg is None:
        # Bug fix: the original returned an undefined name in this case.
        return csv_gdf
    csv_gdf_proj = csv_gdf.to_crs(epsg=epsg)
    if boundary:
        csv_gdf_proj['mask'] = csv_gdf_proj.geometry.apply(lambda row: row.within(boundary))
        csv_gdf_proj.query('mask', inplace=True)
    return csv_gdf_proj
def db2df(database_sql, table):
    """Read an entire table from a SQLite database into a pandas DataFrame.

    Args:
        database_sql - path to the SQLite database file
        table - table name to read. NOTE: the name is interpolated directly
            into the SQL statement, so it must come from trusted code, never
            from user input.
    Returns:
        DataFrame with all rows of the table
    """
    import sqlite3
    import pandas as pd
    conn = sqlite3.connect(database_sql)
    try:
        df = pd.read_sql_query("SELECT * from {}".format(table), conn)
    finally:
        # Bug fix: the original leaked the connection; always close it.
        conn.close()
    return df
def xls2gdf(fn, epsg=None, boundary=None, sheet_name=0):
    """Read an XLS sheet of start/end coordinates into a line GeoDataFrame.

    Args:
        fn - path to the .xls file
        epsg - target EPSG code; if None the WGS84 frame is returned
            (previously this raised a NameError — fixed)
        boundary - optional shapely geometry; when given (and epsg is set),
            rows outside it are dropped via a 'mask' column
        sheet_name - sheet index or name passed to pandas.read_excel
    Returns:
        GeoDataFrame with one LineString per row (start point -> end point)
    """
    import geopandas as gpd
    import pandas as pd
    from shapely.geometry import LineString
    xls_df = pd.read_excel(fn, sheet_name=sheet_name)
    # Build a segment from the start ('开始维度'/'开始经度') to the end
    # ('结束维度'/'结束经度') columns. NOTE(review): coordinates are ordered
    # (latitude, longitude) here — confirm this matches downstream plotting.
    xls_df['route_line'] = xls_df.apply(lambda row: LineString([(row['开始维度'], row['开始经度'],), (row['结束维度'], row['结束经度'],)]), axis=1)
    xls_gdf = gpd.GeoDataFrame(xls_df, geometry=xls_df.route_line, crs='epsg:4326')
    if epsg is None:
        # Bug fix: the original returned an undefined name in this case.
        return xls_gdf
    xls_gdf_proj = xls_gdf.to_crs(epsg=epsg)
    if boundary:
        xls_gdf_proj['mask'] = xls_gdf_proj.geometry.apply(lambda row: row.within(boundary))
        xls_gdf_proj.query('mask', inplace=True)
    return xls_gdf_proj
def Sentinel2_bandFNs(MTD_MSIL2A_fn):
    """Collect Sentinel-2 band image paths from an MTD_MSIL2A metadata file.

    Prints the product's generation time, type and processing level, the
    top-level XML structure, then gathers every IMAGE_FILE entry.

    Args:
        MTD_MSIL2A_fn - path to the MTD_MSIL2A.xml metadata file
    Returns:
        band_fns_list - relative band paths as stored in the metadata
        band_fns_dict - {'<band>_<resolution>': '<path>.jp2'} lookup built
            from the last two '_'-separated tokens of each path
    """
    import xml.etree.ElementTree as ET
    root = ET.parse(MTD_MSIL2A_fn).getroot()
    # Product-level metadata lives under the first grandchild of the root.
    product_info = root[0][0]
    print("GENERATION_TIME:{}\nPRODUCT_TYPE:{}\nPROCESSING_LEVEL:{}".format(
        product_info.find('GENERATION_TIME').text,
        product_info.find('PRODUCT_TYPE').text,
        product_info.find('PROCESSING_LEVEL').text))
    for child in root:
        print(child.tag, "-", child.attrib)
    print("_" * 50)
    band_fns_list = [elem.text for elem in root.iter('IMAGE_FILE')]
    band_fns_dict = {}
    for band_path in band_fns_list:
        tokens = band_path.split('_')
        band_fns_dict['{}_{}'.format(tokens[-2], tokens[-1])] = band_path + '.jp2'
    return band_fns_list, band_fns_dict
# Function to normalize the grid values
def normalize_(array):
    """Linearly rescale a numpy array into the range 0.0 - 1.0."""
    lo = array.min()
    hi = array.max()
    return (array - lo) / (hi - lo)
def sentinel_2_NDVI(sentinel_2_root, save_path):
    """Compute NDVI from a Sentinel-2 L2A product and save it as a GeoTIFF.

    Args:
        sentinel_2_root - root folder of the .SAFE product (must contain
            MTD_MSIL2A.xml)
        save_path - output path for the single-band NDVI GeoTIFF
    Returns:
        NDVI - 2-D numpy array of NDVI values
    """
    # Removed unused local imports (tqdm, shapely, scipy.stats, osgeo.gdal).
    import os
    import earthpy.spatial as es
    import rasterio as rio
    import numpy as np
    MTD_fn = os.path.join(sentinel_2_root, 'MTD_MSIL2A.xml')
    band_fns_list, band_fns_dict = Sentinel2_bandFNs(MTD_fn)
    # Blue, green, red and near-infrared bands at 10 m resolution.
    bands_selection = ["B02_10m", "B03_10m", "B04_10m", "B08_10m"]
    stack_bands = [os.path.join(sentinel_2_root, band_fns_dict[b]) for b in bands_selection]
    array_stack, meta_data = es.stack(stack_bands)
    # The output is a single float64 GeoTIFF band.
    meta_data.update(
        count=1,
        dtype=rio.float64,
        driver='GTiff'
    )
    print("meta_data:\n", meta_data)
    # NDVI = (NIR - red) / (NIR + red); indices 3 and 2 of the band stack.
    NDVI = (array_stack[3] - array_stack[2]) / (array_stack[3] + array_stack[2])
    with rio.open(save_path, 'w', **meta_data) as dst:
        dst.write(np.expand_dims(NDVI.astype(meta_data['dtype']), axis=0))
    print('NDVI has been saved as raster .tif format....')
    return NDVI
def raster_crop(raster_fn,crop_shp_fn,boundary=None):
    """Crop a raster to a shapefile boundary and vectorize it to point features.

    The cropped raster is plotted, converted to polygons (one per contiguous
    value region), and each polygon is reduced to its centroid.

    Args:
        raster_fn - path to the source raster (e.g. the NDVI GeoTIFF)
        crop_shp_fn - path to the shapefile used as the crop extent
        boundary - optional shapely geometry; rows outside it are dropped via
            a 'mask' column
    Returns:
        GeoDataFrame of centroid points with a 'raster_val' attribute
    """
    import rasterio as rio
    import geopandas as gpd
    import earthpy.spatial as es
    import numpy as np
    import earthpy.plot as ep
    from shapely.geometry import shape
    with rio.open(raster_fn) as src:
        ori_raster=src.read(1)
        ori_profile=src.profile
    print(ori_raster.shape)
    # Reproject the crop boundary into the raster's CRS before cropping.
    crop_boundary=gpd.read_file(crop_shp_fn).to_crs(ori_profile['crs'])
    # print(crop_boundary)
    print("_"*50)
    print(' crop_boundary: {}'.format(crop_boundary.crs))
    print("_"*50)
    print(' ori_raster: {}'.format( ori_profile['crs']))
    with rio.open(raster_fn) as src:
        cropped_img, cropped_meta=es.crop_image(src,crop_boundary)
    print(cropped_img.shape)
    cropped_meta.update({"driver": "GTiff",
                         "height": cropped_img.shape[0],
                         "width": cropped_img.shape[1],
                         "transform": cropped_meta["transform"]})
    # Mask the nodata value so it is excluded from the preview plot.
    cropped_img_mask=np.ma.masked_equal(cropped_img[0], -9999.0)
    # print(cropped_img_mask)
    ep.plot_bands(cropped_img_mask, cmap='terrain', cbar=False)
    print(type(cropped_img_mask))
    # Vectorize: one record per contiguous region of equal raster value.
    cropped_shapes=(
        {'properties': {'raster_val': v}, 'geometry': s}
        for i, (s, v) in enumerate(rio.features.shapes(cropped_img.astype(np.float32),transform=cropped_meta['transform']))) #,mask=None
    # print(cropped_shapes)
    geoms=list(cropped_shapes)
    print(geoms[0])
    cropped_img_gpd=gpd.GeoDataFrame.from_features(geoms)
    # Reduce each polygon to its centroid point.
    cropped_img_gpd.geometry=cropped_img_gpd.geometry.apply(lambda row:row.centroid)
    # print(cropped_img_gpd)
    if boundary:
        cropped_img_gpd['mask']=cropped_img_gpd.geometry.apply(lambda row:row.within(boundary))
        cropped_img_gpd.query('mask',inplace=True)
    return cropped_img_gpd
def gpd2SQLite(gdf_, db_fp, table_name):
    """Write a GeoDataFrame into a SQLite database table.

    Geometries are serialized to well-known-binary in a 'geom' column so a
    plain (non-spatial) SQLite database can store them. The input frame is
    deep-copied and left untouched.

    Args:
        gdf_ - GeoDataFrame to store
        db_fp - path to the SQLite database file (Windows-style path)
        table_name - target table name (replaced if it exists)
    """
    # Removed unused local imports (geoalchemy2 Geometry/WKTElement, pandas).
    from sqlalchemy import create_engine
    import copy
    import shapely.wkb
    gdf = copy.deepcopy(gdf_)
    # Serialize each geometry to WKB bytes.
    gdf['geom'] = gdf.apply(lambda row: shapely.wkb.dumps(row.geometry), axis=1)
    # Bug fix: 'mask' only exists when the frame was clipped to a boundary;
    # errors='ignore' prevents a KeyError when it is absent.
    gdf.drop(columns=['geometry', 'mask'], inplace=True, errors='ignore')
    print(gdf)
    engine = create_engine('sqlite:///' + '\\\\'.join(db_fp.split('\\')), echo=True)
    gdf.to_sql(table_name, con=engine, if_exists='replace', index=False,) #dtype={'geometry': Geometry('POINT')} ;dtype={'geometry': Geometry('POINT',srid=crs.to_epsg())}
    print('has been written to into the SQLite database...')
def gpd2postSQL(gdf, table_name, **kwargs):
    """Write a GeoDataFrame into a PostgreSQL/PostGIS table.

    Args:
        gdf - GeoDataFrame to store
        table_name - target table name (lower case recommended; replaced if
            it exists)
        kwargs - connection credentials: 'myusername', 'mypassword',
            'mydatabase'
    """
    from sqlalchemy import create_engine
    # engine=create_engine("postgres://postgres:123456@localhost:5432/workshop-LA-UP_IIT")
    # Bug fix: the password was looked up under the literal key '<PASSWORD>'
    # (a redaction artifact) instead of the 'mypassword' keyword argument.
    engine = create_engine("postgres://{myusername}:{mypassword}@localhost:5432/{mydatabase}".format(
        myusername=kwargs['myusername'], mypassword=kwargs['mypassword'], mydatabase=kwargs['mydatabase']))
    gdf.to_postgis(table_name, con=engine, if_exists='replace', index=False,)
    print("_"*50)
    print('has been written to into the PostSQL database...')
def postSQL2gpd(table_name, geom_col='geometry', **kwargs):
    """Read a PostGIS table back into a GeoDataFrame.

    Args:
        table_name - table to read
        geom_col - name of the geometry column
        kwargs - connection credentials: 'myusername', 'mypassword',
            'mydatabase'
    Returns:
        GeoDataFrame with the table contents
    """
    from sqlalchemy import create_engine
    import geopandas as gpd
    # Bug fix: the password was looked up under the literal key '<PASSWORD>'
    # (a redaction artifact) instead of the 'mypassword' keyword argument.
    engine = create_engine("postgres://{myusername}:{mypassword}@localhost:5432/{mydatabase}".format(
        myusername=kwargs['myusername'], mypassword=kwargs['mypassword'], mydatabase=kwargs['mydatabase']))
    gdf = gpd.read_postgis(table_name, con=engine, geom_col=geom_col)
    print("_"*50)
    print('The data has been read from PostSQL database...')
    return gdf
def raster2postSQL(raster_fn,**kwargs):
    """Load a raster into PostgreSQL via the external 'raster2pgsql' tool.

    Args:
        raster_fn - path to the raster file; its stem becomes the table name
        kwargs - 'myusername' and 'mydatabase' for the psql connection.
            NOTE(review): 'table_name'/'mypassword' kwargs that callers pass
            are ignored here; psql must authenticate via pgpass or a prompt.
    """
    from osgeo import gdal, osr
    import psycopg2   # NOTE(review): imported but not used in this function
    import subprocess
    from pathlib import Path
    raster=gdal.Open(raster_fn)
    # print(raster)
    # Extract the EPSG authority code from the raster's projection WKT.
    proj=osr.SpatialReference(wkt=raster.GetProjection())
    print(proj)
    projection=str(proj.GetAttrValue('AUTHORITY',1))
    # Pixel tile size comes from the geotransform (the Y step is negative).
    gt=raster.GetGeoTransform()
    pixelSizeX=str(round(gt[1]))
    pixelSizeY=str(round(-gt[5]))
    # cmds='raster2pgsql -s '+projection+' -I -C -M "'+raster_fn+'" -F -t '+pixelSizeX+'x'+pixelSizeY+' public.'+'uu'+' | psql -d {mydatabase} -U {myusername} -h localhost -p 5432'.format(mydatabase=kwargs['mydatabase'],myusername=kwargs['myusername'])
    # SECURITY NOTE(review): the command line is assembled by concatenation and
    # executed with shell=True — safe only for trusted, locally-controlled
    # paths and credentials, never for user-supplied input.
    cmds='raster2pgsql -s '+projection+' -I -M "'+raster_fn+'" -F -t '+pixelSizeX+'x'+pixelSizeY+' public.'+Path(raster_fn).stem+' | psql -d {mydatabase} -U {myusername} -h localhost -p 5432'.format(mydatabase=kwargs['mydatabase'],myusername=kwargs['myusername'])
    print("_"*50)
    print(cmds)
    subprocess.call(cmds, shell=True)
    print("_"*50)
    print('The raster has been loaded into PostSQL...')
if __name__=="__main__":
    # Pipeline driver: each lettered section ingests one dataset and pushes it
    # to PostgreSQL. Sections are commented in/out as needed; only the
    # comprehensive-park step is currently active.
    #a-create or connect to the database
    db_file=r'./database/workshop_LAUP_iit.db'
    # sql_w=SQLite_handle(db_file)
    # sql_w.create_connection()
    #b-create boundary
    #method_a
    # kml_extent=data_dic['qingliangMountain_boundary']
    # boudnary_polygon=boundary_buffer_centroidCircle(kml_extent,nanjing_epsg,bounadry_type='buffer_circle',buffer_distance=5000) #'buffer_circle';'buffer_offset'
    #c-road_network_kml
    # road_gdf=kml2gdf(data_dic['road_network'],epsg=nanjing_epsg,boundary=boudnary_polygon)
    # road_gdf.plot()
    # gpd2postSQL(road_gdf,table_name='road_network',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    #d-02_building footprint
    # buildingFootprint=shp2gdf(data_dic['building_footprint'],epsg=nanjing_epsg,boundary=boudnary_polygon)
    # buildingFootprint.plot(column='Floor',cmap='terrain')
    # gpd2postSQL(buildingFootprint,table_name='building_footprint',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    # d-03_bus routes
    # bus_routes=shp2gdf(data_dic['bus_routes'],epsg=nanjing_epsg,boundary=None,encoding='GBK')
    # bus_routes.plot()
    # gpd2postSQL(bus_routes,table_name='bus_routes',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    #d-04_bus station
    # bus_stations=shp2gdf(data_dic['bus_stations'],epsg=nanjing_epsg,boundary=boudnary_polygon,encoding='GBK')
    # bus_stations.plot()
    # gpd2postSQL(bus_stations,table_name='bus_stations',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    #d-05_subway lines
    # subway_lines=shp2gdf(data_dic['subway_lines'],epsg=nanjing_epsg,encoding='GBK')
    # subway_lines.plot()
    # gpd2postSQL(subway_lines,table_name='subway_lines',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    #d-06_subway stations
    # subway_stations=shp2gdf(data_dic['subway_stations'],epsg=nanjing_epsg,boundary=boudnary_polygon,encoding='GBK')
    # subway_stations.plot()
    # gpd2postSQL(subway_stations,table_name='subway_stations',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    #d-07_population
    # population=shp2gdf(data_dic['population'],epsg=nanjing_epsg,boundary=boudnary_polygon,encoding='GBK')
    # population.plot(column='Population',cmap='hot')
    # gpd2postSQL(population,table_name='population',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    #e-A-08_Nanjing taxi data
    # g_df_dict=csv2gdf_A_taxi(data_dic['taxi'],epsg=nanjing_epsg,boundary=boudnary_polygon)
    # taxi_keys=list(g_df_dict.keys())
    # print(taxi_keys)
    # g_df_dict[taxi_keys[0]].plot(column=taxi_keys[0],cmap='hot')
    # for key in taxi_keys:
    #     gpd2postSQL(g_df_dict[key],table_name='taxi_{}'.format(key),myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    #e-B-09_POI
    # POI=csv2gdf_A_POI(data_dic['POI'],epsg=nanjing_epsg,boundary=boudnary_polygon,encoding='GBK')
    # POI.plot(column='superclass',cmap='terrain',markersize=1)
    # POI.rename({'唯一ID':'ID',
    # 'POI名称':"Name",
    # 'POI类型':"class",
    # 'POI类型编号':"class_idx",
    # '行业类型':"industry_class",
    # '地址':"address",
    # '经度':"lon",
    # '纬度':'lat',
    # 'POI所在省份名称':"province",
    # 'POI所在城市名称':"city",
    # '区域编码':"reginal_code",
    # # 'superclass',
    # # 'geometry',
    # # 'mask'
    # },axis=1,inplace=True)
    # #table name should be the low case
    # gpd2postSQL(POI,table_name='poi',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    # f-10_ Metro Weibo(microblog) publish
    # microblog=db2df(data_dic['microblog'],'NajingMetro')
    #g-11_bike sharing/ no data were available for Nanjing area
    # bike_sharing=xls2gdf(data_dic['bike_sharing'],epsg=nanjing_epsg,boundary=boudnary_polygon,sheet_name='共享单车数据a')
    # bike_sharing.plot()
    #h-12_sentinel-2-NDVI
    # ndvi_fn=r'C:\Users\richi\omen_richiebao\omen_IIIT\workshop_LA_UP_iit\data\RS\NDVI.tif'
    # sentinel_2_NDVI=sentinel_2_NDVI(data_dic['sentinel_2'],ndvi_fn)
    # #i-12-01_raster crop
    # ndvi_cropped=raster_crop(raster_fn=ndvi_fn,crop_shp_fn='./data/GIS/b_centroid_buffer.shp',boundary=boudnary_polygon) #,cropped_fn='./data/GIS/NDVI_cropped.tif'
    # ndvi_cropped.plot(column='raster_val',cmap='terrain',markersize=1)
    # gpd2postSQL(ndvi_cropped,table_name='ndvi',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    #I-write GeoDataFrame into SQLite database
    # gpd2SQLite(population,db_file,table_name='population')
    #G-write GeoDataFrame into PostgreSQL
    # gpd2postSQL(population,table_name='population',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    #G-read GeoDataFrame from PostgreSQL
    # population_postsql=postSQL2gpd(table_name='population',geom_col='geometry',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    # population_postsql.plot(column='Population',cmap='hot')
    #H-load raster into postGreSQL
    # raster2postSQL(ndvi_fn,table_name='ndvi',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
    #comprehensive park
    # NOTE(review): '<PASSWORD>' below is a redacted credential — replace with
    # the real PostgreSQL password before running this step.
    comprehensive_park=kml2gdf_folder(data_dic['comprehensive_park'],epsg=nanjing_epsg,boundary=None)
    gpd2postSQL(comprehensive_park,table_name='comprehensive_park',myusername='postgres',mypassword='<PASSWORD>',mydatabase='workshop-LA-UP_IIT')
| [
"osgeo.gdal.Open",
"numpy.ma.masked_equal",
"pandas.read_csv",
"io.BytesIO",
"earthpy.spatial.stack",
"pyproj.Transformer.from_crs",
"shapely.geometry.Polygon",
"earthpy.plot.plot_bands",
"earthpy.spatial.crop_image",
"copy.deepcopy",
"pandas.read_excel",
"geopandas.points_from_xy",
"xml.etr... | [((2833, 2872), 'geopandas.read_file', 'gpd.read_file', (['kml_extent'], {'driver': '"""KML"""'}), "(kml_extent, driver='KML')\n", (2846, 2872), True, 'import geopandas as gpd\n'), ((2913, 2936), 'pyproj.CRS', 'pyproj.CRS', (['"""EPSG:4326"""'], {}), "('EPSG:4326')\n", (2923, 2936), False, 'import pyproj\n'), ((3082, 3133), 'shapely.ops.transform', 'transform', (['project', 'boundary_gdf.geometry.values[0]'], {}), '(project, boundary_gdf.geometry.values[0])\n', (3091, 3133), False, 'from shapely.ops import transform\n'), ((4446, 4464), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {}), '()\n', (4462, 4464), True, 'import geopandas as gpd\n'), ((5748, 5766), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {}), '()\n', (5764, 5766), True, 'import geopandas as gpd\n'), ((7085, 7121), 'geopandas.read_file', 'gpd.read_file', (['fn'], {'encoding': 'encoding'}), '(fn, encoding=encoding)\n', (7098, 7121), True, 'import geopandas as gpd\n'), ((9252, 9286), 'pandas.read_csv', 'pd.read_csv', (['fn'], {'encoding': 'encoding'}), '(fn, encoding=encoding)\n', (9263, 9286), True, 'import pandas as pd\n'), ((9867, 9896), 'sqlite3.connect', 'sqlite3.connect', (['database_sql'], {}), '(database_sql)\n', (9882, 9896), False, 'import sqlite3\n'), ((10168, 10208), 'pandas.read_excel', 'pd.read_excel', (['fn'], {'sheet_name': 'sheet_name'}), '(fn, sheet_name=sheet_name)\n', (10181, 10208), True, 'import pandas as pd\n'), ((10393, 10462), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['xls_df'], {'geometry': 'xls_df.route_line', 'crs': '"""epsg:4326"""'}), "(xls_df, geometry=xls_df.route_line, crs='epsg:4326')\n", (10409, 10462), True, 'import geopandas as gpd\n'), ((11048, 11071), 'xml.etree.ElementTree.parse', 'ET.parse', (['MTD_MSIL2A_fn'], {}), '(MTD_MSIL2A_fn)\n', (11056, 11071), True, 'import xml.etree.ElementTree as ET\n'), ((12528, 12575), 'os.path.join', 'os.path.join', (['sentinel_2_root', '"""MTD_MSIL2A.xml"""'], {}), "(sentinel_2_root, 
'MTD_MSIL2A.xml')\n", (12540, 12575), False, 'import os\n'), ((12867, 12888), 'earthpy.spatial.stack', 'es.stack', (['stack_bands'], {}), '(stack_bands)\n', (12875, 12888), True, 'import earthpy.spatial as es\n'), ((14325, 14368), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['cropped_img[0]', '(-9999.0)'], {}), '(cropped_img[0], -9999.0)\n', (14343, 14368), True, 'import numpy as np\n'), ((14404, 14463), 'earthpy.plot.plot_bands', 'ep.plot_bands', (['cropped_img_mask'], {'cmap': '"""terrain"""', 'cbar': '(False)'}), "(cropped_img_mask, cmap='terrain', cbar=False)\n", (14417, 14463), True, 'import earthpy.plot as ep\n'), ((14822, 14859), 'geopandas.GeoDataFrame.from_features', 'gpd.GeoDataFrame.from_features', (['geoms'], {}), '(geoms)\n', (14852, 14859), True, 'import geopandas as gpd\n'), ((15387, 15406), 'copy.deepcopy', 'copy.deepcopy', (['gdf_'], {}), '(gdf_)\n', (15400, 15406), False, 'import copy\n'), ((17036, 17095), 'geopandas.read_postgis', 'gpd.read_postgis', (['table_name'], {'con': 'engine', 'geom_col': 'geom_col'}), '(table_name, con=engine, geom_col=geom_col)\n', (17052, 17095), True, 'import geopandas as gpd\n'), ((17353, 17373), 'osgeo.gdal.Open', 'gdal.Open', (['raster_fn'], {}), '(raster_fn)\n', (17362, 17373), False, 'from osgeo import gdal, osr\n'), ((18180, 18213), 'subprocess.call', 'subprocess.call', (['cmds'], {'shell': '(True)'}), '(cmds, shell=True)\n', (18195, 18213), False, 'import subprocess\n'), ((2997, 3052), 'pyproj.Transformer.from_crs', 'pyproj.Transformer.from_crs', (['wgs84', 'utm'], {'always_xy': '(True)'}), '(wgs84, utm, always_xy=True)\n', (3024, 3052), False, 'import pyproj\n'), ((3499, 3599), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'x': [b_centroid.x], 'y': [b_centroid.y], 'geometry': [b_centroid]}"], {'crs': 'utm'}), "({'x': [b_centroid.x], 'y': [b_centroid.y], 'geometry': [\n b_centroid]}, crs=utm)\n", (3515, 3599), True, 'import geopandas as gpd\n'), ((4487, 4507), 'fiona.listlayers', 'fiona.listlayers', 
(['fn'], {}), '(fn)\n', (4503, 4507), False, 'import fiona, io\n'), ((4569, 4596), 'fiona.open', 'fiona.open', (['fn'], {'layer': 'layer'}), '(fn, layer=layer)\n', (4579, 4596), False, 'import fiona, io\n'), ((5789, 5809), 'fiona.listlayers', 'fiona.listlayers', (['fn'], {}), '(fn)\n', (5805, 5809), False, 'import fiona, io\n'), ((5871, 5898), 'fiona.open', 'fiona.open', (['fn'], {'layer': 'layer'}), '(fn, layer=layer)\n', (5881, 5898), False, 'import fiona, io\n'), ((12742, 12789), 'os.path.join', 'os.path.join', (['sentinel_2_root', 'band_fns_dict[b]'], {}), '(sentinel_2_root, band_fns_dict[b])\n', (12754, 12789), False, 'import os\n'), ((13121, 13158), 'rasterio.open', 'rio.open', (['save_path', '"""w"""'], {}), "(save_path, 'w', **meta_data)\n", (13129, 13158), True, 'import rasterio as rio\n'), ((13568, 13587), 'rasterio.open', 'rio.open', (['raster_fn'], {}), '(raster_fn)\n', (13576, 13587), True, 'import rasterio as rio\n'), ((13951, 13970), 'rasterio.open', 'rio.open', (['raster_fn'], {}), '(raster_fn)\n', (13959, 13970), True, 'import rasterio as rio\n'), ((14013, 14046), 'earthpy.spatial.crop_image', 'es.crop_image', (['src', 'crop_boundary'], {}), '(src, crop_boundary)\n', (14026, 14046), True, 'import earthpy.spatial as es\n'), ((1942, 1971), 'sqlite3.connect', 'sqlite3.connect', (['self.db_file'], {}), '(self.db_file)\n', (1957, 1971), False, 'import sqlite3\n'), ((3840, 3878), 'shapely.geometry.Polygon', 'Polygon', (['boundary_proj.exterior.coords'], {}), '(boundary_proj.exterior.coords)\n', (3847, 3878), False, 'from shapely.geometry import Point, LinearRing, Polygon\n'), ((4673, 4685), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (4683, 4685), False, 'import fiona, io\n'), ((5076, 5111), 'geopandas.read_file', 'gpd.read_file', (['buffer'], {'driver': '"""KML"""'}), "(buffer, driver='KML')\n", (5089, 5111), True, 'import geopandas as gpd\n'), ((5975, 5987), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5985, 5987), False, 'import fiona, io\n'), 
((6543, 6578), 'geopandas.read_file', 'gpd.read_file', (['buffer'], {'driver': '"""KML"""'}), "(buffer, driver='KML')\n", (6556, 6578), True, 'import geopandas as gpd\n'), ((9456, 9502), 'geopandas.points_from_xy', 'gpd.points_from_xy', (["csv_df['经度']", "csv_df['纬度']"], {}), "(csv_df['经度'], csv_df['纬度'])\n", (9474, 9502), True, 'import geopandas as gpd\n'), ((10305, 10373), 'shapely.geometry.LineString', 'LineString', (["[(row['开始维度'], row['开始经度']), (row['结束维度'], row['结束经度'])]"], {}), "([(row['开始维度'], row['开始经度']), (row['结束维度'], row['结束经度'])])\n", (10315, 10373), False, 'from shapely.geometry import LineString\n'), ((13705, 13731), 'geopandas.read_file', 'gpd.read_file', (['crop_shp_fn'], {}), '(crop_shp_fn)\n', (13718, 13731), True, 'import geopandas as gpd\n'), ((15703, 15734), 'shapely.wkb.dumps', 'shapely.wkb.dumps', (['row.geometry'], {}), '(row.geometry)\n', (15720, 15734), False, 'import shapely\n'), ((4714, 4745), 'fiona.open', 'fiona.open', (['buffer', '"""w"""'], {}), "(buffer, 'w', **meta)\n", (4724, 4745), False, 'import fiona, io\n'), ((6016, 6047), 'fiona.open', 'fiona.open', (['buffer', '"""w"""'], {}), "(buffer, 'w', **meta)\n", (6026, 6047), False, 'import fiona, io\n'), ((8555, 8604), 'geopandas.points_from_xy', 'gpd.points_from_xy', (['g_df.longitude', 'g_df.latitude'], {}), '(g_df.longitude, g_df.latitude)\n', (8573, 8604), True, 'import geopandas as gpd\n'), ((17986, 18001), 'pathlib.Path', 'Path', (['raster_fn'], {}), '(raster_fn)\n', (17990, 18001), False, 'from pathlib import Path\n'), ((3332, 3406), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'area': c_area, 'geometry': b_centroid_buffer}"], {'crs': 'utm'}), "({'area': c_area, 'geometry': b_centroid_buffer}, crs=utm)\n", (3348, 3406), True, 'import geopandas as gpd\n'), ((4081, 4148), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'area': LR_area, 'geometry': LR_buffer}"], {'crs': 'utm'}), "({'area': LR_area, 'geometry': LR_buffer}, crs=utm)\n", (4097, 4148), True, 'import 
geopandas as gpd\n'), ((8236, 8251), 'pandas.read_csv', 'pd.read_csv', (['fn'], {}), '(fn)\n', (8247, 8251), True, 'import pandas as pd\n'), ((8018, 8026), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (8022, 8026), False, 'from pathlib import Path\n')] |
"""
Plot figures for the TreeTime validation, comparison with other methods on the
simulated dataset.
To plot the validation results, CSV files generated by the
'generate_simulated_data.py' script are required.
The script plots the reconstruction of the mutation rate and the time of the
most recent common ancestor in comparison with other methods (LSD, BEAST)
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as mplcm
import matplotlib.colors as colors
import os, sys
import pandas
from Bio import Phylo
import utility_functions_beast as beast_utils
import utility_functions_simulated_data as sim_utils
from plot_defaults import *
def read_treetime_results_csv(fname):
    """
    Read results of the TreeTime simulations

    Args:
     - fname: path to the input file

    Returns:
     - df: Table of results as pandas data-frame
    """
    columns = ['File', 'Sim_Tmrca', 'Tmrca', 'mu', 'R', 'R2_int']
    df = pandas.read_csv(fname, names=columns, header=0)

    # filter obviously failed simulations
    df = df[[len(str(k)) > 10 for k in df.File]]
    df = df[df.R > 0.1]

    # some very basic preprocessing
    df['dTmrca'] = -(df['Sim_Tmrca'] - df['Tmrca'])
    # Simulation parameters are encoded in the file name as
    # '..._<N>_<Ns>_<Ts>_..._<mu>_...' tokens split on '_'.
    # Bug fix: the original assigned bare map() iterators to the columns,
    # which does not expand to per-row values under Python 3 — use list
    # comprehensions so each row gets its parsed parameter.
    name_tokens = [x.split("/")[-1].split('_') for x in df.File]
    df['Sim_mu'] = [float(t[6][2:]) for t in name_tokens]
    df['Ns'] = [int(t[3][2:]) for t in name_tokens]
    df['Ts'] = [int(t[4][2:]) for t in name_tokens]
    df['N'] = [int(t[2][1:]) for t in name_tokens]
    df['T'] = df['Ns'] * df['Ts']
    df['Nmu'] = (df['N'] * df['Sim_mu'])
    return df
def read_lsd_results_csv(fname):
    """
    Read results of the LSD simulations

    Args:
     - fname: path to the input file

    Returns:
     - df: Table of results as pandas data-frame
    """
    columns = ['File', 'Sim_Tmrca', 'Tmrca', 'mu', 'obj']
    df = pandas.read_csv(fname, names=columns, header=0)

    # Filter out obviously wrong data
    df = df[[len(k) > 10 for k in df.File]]

    # Some basic preprocessing
    df['dTmrca'] = -(df['Sim_Tmrca'] - df['Tmrca'])
    # Simulation parameters are encoded in the file name tokens (split on '_').
    # Bug fix: the original assigned bare map() iterators to the columns,
    # which does not expand to per-row values under Python 3 — use list
    # comprehensions so each row gets its parsed parameter.
    name_tokens = [x.split("/")[-1].split('_') for x in df.File]
    df['Sim_mu'] = [float(t[6][2:]) for t in name_tokens]
    df['Ns'] = [int(t[3][2:]) for t in name_tokens]
    df['Ts'] = [int(t[4][2:]) for t in name_tokens]
    df['N'] = [int(t[2][1:]) for t in name_tokens]
    df['T'] = df['Ns'] * df['Ts']
    df['Nmu'] = (df['N'] * df['Sim_mu'])
    return df
def read_beast_results_csv(fname):
    """
    Read results of the BEAST simulations

    Args:
     - fname: path to the input file

    Returns:
     - df: Table of results as pandas data-frame
    """
    columns = ['File', 'N', 'Sim_Tmrca', 'Sim_mu', 'Ns', 'Ts', 'T', 'Nmu',
               'LH', 'LH_std', 'Tmrca', 'Tmrca_std', 'mu', 'mu_std']
    df = pandas.read_csv(fname, names=columns, header=0)
    # Drop rows whose file name is implausibly short (failed runs).
    df = df[[len(k) > 10 for k in df.File]]
    # Signed Tmrca error: reconstructed minus simulated value.
    df['dTmrca'] = df['Tmrca'] - df['Sim_Tmrca']
    return df
def create_pivot_table(df, T_over_N=None, mean_or_median='median'):
    """
    Aggregate the raw results into one row per N*mu value.

    Args:
     - df (pandas.DataFrame): the raw dataframe as read from a CSV file.
       Regardless of the producing tool (TreeTime, LSD, BEAST), it is
       processed uniformly as long as it provides the columns:
        - T: total evolution time (tree diameter)
        - N: population size
        - Nmu: N*mu product used in the simulations
        - Sim_mu: mutation rate used in the simulations
        - mu: reconstructed mutation rate
        - dTmrca: difference between real and reconstructed Tmrca
     - T_over_N (float or None): total evolution time in units of the
       expected coalescence time. If not None, only rows with a matching
       T/N ratio are kept (the default simulations provide 2., 4., 10.;
       other values require re-running generate_simulated_data_submit.py).
     - mean_or_median (str): 'mean' places each datapoint at the mean with
       the standard deviation as the errorbar; any other value uses the
       median with the inter-quartile range.

    Returns:
     - pandas.DataFrame with columns Nmu, dMu_mean, dMu_err, dTmrca_mean,
       dTmrca_err, sorted by Nmu.
    """
    data = df if T_over_N is None else df[df["T"] / df["N"] == T_over_N]
    nmu_values = np.unique(data.Nmu)
    keep = np.ones(nmu_values.shape, dtype=bool)
    mu_center, mu_spread = [], []
    tmrca_center, tmrca_spread = [], []
    for pos, nmu in enumerate(nmu_values):
        mask = data.Nmu == nmu
        if mask.sum() == 0:
            # no datapoints for this N*mu -- drop it from the result
            keep[pos] = False
            continue
        # relative clock-rate error and relative Tmrca error for this N*mu
        rel_mu = -(data.Sim_mu[mask] - data.mu[mask]) / data.Sim_mu[mask]
        rel_mu.sort_values(inplace=True)
        rel_tmrca = data.dTmrca[mask] / data.N[mask]
        rel_tmrca.sort_values(inplace=True)
        if mean_or_median == "mean":
            mu_center.append(np.mean(rel_mu))
            mu_spread.append(np.std(rel_mu))
            tmrca_center.append(np.mean(rel_tmrca))
            tmrca_spread.append(np.std(rel_tmrca))
        else:
            q75, q25 = np.percentile(rel_mu, [75, 25])
            mu_spread.append(q75 - q25)
            mu_center.append(np.median(rel_mu))
            q75, q25 = np.percentile(rel_tmrca, [75, 25])
            tmrca_spread.append(q75 - q25)
            tmrca_center.append(np.median(rel_tmrca))
    res = pandas.DataFrame({
        "Nmu": nmu_values[keep],
        "dMu_mean": mu_center,
        "dMu_err": mu_spread,
        "dTmrca_mean": tmrca_center,
        "dTmrca_err": tmrca_spread,
    })
    return res.sort_values(by='Nmu')
def plot_simulated_data(Tmrca_or_Mu,
                        treetime_pivot=None, lsd_pivot=None, beast_pivot=None,
                        figname=None, plot_idxs=None):
    """
    Plot reconstruction accuracy versus diversity (N*mu) for TreeTime,
    LSD and BEAST pivot tables on a shared log-x axis.

    Args:
     - Tmrca_or_Mu (str): 'Mu' plots the relative clock-rate error,
       'Tmrca' plots the relative Tmrca error; anything else raises.
     - treetime_pivot, lsd_pivot, beast_pivot: pivot tables as produced by
       create_pivot_table; a pivot left as None is simply not drawn.
     - figname (str or None): if given, the figure is saved as
       "<figname>.<fmt>" for every fmt in the module-level `formats`.
     - plot_idxs: optional boolean/index mask selecting which datapoints
       to draw; None draws everything.

    NOTE(review): markersize, tt_color, beast_color, lsd_color, legend_fs,
    label_fs, tick_fs, onecolumn_figsize and formats are module-level
    globals (presumably from plot_defaults) -- not defined in this function.
    """
    from plot_defaults import shift_point_by_markersize
    fig = plt.figure(figsize=onecolumn_figsize)
    axes = fig.add_subplot(111)
    axes.grid('on')
    axes.set_xscale('log')
    # Select column names and annotation texts for the requested quantity.
    if Tmrca_or_Mu == 'Mu':
        mean = 'dMu_mean'
        err = 'dMu_err'
        title = "Clock rate deviation"
        ylabel = "relative clock rate error, $[\Delta\mu / \mu]$"
        text_overestimated = '$\mathrm{\mu}$ overestimated'
        text_underestimated = '$\mathrm{\mu}$ underestimated'
    elif Tmrca_or_Mu == 'Tmrca':
        mean = 'dTmrca_mean'
        err = 'dTmrca_err'
        title = "Accuracy of Tmrca prediction"
        ylabel = "relative $T_{mrca}$ error, $[\Delta\mathrm{T_{mrca}} / \mathrm{N}]$"
        text_overestimated = '$\mathrm{T_{mrca}}$ too late'
        text_underestimated = '$\mathrm{T_{mrca}}$ too early'
    else:
        raise Exception("Unknown plot type!")
    # Plot treetime
    if treetime_pivot is not None:
        # Shift the x positions slightly so overlapping series stay readable.
        x, y = shift_point_by_markersize(axes, treetime_pivot["Nmu"], treetime_pivot[mean], +markersize*.75)
        if plot_idxs is None:
            tt_plot_idxs = np.ones(x.shape[0] ,dtype=bool)
        else:
            tt_plot_idxs = plot_idxs
        # NOTE: TreeTime and LSD errorbars are halved (err/2); BEAST's are not.
        axes.errorbar(x[tt_plot_idxs],
                y[tt_plot_idxs],
                (treetime_pivot[err].values/2)[tt_plot_idxs],
                fmt='-',
                marker='o',
                markersize=markersize,
                #markerfacecolor='w',
                markeredgecolor=tt_color,
                mew=1.3,
                c=tt_color, label="TreeTime")
    # Plot BEAST
    if beast_pivot is not None:
        if plot_idxs is None:
            beast_plot_idxs = np.ones(beast_pivot.shape[0] ,dtype=bool)
        else:
            beast_plot_idxs = plot_idxs
        axes.errorbar(beast_pivot["Nmu"].loc[beast_plot_idxs].values,
                 beast_pivot[mean].loc[beast_plot_idxs].values,
                 beast_pivot[err].loc[beast_plot_idxs].values,
                 marker='o',
                 markersize=markersize,
                 c=beast_color,
                 label="BEAST")
    # Plot LSD
    if lsd_pivot is not None:
        x, y = shift_point_by_markersize(axes, lsd_pivot["Nmu"], lsd_pivot[mean], +markersize/2)
        if plot_idxs is None:
            lsd_plot_idxs = np.ones(x.shape[0] ,dtype=bool)
        else:
            lsd_plot_idxs = plot_idxs
        axes.errorbar(x[lsd_plot_idxs],
                y[lsd_plot_idxs],
                (lsd_pivot[err].values/2)[lsd_plot_idxs],
                fmt='-',
                marker='o',
                markersize=markersize,
                c=lsd_color,
                label="LSD")
    # Zero-error reference line.
    plt.hlines(0, 0, 1)
    axes.legend(loc=1,fontsize=legend_fs)
    #axes.set_title(title)
    axes.set_ylabel(ylabel, fontsize = label_fs)
    axes.set_xlabel('diversity, $\mathrm{N}\cdot\mu$', fontsize = label_fs)
    for label in axes.get_xticklabels():
        label.set_fontsize(tick_fs)
    for label in axes.get_yticklabels():
        label.set_fontsize(tick_fs)
    fig.text(0.15, 0.85, text_overestimated, fontsize=tick_fs)
    fig.text(0.15, 0.15, text_underestimated, fontsize=tick_fs)
    if figname is not None:
        # Save one file per configured output format.
        for fmt in formats:
            fig.savefig("{}.{}".format(figname, fmt))
if __name__ == '__main__':
    ##
    ## Configure the parameters
    ##
    """
    Specify the total evolution time, or the tree diameter (T) relative to the
    coalescence time, as expected from the neutral theory (N). The values of the
    T_over_N can be varied by re-running the simulations with different evolution
    time or sampling frequencies. By default, the following parameters are available:
     - 2.0
     - 4.0
     - 10.0
    """
    T_over_N = 10.
    """
    What should be used to calculate the error bars and the position of the data
    points.
    Possible values:
     - mean: the point is plotted in the mean of the distribution, the error bars \
    show the standard deviation of the distribution
     - median: the data point is set to the median of the distribution, the error
     bars show the quantiles of the distribution
    """
    mean_or_median = 'median'
    """
    Should save figures? If True, note the figure name in the plot_simulated_data
    function parameters.
    """
    SAVE_FIG = True
    ##
    ## Set the CSV file names with the data to plot
    ##
    # files with the reconstruction results:
    treetime_csv = "./simulated_data/_treetime_fasttree_res.csv"
    lsd_csv = "./simulated_data/_lsd_fasttree_res.csv"
    beast_csv = "./simulated_data/_beast_res.csv"
    ##
    ## Read, process and plot the data
    ##
    # read csv's to the pandas dataframes:
    treetime_df = read_treetime_results_csv(treetime_csv)
    lsd_df = read_lsd_results_csv(lsd_csv)
    beast_df = read_beast_results_csv(beast_csv)
    # make pivot tables and filter only the relevant parameters:
    lsd_pivot = create_pivot_table(lsd_df, T_over_N=T_over_N, mean_or_median=mean_or_median)
    beast_pivot = create_pivot_table(beast_df, T_over_N=T_over_N, mean_or_median=mean_or_median)
    treetime_pivot = create_pivot_table(treetime_df, T_over_N=T_over_N, mean_or_median=mean_or_median)
    # plot the data: and save figures if needed:
    # NOTE(review): saving writes into ./figs/ -- the directory must already exist.
    # plot Tmrca figure:
    plot_simulated_data('Tmrca', treetime_pivot, lsd_pivot, beast_pivot,
        figname="./figs/simdata_Tmrca_TN{}_{}".format(T_over_N, mean_or_median) if SAVE_FIG else None,
        #plot_idxs=np.array([1,2,4,6,7,9,10])
        )
    # plot Mu figure
    plot_simulated_data('Mu', treetime_pivot, lsd_pivot, beast_pivot,
        figname="./figs/simdata_Mu_TN{}_{}".format(T_over_N, mean_or_median) if SAVE_FIG else None,
        #plot_idxs=np.array([1,2,4,6,7,9,10])
        )
| [
"numpy.mean",
"numpy.median",
"numpy.ones",
"numpy.unique",
"pandas.read_csv",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.figure",
"plot_defaults.shift_point_by_markersize",
"numpy.std",
"pandas.DataFrame",
"numpy.percentile"
] | [((954, 1001), 'pandas.read_csv', 'pandas.read_csv', (['fname'], {'names': 'columns', 'header': '(0)'}), '(fname, names=columns, header=0)\n', (969, 1001), False, 'import pandas\n'), ((1881, 1928), 'pandas.read_csv', 'pandas.read_csv', (['fname'], {'names': 'columns', 'header': '(0)'}), '(fname, names=columns, header=0)\n', (1896, 1928), False, 'import pandas\n'), ((2861, 2908), 'pandas.read_csv', 'pandas.read_csv', (['fname'], {'names': 'columns', 'header': '(0)'}), '(fname, names=columns, header=0)\n', (2876, 2908), False, 'import pandas\n'), ((4710, 4727), 'numpy.unique', 'np.unique', (['DF.Nmu'], {}), '(DF.Nmu)\n', (4719, 4727), True, 'import numpy as np\n'), ((4745, 4777), 'numpy.ones', 'np.ones', (['N_MUS.shape'], {'dtype': 'bool'}), '(N_MUS.shape, dtype=bool)\n', (4752, 4777), True, 'import numpy as np\n'), ((5910, 6050), 'pandas.DataFrame', 'pandas.DataFrame', (["{'Nmu': N_MUS[N_MUS_idxs], 'dMu_mean': mu_mean, 'dMu_err': mu_err,\n 'dTmrca_mean': tmrca_mean, 'dTmrca_err': tmrca_err}"], {}), "({'Nmu': N_MUS[N_MUS_idxs], 'dMu_mean': mu_mean, 'dMu_err':\n mu_err, 'dTmrca_mean': tmrca_mean, 'dTmrca_err': tmrca_err})\n", (5926, 6050), False, 'import pandas\n'), ((6379, 6416), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'onecolumn_figsize'}), '(figsize=onecolumn_figsize)\n', (6389, 6416), True, 'import matplotlib.pyplot as plt\n'), ((8967, 8986), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (8977, 8986), True, 'import matplotlib.pyplot as plt\n'), ((7277, 7377), 'plot_defaults.shift_point_by_markersize', 'shift_point_by_markersize', (['axes', "treetime_pivot['Nmu']", 'treetime_pivot[mean]', '(+markersize * 0.75)'], {}), "(axes, treetime_pivot['Nmu'], treetime_pivot[mean],\n +markersize * 0.75)\n", (7302, 7377), False, 'from plot_defaults import shift_point_by_markersize\n'), ((8463, 8551), 'plot_defaults.shift_point_by_markersize', 'shift_point_by_markersize', (['axes', "lsd_pivot['Nmu']", 
'lsd_pivot[mean]', '(+markersize / 2)'], {}), "(axes, lsd_pivot['Nmu'], lsd_pivot[mean], +\n markersize / 2)\n", (8488, 8551), False, 'from plot_defaults import shift_point_by_markersize\n'), ((5591, 5619), 'numpy.percentile', 'np.percentile', (['dMu', '[75, 25]'], {}), '(dMu, [75, 25])\n', (5604, 5619), True, 'import numpy as np\n'), ((5750, 5781), 'numpy.percentile', 'np.percentile', (['dTmrca', '[75, 25]'], {}), '(dTmrca, [75, 25])\n', (5763, 5781), True, 'import numpy as np\n'), ((7429, 7460), 'numpy.ones', 'np.ones', (['x.shape[0]'], {'dtype': 'bool'}), '(x.shape[0], dtype=bool)\n', (7436, 7460), True, 'import numpy as np\n'), ((7984, 8025), 'numpy.ones', 'np.ones', (['beast_pivot.shape[0]'], {'dtype': 'bool'}), '(beast_pivot.shape[0], dtype=bool)\n', (7991, 8025), True, 'import numpy as np\n'), ((8603, 8634), 'numpy.ones', 'np.ones', (['x.shape[0]'], {'dtype': 'bool'}), '(x.shape[0], dtype=bool)\n', (8610, 8634), True, 'import numpy as np\n'), ((5408, 5420), 'numpy.mean', 'np.mean', (['dMu'], {}), '(dMu)\n', (5415, 5420), True, 'import numpy as np\n'), ((5448, 5459), 'numpy.std', 'np.std', (['dMu'], {}), '(dMu)\n', (5454, 5459), True, 'import numpy as np\n'), ((5492, 5507), 'numpy.mean', 'np.mean', (['dTmrca'], {}), '(dTmrca)\n', (5499, 5507), True, 'import numpy as np\n'), ((5538, 5552), 'numpy.std', 'np.std', (['dTmrca'], {}), '(dTmrca)\n', (5544, 5552), True, 'import numpy as np\n'), ((5711, 5725), 'numpy.median', 'np.median', (['dMu'], {}), '(dMu)\n', (5720, 5725), True, 'import numpy as np\n'), ((5879, 5896), 'numpy.median', 'np.median', (['dTmrca'], {}), '(dTmrca)\n', (5888, 5896), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import sys
sys.path.append('build')
import kosutils
from tracker import *

# Dimensions of the output (render) window.
H = 700
W = 700
dispWindow = np.zeros((H, W, 3), dtype=np.uint8)
PREDICTOR_PATH = "../shape_predictor_5_face_landmarks.dat"
# 3D object rendered into the display window.
obj1 = kosutils.kos_Obj3D(dispWindow.shape[:2])
# Virtual camera that projects the object onto the window.
cam1 = kosutils.kos_vcam(dispWindow.shape[:2])
cap = cv2.VideoCapture(0)
tr = tracker(PREDICTOR_PATH)
angle = 0
# BUGFIX: p1x_/p1y_ were passed to tr.getNose() before ever being assigned,
# which raised NameError on the very first frame. Seed the previous-position
# estimate with the window centre; the tracker overwrites it every iteration.
p1x_ = W // 2
p1y_ = H // 2
while True:
    ret, frame = cap.read()
    if ret:
        frame = cv2.flip(frame, 1)
        img = np.copy(frame)
        size = img.shape[:2]
        # Track the nose tip, using the previous position as the prior.
        x, y = tr.getNose(p1x_, p1y_, frame)
        p1x_ = x
        p1y_ = y
        # Advance the object rotation by 2 degrees per frame, wrap at 2*pi.
        angle += 2 * np.pi / 180
        if angle > 2 * np.pi:
            angle = 0
        # Camera drift: x/y follow the nose offset from the frame centre,
        # depth follows the apparent face width reported by the tracker.
        drift_x = x - size[1] // 2
        drift_y = size[0] // 2 - y
        drift_z = -cam1.focal_length - 2 * (500 - 2 * tr.face_width)
        cam1.updtTxMat(drift_x, drift_y, drift_z)
        obj1.rotateObj(np.pi / 4, angle, np.pi)
        cam1.render(obj1.pts3d, dispWindow)
| [
"numpy.copy",
"cv2.flip",
"kosutils.kos_Obj3D",
"numpy.zeros",
"cv2.VideoCapture",
"sys.path.append",
"kosutils.kos_vcam"
] | [((42, 66), 'sys.path.append', 'sys.path.append', (['"""build"""'], {}), "('build')\n", (57, 66), False, 'import sys\n'), ((179, 214), 'numpy.zeros', 'np.zeros', (['(H, W, 3)'], {'dtype': 'np.uint8'}), '((H, W, 3), dtype=np.uint8)\n', (187, 214), True, 'import numpy as np\n'), ((317, 357), 'kosutils.kos_Obj3D', 'kosutils.kos_Obj3D', (['dispWindow.shape[:2]'], {}), '(dispWindow.shape[:2])\n', (335, 357), False, 'import kosutils\n'), ((407, 446), 'kosutils.kos_vcam', 'kosutils.kos_vcam', (['dispWindow.shape[:2]'], {}), '(dispWindow.shape[:2])\n', (424, 446), False, 'import kosutils\n'), ((453, 472), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (469, 472), False, 'import cv2\n'), ((590, 608), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (598, 608), False, 'import cv2\n'), ((622, 636), 'numpy.copy', 'np.copy', (['frame'], {}), '(frame)\n', (629, 636), True, 'import numpy as np\n')] |
from torch.utils.data import Dataset
import torch
from torchvision import transforms
import cv2 as cv
from PIL import Image
import librosa
import os
import numpy as np
def one_hot_encode(x, size):
    """Return a list of length *size* that is all zeros except for a 1 at index *x*."""
    encoding = [0 for _ in range(size)]
    encoding[x] = 1
    return encoding
from videotransforms.video_transforms import Compose, Resize, RandomCrop, RandomRotation, ColorJitter
from videotransforms.volume_transforms import ClipToTensor
# Dataset for the 3D CNN model, returns sequence of frames (every second frame) from video
# Dataset for the 3D CNN model: yields a clip of up to 45 frames (every
# second frame) from each video, plus the emotion/intensity label vector.
class VideoFramesDataset(Dataset):
    def __init__(self, frame_size):
        """Index all videos under root_dir and build the augmentation pipeline.

        frame_size: (height, width) of the final random crop.
        """
        super(VideoFramesDataset, self).__init__()
        self.root_dir = "../Datasets/emotion_detection/full/"
        self.video_names = os.listdir(self.root_dir)
        # Resize preserves the source 720x1280 aspect ratio before cropping.
        aspect_ratio = 720 / 1280
        scale_size = (frame_size[0], int(frame_size[1] / aspect_ratio))
        video_transform_list = [
            RandomRotation(20),
            Resize(scale_size),
            RandomCrop(frame_size),
            ColorJitter(0.1, 0.1, 0.1, 0.1),
            ClipToTensor(channel_nb=3)
        ]
        self.video_transform = Compose(video_transform_list)
        '''
        Modality (01 = full-AV, 02 = video-only, 03 = audio-only).
        Vocal channel (01 = speech, 02 = song).
        Emotion (01 = neutral, 02 = calm, 03 = happy, 04 = sad, 05 = angry, 06 = fearful, 07 = disgust, 08 = surprised).
        Emotional intensity (01 = normal, 02 = strong). NOTE: There is no strong intensity for the 'neutral' emotion.
        Statement (01 = "Kids are talking by the door", 02 = "Dogs are sitting by the door").
        Repetition (01 = 1st repetition, 02 = 2nd repetition).
        Actor (01 to 24. Odd numbered actors are male, even numbered actors are female).
        '''
        # Map the '-'-separated filename tags to label indices.
        self.emotion_label_dict = {"01": 0,
                                   "02": 1,
                                   "03": 2,
                                   "04": 3,
                                   "05": 4,
                                   "06": 5,
                                   "07": 6,
                                   "08": 7,}
        self.emotion_intensity_dict = {"01": 0,
                                       "02": 1,}
    def __getitem__(self, idx):
        """Return (transformed clip, label) where label is the 8-way one-hot
        emotion vector with the intensity appended as a 9th element."""
        tags = self.video_names[idx].split("-")
        #one hot encoding the output
        emotion = one_hot_encode(self.emotion_label_dict[tags[2]], 8)
        intensity = self.emotion_intensity_dict[tags[3]]
        y = np.array(emotion + [intensity])
        cap = cv.VideoCapture(self.root_dir + self.video_names[idx])
        frames = list()
        counter = 0
        frames_appended = 0
        # Keep every second decoded frame, up to 45 frames per clip.
        while True:
            return_flag, frame = cap.read()
            counter = counter + 1
            if not return_flag or frames_appended >= 45:
                break
            if counter % 2 == 0 and frames_appended <= 45:
                frames_appended += 1
                # NOTE(review): cv frames are typically uint8 already, so
                # frame*255 would wrap around -- confirm intent.
                frames.append(Image.fromarray(frame*255))
        frames = self.video_transform(frames)
        return frames, y
    def __len__(self):
        # One sample per video file found in root_dir.
        return len(self.video_names)
if __name__ == "__main__":
videoDataset = VideoFramesDataset((200, 200))
print(videoDataset[0][0].shape)
print(len(videoDataset))
| [
"PIL.Image.fromarray",
"os.listdir",
"videotransforms.video_transforms.Resize",
"videotransforms.volume_transforms.ClipToTensor",
"numpy.array",
"videotransforms.video_transforms.ColorJitter",
"cv2.VideoCapture",
"videotransforms.video_transforms.Compose",
"videotransforms.video_transforms.RandomRot... | [((718, 743), 'os.listdir', 'os.listdir', (['self.root_dir'], {}), '(self.root_dir)\n', (728, 743), False, 'import os\n'), ((1120, 1149), 'videotransforms.video_transforms.Compose', 'Compose', (['video_transform_list'], {}), '(video_transform_list)\n', (1127, 1149), False, 'from videotransforms.video_transforms import Compose, Resize, RandomCrop, RandomRotation, ColorJitter\n'), ((2488, 2519), 'numpy.array', 'np.array', (['(emotion + [intensity])'], {}), '(emotion + [intensity])\n', (2496, 2519), True, 'import numpy as np\n'), ((2535, 2589), 'cv2.VideoCapture', 'cv.VideoCapture', (['(self.root_dir + self.video_names[idx])'], {}), '(self.root_dir + self.video_names[idx])\n', (2550, 2589), True, 'import cv2 as cv\n'), ((906, 924), 'videotransforms.video_transforms.RandomRotation', 'RandomRotation', (['(20)'], {}), '(20)\n', (920, 924), False, 'from videotransforms.video_transforms import Compose, Resize, RandomCrop, RandomRotation, ColorJitter\n'), ((938, 956), 'videotransforms.video_transforms.Resize', 'Resize', (['scale_size'], {}), '(scale_size)\n', (944, 956), False, 'from videotransforms.video_transforms import Compose, Resize, RandomCrop, RandomRotation, ColorJitter\n'), ((970, 992), 'videotransforms.video_transforms.RandomCrop', 'RandomCrop', (['frame_size'], {}), '(frame_size)\n', (980, 992), False, 'from videotransforms.video_transforms import Compose, Resize, RandomCrop, RandomRotation, ColorJitter\n'), ((1006, 1037), 'videotransforms.video_transforms.ColorJitter', 'ColorJitter', (['(0.1)', '(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1, 0.1)\n', (1017, 1037), False, 'from videotransforms.video_transforms import Compose, Resize, RandomCrop, RandomRotation, ColorJitter\n'), ((1051, 1077), 'videotransforms.volume_transforms.ClipToTensor', 'ClipToTensor', ([], {'channel_nb': '(3)'}), '(channel_nb=3)\n', (1063, 1077), False, 'from videotransforms.volume_transforms import ClipToTensor\n'), ((2980, 3008), 
'PIL.Image.fromarray', 'Image.fromarray', (['(frame * 255)'], {}), '(frame * 255)\n', (2995, 3008), False, 'from PIL import Image\n')] |
import sys
import matplotlib
# Select the interactive TkAgg backend (the headless Agg line is kept for reference).
#matplotlib.use('Agg')
matplotlib.use('TkAgg') # revert above
import matplotlib.pyplot as plt
import os
import numpy as np
import glob
def ballistic_flight(v0, g, t):
    """Ballistic trajectory sampled at the given times.

    Assumes a perfectly vertical launch and consistent units between all
    arguments.

    Args:
        v0: initial launch speed
        g: gravitational acceleration
        t: numpy array of sample times

    Returns:
        (x, y, t_apex, x_apex, y_apex) where y is clamped at ground level 0
        and the *_apex values describe the trajectory's highest point.
    """
    downrange = v0 * t
    altitude = v0 * t - 0.5 * g * t**2
    # Clamp below-ground samples to the surface.
    altitude = np.where(altitude < 0, 0, altitude)
    t_apex = v0 / g
    x_apex = v0 * t_apex
    y_apex = v0 * t_apex - 0.5 * g * (t_apex)**2
    return downrange, altitude, t_apex, x_apex, y_apex
# Unit conversion: metres -> kilometres.
m_2_km = 1e-3
v0 = 6e4*m_2_km #km s-1
earth_g = 9.80665 #m s-2
# Solar surface gravity is 28.02 times Earth's, converted to km s-2.
sun_g = 28.02*earth_g*m_2_km # km s-2
t = np.linspace(0,500,1000)
test = ballistic_flight(v0, sun_g, t)
# Plot altitude (clamped at 0) versus time.
plt.plot(t,test[1])
| [
"matplotlib.use",
"numpy.linspace",
"matplotlib.pyplot.plot",
"numpy.where"
] | [((52, 75), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (66, 75), False, 'import matplotlib\n'), ((643, 668), 'numpy.linspace', 'np.linspace', (['(0)', '(500)', '(1000)'], {}), '(0, 500, 1000)\n', (654, 668), True, 'import numpy as np\n'), ((707, 727), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'test[1]'], {}), '(t, test[1])\n', (715, 727), True, 'import matplotlib.pyplot as plt\n'), ((388, 409), 'numpy.where', 'np.where', (['(y < 0)', '(0)', 'y'], {}), '(y < 0, 0, y)\n', (396, 409), True, 'import numpy as np\n')] |
import pytesseract
import cv2
import numpy as np
pytesseract.pytesseract.tesseract_cmd = "C:/Program Files/Tesseract-OCR/tesseract.exe"
# Error prevention code
def ocr_from_img(path):
    """Run Tesseract OCR on the image at *path* and return the extracted text.

    The image is loaded via np.fromfile + cv2.imdecode instead of cv2.imread
    so that non-ASCII (e.g. Korean) file paths are handled correctly.
    """
    raw = np.fromfile(path, np.uint8)
    image = cv2.imdecode(raw, cv2.IMREAD_UNCHANGED)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Local (Gaussian) thresholding to binarize the page before OCR.
    binarized = cv2.adaptiveThreshold(
        gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 85, 11)
    return pytesseract.image_to_string(binarized, lang='eng')
return text
| [
"numpy.fromfile",
"cv2.adaptiveThreshold",
"cv2.imdecode",
"pytesseract.image_to_string",
"cv2.cvtColor"
] | [((203, 230), 'numpy.fromfile', 'np.fromfile', (['path', 'np.uint8'], {}), '(path, np.uint8)\n', (214, 230), True, 'import numpy as np\n'), ((260, 305), 'cv2.imdecode', 'cv2.imdecode', (['img_array', 'cv2.IMREAD_UNCHANGED'], {}), '(img_array, cv2.IMREAD_UNCHANGED)\n', (272, 305), False, 'import cv2\n'), ((346, 383), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (358, 383), False, 'import cv2\n'), ((410, 506), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(85)', '(11)'], {}), '(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 85, 11)\n', (431, 506), False, 'import cv2\n'), ((523, 582), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['adaptive_threshold'], {'lang': '"""eng"""'}), "(adaptive_threshold, lang='eng')\n", (550, 582), False, 'import pytesseract\n')] |
import numpy as np
import math
def _t(x):
return [[x[i][j] for i in range(len(x))] for j in range(len(x[0]))]
class Geometry:
    """Factory helpers that build obj-dicts (keys: vp, vt, vn, f) for rendering."""
    @classmethod
    def fromobjstr(cls, code, flip=False):
        """Parse an OBJ file given as a bytes literal; optionally flip face winding."""
        from .loader import readobj
        from io import BytesIO
        obj = readobj(BytesIO(code))
        if flip:
            objflipface(obj)
        return obj
    @classmethod
    def fromarrays(cls, vertices, faces, texcoords, normals, flip=False):
        """Build an obj-dict from raw arrays.

        Arrays are converted to float32/int32, the Y and Z axes are swapped
        (objswapaxis), and the winding is optionally flipped.
        """
        vertices = np.array(vertices, dtype=np.float32)
        faces = np.array(faces, dtype=np.int32)
        texcoords = np.array(texcoords, dtype=np.float32)
        normals = np.array(normals, dtype=np.float32)
        obj = dict(vp=vertices, f=faces, vn=normals, vt=texcoords)
        objswapaxis(obj, 1, 2)
        if flip:
            objflipface(obj)
        return obj
    @classmethod
    def cylinder(cls, semiheight=1, radius=1, N=32):
        """Build a closed cylinder with N side segments along the Z axis."""
        from .loader import _tri_append
        # Vertices 0 and 1 are the bottom/top cap centres.
        vertices = [(0, 0, -semiheight), (0, 0, +semiheight)]
        texcoords = [(0, 0)]
        normals = [(0, 0, -1), (0, 0, 1)]
        faces = []
        # Ring vertices: even index = bottom rim, odd index = top rim.
        for i in range(N):
            angle = (i / N) * np.pi * 2
            pos = math.cos(angle), math.sin(angle)
            rpos = [p * radius for p in pos]
            vertices.append((*rpos, -semiheight))
            vertices.append((*rpos, +semiheight))
            normals.append((*pos, 0))
            texcoords.append(pos)
        # Caps (two triangle fans) and the quad strip forming the side wall.
        for i in range(N):
            j = (i + 1) % N
            a, b = i * 2 + 2, j * 2 + 2
            c, d = j * 2 + 3, i * 2 + 3
            faces.append(_t([[0, a, b], [0, i, j], [0, 0, 0]]))
            faces.append(_t([[1, c, d], [0, j, i], [1, 1, 1]]))
            _tri_append(faces, _t([[d, c, b, a], [i, j, j, i], [i + 2, j + 2, j + 2, i + 2]]))
        return cls.fromarrays(vertices, faces, texcoords, normals, flip=True)
    @classmethod
    def meshgrid(cls, n):
        """Build an n-by-n planar grid in [-1, 1]^2 (z = 0), quads split into triangles."""
        def _face(x, y):
            # Four corner indices of the quad whose lower-left corner is (x, y).
            return np.array([(x, y), (x, y + 1), (x + 1, y + 1), (x + 1, y)])
        n_particles = n**2
        n_faces = (n - 1)**2
        xi = np.arange(n)
        yi = np.arange(n)
        xs = np.linspace(0, 1, n)
        ys = np.linspace(0, 1, n)
        uv = np.array(np.meshgrid(xs, ys)).swapaxes(0, 2).reshape(n_particles, 2)
        faces = _face(*np.meshgrid(xi[:-1], yi[:-1])).swapaxes(0, 1).swapaxes(1, 2).swapaxes(2, 3)
        # Flatten (row, col) grid coordinates into linear vertex indices.
        faces = (faces[1] * n + faces[0]).reshape(n_faces, 4)
        pos = np.concatenate([uv * 2 - 1, np.zeros((n_particles, 1))], axis=1)
        # Texcoord index = vertex index; normal index = 0 for every corner.
        faces = np.moveaxis(np.array([faces, faces, np.zeros((n_faces, 4), dtype=np.int_)]), 0, 2)
        # Split each quad (0,1,2,3) into triangles (0,1,2) and (0,2,3).
        faces = np.concatenate([faces[:, (0, 1, 2)], faces[:, (0, 2, 3)]], axis=0)
        normals = np.array([[0, 0, 1]])
        return cls.fromarrays(pos, faces, uv, normals)
    @classmethod
    def cube(cls):
        """Build the unit cube from an embedded OBJ string."""
        return cls.fromobjstr(b'''o Cube
v 1.0 1.0 -1.0
v 1.0 -1.0 -1.0
v 1.0 1.0 1.0
v 1.0 -1.0 1.0
v -1.0 1.0 -1.0
v -1.0 -1.0 -1.0
v -1.0 1.0 1.0
v -1.0 -1.0 1.0
vt 0.0 0.0
vt 0.0 1.0
vt 1.0 1.0
vt 1.0 0.0
vn 0.0 1.0 0.0
vn 0.0 0.0 1.0
vn -1.0 0.0 0.0
vn 0.0 -1.0 0.0
vn 1.0 0.0 0.0
vn 0.0 0.0 -1.0
f 1/1/1 5/2/1 7/3/1
f 7/3/1 3/4/1 1/1/1
f 4/1/2 3/2/2 7/3/2
f 7/3/2 8/4/2 4/1/2
f 8/1/3 7/2/3 5/3/3
f 5/3/3 6/4/3 8/1/3
f 6/1/4 2/2/4 4/3/4
f 4/3/4 8/4/4 6/1/4
f 2/1/5 1/2/5 3/3/5
f 3/3/5 4/4/5 2/1/5
f 6/1/6 5/2/6 1/3/6
f 1/3/6 2/4/6 6/1/6
''')
def objunpackmtls(obj):
    """Split an obj-dict into per-material sub-objects.

    obj['usemtl'] is a list of (start_face_index, material_name) pairs; each
    material spans the faces up to the next entry (or the end). Faces of the
    same material appearing in several runs are concatenated. The returned
    dict maps material name -> obj-dict sharing vp/vn/vt with the input.
    """
    all_faces = obj['f']
    begins = [beg for beg, _ in obj['usemtl']]
    run_ends = begins[1:] + [len(all_faces)]
    parts = {}
    for (beg, name), end in zip(obj['usemtl'], run_ends):
        chunk = all_faces[beg:end]
        if name in parts:
            parts[name] = np.concatenate([parts[name], chunk], axis=0)
        else:
            parts[name] = chunk
    # Wrap each face array into a full obj-dict sharing the vertex data.
    # TODO: vertex reachability elimation
    for name, faces in parts.items():
        parts[name] = {'f': faces, 'vp': obj['vp'], 'vn': obj['vn'], 'vt': obj['vt']}
    return parts
def objmtlids(obj):
    """Return a per-face material-id array (1-based; ids assigned in order of
    first appearance in obj['usemtl'])."""
    faces = obj['f']
    mids = np.zeros(shape=len(faces), dtype=np.int32)
    begins = [beg for beg, _ in obj['usemtl']]
    run_ends = begins[1:] + [len(faces)]
    seen = []
    for (beg, name), end in zip(obj['usemtl'], run_ends):
        if name not in seen:
            seen.append(name)
        mids[beg:end] = seen.index(name) + 1
    return mids
def objmerge(obj, other):
    """Merge *other* into *obj* in place and return *obj*.

    Each face entry is a triple of indices (vertex, texcoord, normal) into
    the vp/vt/vn arrays, so when those arrays are concatenated the indices of
    ``other`` must be shifted by the corresponding array sizes of ``obj``.

    BUGFIX: the original offset every index component by ``len(obj['f'])``
    (the face count), which corrupts the merged mesh whenever the vertex,
    texcoord or normal counts differ from the face count.
    """
    # Per-component offsets, broadcast over the last axis of the face array.
    offset = np.array([len(obj['vp']), len(obj['vt']), len(obj['vn'])])
    obj['f'] = np.concatenate([obj['f'], other['f'] + offset], axis=0)
    obj['vp'] = np.concatenate([obj['vp'], other['vp']], axis=0)
    obj['vn'] = np.concatenate([obj['vn'], other['vn']], axis=0)
    obj['vt'] = np.concatenate([obj['vt'], other['vt']], axis=0)
    return obj
def objautoscale(obj):
    """Center the mesh at its centroid and scale it to fit the unit box, in place."""
    verts = obj['vp']
    verts -= np.average(verts, axis=0)
    verts /= np.max(np.abs(verts))
def objflipaxis(obj, x=False, y=False, z=False):
    """Mirror positions and normals about the selected axes, in place.

    Mirroring an odd number of axes reverses the mesh orientation, so the
    face winding is flipped to compensate in that case.
    """
    for axis, mirrored in enumerate((x, y, z)):
        if mirrored:
            obj['vp'][:, axis] *= -1
            obj['vn'][:, axis] *= -1
    # Odd parity of flips -> reverse the winding order.
    if (x != y) != z:
        objflipface(obj)
def objswapaxis(obj, a=1, b=2):
    """Exchange coordinate axes *a* and *b* of positions and normals, in place."""
    for key in ('vp', 'vn'):
        obj[key][:, (a, b)] = obj[key][:, (b, a)]
def objreorient(obj, orient):
    """Permute and/or negate the mesh axes according to *orient*, in place.

    *orient* is a permutation of 'xyz'; an uppercase letter negates that
    output axis, and a leading '-' flips the face winding afterwards.
    Returns the modified obj.
    """
    flip_faces = orient.startswith('-')
    if flip_faces:
        orient = orient[1:]
    src = ['xyz'.index(ch.lower()) for ch in orient]
    negate = [ch.isupper() for ch in orient]
    if src != [0, 1, 2]:
        obj['vp'][:, (0, 1, 2)] = obj['vp'][:, tuple(src)]
        obj['vn'][:, (0, 1, 2)] = obj['vn'][:, tuple(src)]
    for axis, neg in enumerate(negate):
        if neg:
            obj['vp'][:, axis] = -obj['vp'][:, axis]
            obj['vn'][:, axis] = -obj['vn'][:, axis]
    if flip_faces:
        objflipface(obj)
    return obj
def objflipface(obj):
    """Reverse the vertex order of every face (flips winding), in place."""
    obj['f'][:, :, :] = obj['f'][:, ::-1, :]
def objflipnorm(obj):
    """Invert all vertex normals (rebinds obj['vn'] to a negated copy)."""
    obj['vn'] = np.negative(obj['vn'])
def objbothface(obj):
    """Make the mesh double-sided: append a reversed-winding copy of every
    face together with negated normals."""
    reversed_faces = obj['f'][:, ::-1, :].copy()
    obj['f'] = np.concatenate([obj['f'], reversed_faces])
    obj['vn'] = np.concatenate([obj['vn'], -obj['vn']])
def objmknorm(obj):
    """Recompute flat per-face normals from the geometry, in place.

    Every face i gets one normal (the normalized cross product of its edge
    vectors), and the face array is rewritten so all three vertices of face
    i reference normal i.
    """
    vert_idx = obj['f'][:, :, 0]
    tex_idx = obj['f'][:, :, 1]
    corners = obj['vp'][vert_idx]
    face_nrm = np.cross(corners[:, 2] - corners[:, 0], corners[:, 1] - corners[:, 0])
    face_nrm /= np.linalg.norm(face_nrm, axis=1, keepdims=True)
    n_faces = obj['f'].shape[0]
    # Every vertex of face i points at normal index i.
    nrm_idx = np.repeat(np.arange(n_faces)[:, np.newaxis], 3, axis=1)
    obj['vn'] = face_nrm
    obj['f'] = np.stack([vert_idx, tex_idx, nrm_idx], axis=2)
def objbreakdown(obj):
    """Subdivide every triangle into four by splitting each edge at its
    midpoint (one level of 1-to-4 subdivision), in place.

    For each face, three midpoint vertices/texcoords and three averaged,
    re-normalized normals are appended, and the face is replaced by three
    corner triangles plus the central midpoint triangle.
    """
    # Accumulators for the newly created vertices/texcoords/normals/faces.
    res = {'f': [], 'vp': [], 'vt': [], 'vn': []}
    # Running sizes of the arrays: new entries are indexed from here on.
    lp, lt, ln = len(obj['vp']), len(obj['vt']), len(obj['vn'])
    for i in range(len(obj['f'])):
        # faces_i rows: [vertex indices, texcoord indices, normal indices]
        faces_i = obj['f'][i].swapaxes(0, 1)
        pos = obj['vp'][faces_i[0]]
        tex = obj['vt'][faces_i[1]]
        nrm = obj['vn'][faces_i[2]]
        # Edge midpoints: npK lies opposite corner K.
        np0 = (pos[1] + pos[2]) / 2
        np1 = (pos[2] + pos[0]) / 2
        np2 = (pos[0] + pos[1]) / 2
        nt0 = (tex[1] + tex[2]) / 2
        nt1 = (tex[2] + tex[0]) / 2
        nt2 = (tex[0] + tex[1]) / 2
        # Midpoint normals: sum of the edge's endpoint normals, re-normalized.
        nn0 = nrm[1] + nrm[2]
        nn1 = nrm[2] + nrm[0]
        nn2 = nrm[0] + nrm[1]
        nn0 /= np.linalg.norm(nn0, axis=0, keepdims=True)
        nn1 /= np.linalg.norm(nn1, axis=0, keepdims=True)
        nn2 /= np.linalg.norm(nn2, axis=0, keepdims=True)
        res['vp'] += [np0, np1, np2]
        res['vt'] += [nt0, nt1, nt2]
        res['vn'] += [nn0, nn1, nn2]
        # Three corner triangles (original corner + two adjacent midpoints)...
        res['f'].append(np.array([
            [faces_i[0, 0], lp+2, lp+1],
            [faces_i[1, 0], lt+2, lt+1],
            [faces_i[2, 0], ln+2, ln+1],
        ], dtype=np.int32))
        res['f'].append(np.array([
            [faces_i[0, 1], lp+0, lp+2],
            [faces_i[1, 1], lt+0, lt+2],
            [faces_i[2, 1], ln+0, ln+2],
        ], dtype=np.int32))
        res['f'].append(np.array([
            [faces_i[0, 2], lp+1, lp+0],
            [faces_i[1, 2], lt+1, lt+0],
            [faces_i[2, 2], ln+1, ln+0],
        ], dtype=np.int32))
        # ...plus the central triangle of the three midpoints.
        res['f'].append(np.array([
            [lp+0, lp+1, lp+2],
            [lt+0, lt+1, lt+2],
            [ln+0, ln+1, ln+2],
        ], dtype=np.int32))
        lp += 3
        lt += 3
        ln += 3
    # Faces were accumulated as [component][vertex]; swap back to [vertex][component].
    obj['f'] = np.array(res['f']).swapaxes(1, 2)
    obj['vp'] = np.concatenate([obj['vp'], np.array(res['vp'])], axis=0)
    obj['vt'] = np.concatenate([obj['vt'], np.array(res['vt'])], axis=0)
    obj['vn'] = np.concatenate([obj['vn'], np.array(res['vn'])], axis=0)
def objshow(obj, visual='color', res=(512, 512), ortho=False, showball=False, lightdir=[0.4, -1.5, 0.8]):
    """Open an interactive taichi_three viewer for the given obj-dict.

    Args:
     - visual: 'color' shows the shaded render; 'normal', 'idepth' or
       'texcoor' show the corresponding framebuffer instead.
     - res: window/camera resolution.
     - ortho: use an orthographic camera instead of perspective.
     - showball: additionally render a reference sphere.
     - lightdir: directional light vector.
       NOTE(review): mutable default argument -- safe only if never mutated.

    The loop runs until the window is closed or ESC is pressed.
    """
    import taichi_three as t3
    t3.reset()
    scene = t3.Scene()
    model = t3.Model.from_obj(obj)
    scene.add_model(model)
    if showball:
        ball = t3.Model.from_obj(t3.readobj('assets/sphere.obj', scale=0.6))
        scene.add_model(ball)
    camera = t3.Camera(res=res)
    if visual != 'color':
        # Attach an auxiliary framebuffer; dim depends on the requested view.
        dim = 3
        if visual == 'idepth':
            dim = 0
        if visual == 'texcoor':
            dim = 2
        camera.fb.add_buffer('normal', dim)
    if ortho:
        camera.type = camera.ORTHO
    scene.add_camera(camera)
    light = t3.Light(dir=lightdir)
    scene.add_light(light)
    gui = t3.GUI('Model', camera.res)
    while gui.running:
        gui.get_event(None)
        gui.running = not gui.is_pressed(gui.ESCAPE)
        camera.from_mouse(gui)
        if showball:
            # Keep the reference ball offset to the lower-right corner.
            ball.L2W.offset[None] = t3.Vector([1.75, -1.75, 0.0])
        scene.render()
        if visual == 'normal':
            # Map normals from [-1, 1] into displayable [0, 1].
            gui.set_image(camera.fb['normal'].to_numpy() * 0.5 + 0.5)
        elif visual == 'color':
            gui.set_image(camera.img)
        else:
            gui.set_image(camera.fb[visual].to_numpy())
        gui.show()
| [
"taichi_three.reset",
"taichi_three.GUI",
"taichi_three.Light",
"io.BytesIO",
"math.cos",
"numpy.array",
"taichi_three.readobj",
"numpy.linalg.norm",
"taichi_three.Scene",
"numpy.arange",
"numpy.cross",
"numpy.linspace",
"taichi_three.Vector",
"numpy.concatenate",
"numpy.meshgrid",
"nu... | [((4620, 4668), 'numpy.concatenate', 'np.concatenate', (["[obj['vp'], other['vp']]"], {'axis': '(0)'}), "([obj['vp'], other['vp']], axis=0)\n", (4634, 4668), True, 'import numpy as np\n'), ((4685, 4733), 'numpy.concatenate', 'np.concatenate', (["[obj['vn'], other['vn']]"], {'axis': '(0)'}), "([obj['vn'], other['vn']], axis=0)\n", (4699, 4733), True, 'import numpy as np\n'), ((4750, 4798), 'numpy.concatenate', 'np.concatenate', (["[obj['vt'], other['vt']]"], {'axis': '(0)'}), "([obj['vt'], other['vt']], axis=0)\n", (4764, 4798), True, 'import numpy as np\n'), ((4856, 4885), 'numpy.average', 'np.average', (["obj['vp']"], {'axis': '(0)'}), "(obj['vp'], axis=0)\n", (4866, 4885), True, 'import numpy as np\n'), ((6052, 6070), 'numpy.array', 'np.array', (["obj['f']"], {}), "(obj['f'])\n", (6060, 6070), True, 'import numpy as np\n'), ((6126, 6157), 'numpy.concatenate', 'np.concatenate', (["[obj['f'], tmp]"], {}), "([obj['f'], tmp])\n", (6140, 6157), True, 'import numpy as np\n'), ((6174, 6213), 'numpy.concatenate', 'np.concatenate', (["[obj['vn'], -obj['vn']]"], {}), "([obj['vn'], -obj['vn']])\n", (6188, 6213), True, 'import numpy as np\n'), ((6325, 6371), 'numpy.cross', 'np.cross', (['(p[:, 2] - p[:, 0])', '(p[:, 1] - p[:, 0])'], {}), '(p[:, 2] - p[:, 0], p[:, 1] - p[:, 0])\n', (6333, 6371), True, 'import numpy as np\n'), ((6383, 6425), 'numpy.linalg.norm', 'np.linalg.norm', (['nrm'], {'axis': '(1)', 'keepdims': '(True)'}), '(nrm, axis=1, keepdims=True)\n', (6397, 6425), True, 'import numpy as np\n'), ((8739, 8749), 'taichi_three.reset', 't3.reset', ([], {}), '()\n', (8747, 8749), True, 'import taichi_three as t3\n'), ((8762, 8772), 'taichi_three.Scene', 't3.Scene', ([], {}), '()\n', (8770, 8772), True, 'import taichi_three as t3\n'), ((8785, 8807), 'taichi_three.Model.from_obj', 't3.Model.from_obj', (['obj'], {}), '(obj)\n', (8802, 8807), True, 'import taichi_three as t3\n'), ((8972, 8990), 'taichi_three.Camera', 't3.Camera', ([], {'res': 'res'}), '(res=res)\n', 
(8981, 8990), True, 'import taichi_three as t3\n'), ((9270, 9292), 'taichi_three.Light', 't3.Light', ([], {'dir': 'lightdir'}), '(dir=lightdir)\n', (9278, 9292), True, 'import taichi_three as t3\n'), ((9331, 9358), 'taichi_three.GUI', 't3.GUI', (['"""Model"""', 'camera.res'], {}), "('Model', camera.res)\n", (9337, 9358), True, 'import taichi_three as t3\n'), ((474, 510), 'numpy.array', 'np.array', (['vertices'], {'dtype': 'np.float32'}), '(vertices, dtype=np.float32)\n', (482, 510), True, 'import numpy as np\n'), ((527, 558), 'numpy.array', 'np.array', (['faces'], {'dtype': 'np.int32'}), '(faces, dtype=np.int32)\n', (535, 558), True, 'import numpy as np\n'), ((579, 616), 'numpy.array', 'np.array', (['texcoords'], {'dtype': 'np.float32'}), '(texcoords, dtype=np.float32)\n', (587, 616), True, 'import numpy as np\n'), ((635, 670), 'numpy.array', 'np.array', (['normals'], {'dtype': 'np.float32'}), '(normals, dtype=np.float32)\n', (643, 670), True, 'import numpy as np\n'), ((2085, 2097), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2094, 2097), True, 'import numpy as np\n'), ((2111, 2123), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2120, 2123), True, 'import numpy as np\n'), ((2137, 2157), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (2148, 2157), True, 'import numpy as np\n'), ((2171, 2191), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (2182, 2191), True, 'import numpy as np\n'), ((2629, 2695), 'numpy.concatenate', 'np.concatenate', (['[faces[:, (0, 1, 2)], faces[:, (0, 2, 3)]]'], {'axis': '(0)'}), '([faces[:, (0, 1, 2)], faces[:, (0, 2, 3)]], axis=0)\n', (2643, 2695), True, 'import numpy as np\n'), ((2714, 2735), 'numpy.array', 'np.array', (['[[0, 0, 1]]'], {}), '([[0, 0, 1]])\n', (2722, 2735), True, 'import numpy as np\n'), ((4910, 4927), 'numpy.abs', 'np.abs', (["obj['vp']"], {}), "(obj['vp'])\n", (4916, 4927), True, 'import numpy as np\n'), ((6436, 6464), 'numpy.arange', 'np.arange', 
(["obj['f'].shape[0]"], {}), "(obj['f'].shape[0])\n", (6445, 6464), True, 'import numpy as np\n'), ((7293, 7335), 'numpy.linalg.norm', 'np.linalg.norm', (['nn0'], {'axis': '(0)', 'keepdims': '(True)'}), '(nn0, axis=0, keepdims=True)\n', (7307, 7335), True, 'import numpy as np\n'), ((7351, 7393), 'numpy.linalg.norm', 'np.linalg.norm', (['nn1'], {'axis': '(0)', 'keepdims': '(True)'}), '(nn1, axis=0, keepdims=True)\n', (7365, 7393), True, 'import numpy as np\n'), ((7409, 7451), 'numpy.linalg.norm', 'np.linalg.norm', (['nn2'], {'axis': '(0)', 'keepdims': '(True)'}), '(nn2, axis=0, keepdims=True)\n', (7423, 7451), True, 'import numpy as np\n'), ((283, 296), 'io.BytesIO', 'BytesIO', (['code'], {}), '(code)\n', (290, 296), False, 'from io import BytesIO\n'), ((1956, 2014), 'numpy.array', 'np.array', (['[(x, y), (x, y + 1), (x + 1, y + 1), (x + 1, y)]'], {}), '([(x, y), (x, y + 1), (x + 1, y + 1), (x + 1, y)])\n', (1964, 2014), True, 'import numpy as np\n'), ((3663, 3716), 'numpy.concatenate', 'np.concatenate', (['[parts[name], faces[beg:end]]'], {'axis': '(0)'}), '([parts[name], faces[beg:end]], axis=0)\n', (3677, 3716), True, 'import numpy as np\n'), ((7587, 7716), 'numpy.array', 'np.array', (['[[faces_i[0, 0], lp + 2, lp + 1], [faces_i[1, 0], lt + 2, lt + 1], [faces_i\n [2, 0], ln + 2, ln + 1]]'], {'dtype': 'np.int32'}), '([[faces_i[0, 0], lp + 2, lp + 1], [faces_i[1, 0], lt + 2, lt + 1],\n [faces_i[2, 0], ln + 2, ln + 1]], dtype=np.int32)\n', (7595, 7716), True, 'import numpy as np\n'), ((7773, 7902), 'numpy.array', 'np.array', (['[[faces_i[0, 1], lp + 0, lp + 2], [faces_i[1, 1], lt + 0, lt + 2], [faces_i\n [2, 1], ln + 0, ln + 2]]'], {'dtype': 'np.int32'}), '([[faces_i[0, 1], lp + 0, lp + 2], [faces_i[1, 1], lt + 0, lt + 2],\n [faces_i[2, 1], ln + 0, ln + 2]], dtype=np.int32)\n', (7781, 7902), True, 'import numpy as np\n'), ((7959, 8088), 'numpy.array', 'np.array', (['[[faces_i[0, 2], lp + 1, lp + 0], [faces_i[1, 2], lt + 1, lt + 0], [faces_i\n [2, 2], ln + 1, ln + 
0]]'], {'dtype': 'np.int32'}), '([[faces_i[0, 2], lp + 1, lp + 0], [faces_i[1, 2], lt + 1, lt + 0],\n [faces_i[2, 2], ln + 1, ln + 0]], dtype=np.int32)\n', (7967, 8088), True, 'import numpy as np\n'), ((8145, 8253), 'numpy.array', 'np.array', (['[[lp + 0, lp + 1, lp + 2], [lt + 0, lt + 1, lt + 2], [ln + 0, ln + 1, ln + 2]]'], {'dtype': 'np.int32'}), '([[lp + 0, lp + 1, lp + 2], [lt + 0, lt + 1, lt + 2], [ln + 0, ln +\n 1, ln + 2]], dtype=np.int32)\n', (8153, 8253), True, 'import numpy as np\n'), ((8343, 8361), 'numpy.array', 'np.array', (["res['f']"], {}), "(res['f'])\n", (8351, 8361), True, 'import numpy as np\n'), ((8420, 8439), 'numpy.array', 'np.array', (["res['vp']"], {}), "(res['vp'])\n", (8428, 8439), True, 'import numpy as np\n'), ((8493, 8512), 'numpy.array', 'np.array', (["res['vt']"], {}), "(res['vt'])\n", (8501, 8512), True, 'import numpy as np\n'), ((8566, 8585), 'numpy.array', 'np.array', (["res['vn']"], {}), "(res['vn'])\n", (8574, 8585), True, 'import numpy as np\n'), ((8885, 8927), 'taichi_three.readobj', 't3.readobj', (['"""assets/sphere.obj"""'], {'scale': '(0.6)'}), "('assets/sphere.obj', scale=0.6)\n", (8895, 8927), True, 'import taichi_three as t3\n'), ((9551, 9580), 'taichi_three.Vector', 't3.Vector', (['[1.75, -1.75, 0.0]'], {}), '([1.75, -1.75, 0.0])\n', (9560, 9580), True, 'import taichi_three as t3\n'), ((1182, 1197), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (1190, 1197), False, 'import math\n'), ((1199, 1214), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (1207, 1214), False, 'import math\n'), ((2477, 2503), 'numpy.zeros', 'np.zeros', (['(n_particles, 1)'], {}), '((n_particles, 1))\n', (2485, 2503), True, 'import numpy as np\n'), ((2566, 2603), 'numpy.zeros', 'np.zeros', (['(n_faces, 4)'], {'dtype': 'np.int_'}), '((n_faces, 4), dtype=np.int_)\n', (2574, 2603), True, 'import numpy as np\n'), ((6549, 6574), 'numpy.array', 'np.array', (['[fip, fit, fin]'], {}), '([fip, fit, fin])\n', (6557, 6574), True, 'import numpy as 
np\n'), ((2214, 2233), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (2225, 2233), True, 'import numpy as np\n'), ((2297, 2326), 'numpy.meshgrid', 'np.meshgrid', (['xi[:-1]', 'yi[:-1]'], {}), '(xi[:-1], yi[:-1])\n', (2308, 2326), True, 'import numpy as np\n')] |
import json
import os
import sys
from datetime import date, datetime
import numpy as np
import pandas as pd
from dateutil import rrule
from config import *
class Krypfolio:
    """Backtester for a periodically rebalanced crypto portfolio with a
    trailing stop-loss.

    Portfolio/allocation structure (shared by all methods):
        {"timestamp": datetime, "allocations": [
            {"symbol": str, "close": float, "ratio": float, "amount": float, ...},
            ...
        ]}
    """

    def __init__(self, debug=True) -> None:
        # debug: when True, progress messages are printed and the daily
        # allocation history is dumped to JSON at the end of main().
        super().__init__()
        self.debug = debug
    def _print(self, msg):
        # Print only in debug mode; silently drop messages otherwise.
        if self.debug:
            print(msg)
        else:
            pass
    def balance(self, portfolio):
        """
        Calculate balance of the portfolio

        Returns the total market value: sum of close price * held amount
        over every coin in the portfolio.
        """
        return sum(
            [alloc["close"] * alloc["amount"] for alloc in portfolio["allocations"]]
        )
    def price(self, portfolio):
        """
        Calculate price of the portfolio

        Returns the ratio-weighted index price: sum of close price * target
        ratio over every coin (used to compare market levels between
        rebalances, independent of the amounts actually held).
        """
        return sum(
            [alloc["close"] * alloc["ratio"] for alloc in portfolio["allocations"]]
        )
    def update_price(self, portfolio, allocation):
        """
        Utility to update the latest prices for portfolio

        Copies the "close" price from `allocation` into `portfolio` for every
        matching symbol (O(n*m) symbol match). Mutates and returns `portfolio`.
        """
        # Update price of coins in the portfolio
        for x in portfolio["allocations"]:
            for y in allocation["allocations"]:
                if x["symbol"] == y["symbol"]:
                    x["close"] = y["close"]
        return portfolio
    def rebalance(self, portfolio, prices, allocation, investment):
        """
        Distribute the investment based on each coin's ratio

        Args:
            portfolio: current holdings (mutated with fresh prices)
            prices: history of index prices from previous rebalances
            allocation: target allocation for this rebalance (mutated:
                "amount" is set per coin); becomes the new portfolio
            investment: cash still waiting to be injected

        Returns:
            (allocation, remaining_investment)
        """
        # Update price of coins in the portfolio
        portfolio = self.update_price(portfolio, allocation)
        balance_ = self.balance(portfolio)
        price_ = self.price(allocation)
        # NOTE(review): assumes the first allocation entry is bitcoin —
        # main() only guarantees bitcoin is *present*, not first; confirm.
        self._print(
            "Current price of Bitcoin: {}".format(
                int(allocation["allocations"][0]["close"])
            )
        )
        self._print("Current portfolio's balance: {}".format(int(balance_)))
        # Inject investment in three stages
        # The three-point pattern (a, b, c) of the last two recorded index
        # prices plus the current one decides the stage:
        #   rising trend        -> inject everything ("Third")
        #   local dip/recovery  -> inject 25% ("Second")
        #   local bottom        -> inject 20% ("First")
        #   falling trend       -> inject nothing
        fund = 0
        injection = None
        if investment > 0:
            if len(prices) >= 3:
                a, b, c = prices[-2], prices[-1], price_
                if a <= b and b <= c:
                    fund = investment
                    injection = "Third"
                if (a <= b and b >= c and a <= c) or (a >= b and b <= c and a <= c):
                    fund = 0.25 * investment
                    injection = "Second"
                if a >= b and b <= c and a >= c:
                    fund = 0.20 * investment
                    injection = "First"
                if (a >= b and b >= c) or (a <= b and b >= c and a >= c):
                    fund = 0
                    injection = None
            elif len(prices) == 2:
                a, b = prices[-1], price_
                if a <= b:
                    fund = 0.25 * investment
                    injection = "Second"
                else:
                    fund = 0
                    injection = None
            else:
                fund = 0.20 * investment
                injection = "First"
            # An empty portfolio (e.g. right after a stop-loss) always
            # restarts with a 20% injection, overriding the trend signal.
            if balance_ == 0:
                fund = 0.20 * investment
                injection = "First"
        balance_ += fund
        # Convert the target ratios into coin amounts at current prices.
        for alloc in allocation["allocations"]:
            alloc["amount"] = alloc["ratio"] * balance_ / alloc["close"]
        self._print(
            "{0} investment injection: {1} - leftover investment: {2}".format(
                injection, int(fund), int(investment - fund)
            )
        )
        return allocation, investment - fund
    def main(self, strategy, loss, r, start):
        """
        Args:
            strategy: strategy name (selects ./strategies/<strategy>.json)
            loss: trailing loss percentage (fraction, e.g. 0.2 = 20%)
            r: rebalance period in week
            start: start date ("YYYY-MM-DD" string)

        Side effects:
            writes a CSV of daily portfolio value to ./execution/results/
            and, in debug mode, a JSON of daily allocations.
        """
        # Initial investment (fixed backtest capital)
        investment = 10000
        init_investment = investment
        # Start date
        start = datetime.strptime(start, "%Y-%m-%d")
        today = date.today()
        intervals = list(rrule.rrule(rrule.WEEKLY, dtstart=start, until=today))
        intervals = [
            intervals[i] for i in range(len(intervals)) if i % r == 0
        ]  # rebalance after each r weeks
        # Portfolios should follow the same structure
        # List(Dict(symbol, price, ratio, market_cap, amount))
        # NOTE(review): file handle from open() is never closed — relies on GC.
        allocations = json.load(open(f"strategies/{strategy}.json", "r"))
        allocations = [
            {
                "timestamp": datetime.strptime(k, "%Y-%m-%d"),
                "allocations": allocations[k],
            }
            for k in allocations.keys()
        ]
        allocations = [
            alloc
            for alloc in allocations
            if "bitcoin" in [x["symbol"] for x in alloc["allocations"]]
        ]  # bitcoin must be in valid allocation
        krypfolio = allocations[0]
        for alloc in krypfolio["allocations"]:
            alloc["amount"] = 0  # init amount
        # Prepare the folder for results
        if not os.path.exists("./execution/results"):
            os.mkdir("./execution/results")
        # Rebalance the portfolio
        start_btc = None
        start_date = None
        balance_ = None
        end_balance_ = None
        max_balance = -np.inf  # trailing high-water mark for the stop-loss
        prices = list()
        kf_fund = list()  # [timestamp, total value] per day
        kf_allocation = dict()  # date string -> allocation snapshot
        for alloc in allocations:
            if alloc["timestamp"] in intervals:
                # Rebalance day: re-distribute holdings to the target ratios.
                total_ratio = sum([x["ratio"] for x in alloc["allocations"]])
                if (
                    np.abs(total_ratio - 1) > 0.001
                ):  # check the validity of an allocation
                    self._print("You need to check the allocation strategy")
                else:
                    self._print("*********************************")
                    self._print("Rebalance at {}".format(alloc["timestamp"]))
                    krypfolio, investment = self.rebalance(
                        krypfolio, prices, alloc, investment
                    )
                    balance_ = self.balance(krypfolio)
                    self._print(
                        "Current total value: {}".format(int(balance_ + investment))
                    )
                    kf_fund.append([alloc["timestamp"], balance_ + investment])
                    kf_allocation[alloc["timestamp"].strftime("%Y-%m-%d")] = krypfolio[
                        "allocations"
                    ]
                    price_ = self.price(krypfolio)
                    prices.append(price_)
                    if balance_ > max_balance:
                        max_balance = balance_
                    # Trailing stop-loss: liquidate everything back to cash
                    # when the drawdown from the high-water mark exceeds `loss`.
                    if ((max_balance - balance_) / max_balance > loss) and (
                        balance_ != 0
                    ):
                        # Reset the portfolio
                        self._print("STOP LOSS")
                        for alloc_ in krypfolio["allocations"]:
                            alloc_["amount"] = 0
                        investment += balance_
                        max_balance = -np.inf
                    # Record the first observed bitcoin price for the report.
                    if not start_btc:
                        start_btc = [
                            x["close"]
                            for x in alloc["allocations"]
                            if x["symbol"] == "bitcoin"
                        ][0]
                        start_date = alloc["timestamp"]
            else:  # daily alloc, no ratio was calculated
                # Non-rebalance day: only mark the portfolio to market.
                krypfolio = self.update_price(krypfolio, alloc)
                balance_ = self.balance(krypfolio)
                kf_fund.append([alloc["timestamp"], balance_ + investment])
                kf_allocation[alloc["timestamp"].strftime("%Y-%m-%d")] = krypfolio[
                    "allocations"
                ]
                if balance_ > max_balance:
                    # NOTE(review): the + 0.001 differs from the rebalance
                    # branch (which uses balance_ exactly); presumably it
                    # avoids a 0/0 in the drawdown ratio below — confirm.
                    max_balance = balance_ + 0.001
                if ((max_balance - balance_) / max_balance > loss) and (balance_ != 0):
                    # Reset the portfolio
                    self._print("*********************************")
                    self._print("STOP LOSS at {}".format(alloc["timestamp"]))
                    self._print("Current portfolio's balance {}".format(int(balance_)))
                    self._print(
                        "Current loss {}".format(
                            round((max_balance - balance_) / max_balance, 3)
                        )
                    )
                    for alloc_ in krypfolio["allocations"]:
                        alloc_["amount"] = 0
                    investment += balance_
                    max_balance = -np.inf
        # Final report: compare buy-and-hold bitcoin vs the strategy multiple.
        end_date = allocations[-1]["timestamp"]
        end_btc = [
            x["close"]
            for x in allocations[-1]["allocations"]
            if x["symbol"] == "bitcoin"
        ][0]
        end_balance_ = investment + balance_
        self._print("*********************************")
        self._print("REPORT")
        self._print("Start date: {}".format(start_date))
        self._print("End date: {}".format(end_date))
        self._print("Bitcoin: {}x".format(round(end_btc / start_btc, 1)))
        self._print("Krypfolio: {}x".format(round(end_balance_ / init_investment, 1)))
        self._print("*********************************")
        # Write Krypfolio daily results to csv
        df = pd.DataFrame(kf_fund, columns=["timestamp", "value"])
        df.to_csv(
            "./execution/results/{0}_{1}_{2}_{3}.csv".format(
                strategy, start.strftime("%Y-%m-%d"), loss, r
            ),
            index=False,
        )
        if self.debug:
            # Write Krypfolio daily allocations to json
            # NOTE(review): file handle from open() is never closed.
            json.dump(
                kf_allocation,
                open(
                    "./execution/results/{0}_{1}_{2}_{3}.json".format(
                        strategy, start.strftime("%Y-%m-%d"), loss, r
                    ),
                    "w",
                ),
                indent=4,
                sort_keys=True,
                default=str,
            )
if __name__ == "__main__":
    # Strategy file name is derived from the config-level parameters
    # (n_coins, alpha, cap come from `from config import *`).
    cap_pct = str(int(100 * cap))
    krypfolio = Krypfolio(debug=True)
    krypfolio.main(
        strategy=f"HODL{n_coins}-{alpha}-days-{cap_pct}-cap",
        loss=loss,
        r=r,
        start=start,
    )
| [
"os.path.exists",
"numpy.abs",
"dateutil.rrule.rrule",
"datetime.datetime.strptime",
"os.mkdir",
"pandas.DataFrame",
"datetime.date.today"
] | [((3771, 3807), 'datetime.datetime.strptime', 'datetime.strptime', (['start', '"""%Y-%m-%d"""'], {}), "(start, '%Y-%m-%d')\n", (3788, 3807), False, 'from datetime import date, datetime\n'), ((3824, 3836), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3834, 3836), False, 'from datetime import date, datetime\n'), ((9177, 9230), 'pandas.DataFrame', 'pd.DataFrame', (['kf_fund'], {'columns': "['timestamp', 'value']"}), "(kf_fund, columns=['timestamp', 'value'])\n", (9189, 9230), True, 'import pandas as pd\n'), ((3863, 3916), 'dateutil.rrule.rrule', 'rrule.rrule', (['rrule.WEEKLY'], {'dtstart': 'start', 'until': 'today'}), '(rrule.WEEKLY, dtstart=start, until=today)\n', (3874, 3916), False, 'from dateutil import rrule\n'), ((4843, 4880), 'os.path.exists', 'os.path.exists', (['"""./execution/results"""'], {}), "('./execution/results')\n", (4857, 4880), False, 'import os\n'), ((4894, 4925), 'os.mkdir', 'os.mkdir', (['"""./execution/results"""'], {}), "('./execution/results')\n", (4902, 4925), False, 'import os\n'), ((4311, 4343), 'datetime.datetime.strptime', 'datetime.strptime', (['k', '"""%Y-%m-%d"""'], {}), "(k, '%Y-%m-%d')\n", (4328, 4343), False, 'from datetime import date, datetime\n'), ((5375, 5398), 'numpy.abs', 'np.abs', (['(total_ratio - 1)'], {}), '(total_ratio - 1)\n', (5381, 5398), True, 'import numpy as np\n')] |
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from linearmodels.iv.data import IVData
try:
import xarray as xr
MISSING_XARRAY = False
except ImportError:
MISSING_XARRAY = True
def test_numpy_2d() -> None:
    """A plain 2-d ndarray is wrapped with synthesized column/row labels."""
    raw = np.empty((10, 2))
    handler = IVData(raw)
    # Metadata: synthesized names "x.0"/"x.1" and default integer rows.
    assert handler.shape == (10, 2)
    assert handler.ndim == raw.ndim
    assert handler.cols == ["x.0", "x.1"]
    assert handler.rows == list(np.arange(10))
    assert handler.labels == {0: handler.rows, 1: handler.cols}
    # Data round-trips both as ndarray and as DataFrame.
    assert_equal(handler.ndarray, raw)
    expected = pd.DataFrame(raw, columns=handler.cols, index=handler.rows)
    assert_frame_equal(handler.pandas, expected)
def test_numpy_1d() -> None:
    """A 1-d ndarray is promoted to a single column named "x"."""
    raw = np.empty(10)
    handler = IVData(raw)
    # Metadata: one synthesized column, default integer row labels.
    assert handler.shape == (10, 1)
    assert handler.ndim == 2
    assert handler.cols == ["x"]
    assert handler.rows == list(np.arange(10))
    # The stored ndarray gains a trailing axis; the pandas view matches.
    assert_equal(handler.ndarray, raw[:, None])
    expected = pd.DataFrame(raw[:, None], columns=handler.cols, index=handler.rows)
    assert_frame_equal(handler.pandas, expected)
def test_pandas_df_numeric() -> None:
    """A numeric DataFrame keeps its own column and row labels."""
    values = np.empty((10, 2))
    idx = pd.date_range("2017-01-01", periods=10)
    frame = pd.DataFrame(values, columns=["a", "b"], index=idx)
    handler = IVData(frame)
    assert handler.ndim == 2
    assert handler.shape == (10, 2)
    assert handler.cols == list(frame.columns)
    assert handler.rows == list(frame.index)
    assert_equal(handler.ndarray, values)
    # The pandas view carries an explicit daily frequency.
    expected = pd.DataFrame(values, columns=handler.cols, index=handler.rows).asfreq("D")
    assert_frame_equal(handler.pandas, expected)
def test_pandas_series_numeric() -> None:
    """A numeric Series becomes a one-column handler named after the series."""
    values = np.empty(10)
    idx = pd.date_range("2017-01-01", periods=10)
    series = pd.Series(values, name="charlie", index=idx)
    handler = IVData(series)
    assert handler.ndim == 2
    assert handler.shape == (10, 1)
    assert handler.cols == [series.name]
    assert handler.rows == list(series.index)
    assert_equal(handler.ndarray, values[:, None])
    # The pandas view carries an explicit daily frequency.
    expected = pd.DataFrame(values[:, None], columns=handler.cols, index=handler.rows).asfreq("D")
    assert_frame_equal(handler.pandas, expected)
@pytest.mark.skipif(MISSING_XARRAY, reason="xarray not installed")
def test_xarray_1d() -> None:
    # A 1-d DataArray without coordinates: rows default to 0..9 and the
    # column name is synthesized from the supplied variable name.
    x_np = np.random.randn(10)
    x = xr.DataArray(x_np)
    dh = IVData(x, "some_variable")
    assert_equal(dh.ndarray, x_np[:, None])
    assert dh.rows == list(np.arange(10))
    assert dh.cols == ["some_variable.0"]
    expected = pd.DataFrame(x_np, columns=dh.cols, index=dh.rows)
    assert_frame_equal(expected, dh.pandas)
    # With an explicit "time" coordinate, the rows follow that index instead.
    index = pd.date_range("2017-01-01", periods=10)
    x = xr.DataArray(x_np, [("time", index)])
    dh = IVData(x, "some_variable")
    assert_equal(dh.ndarray, x_np[:, None])
    assert_series_equal(pd.Series(dh.rows), pd.Series(list(index)))
    assert dh.cols == ["some_variable.0"]
    expected = pd.DataFrame(x_np[:, None], columns=dh.cols, index=dh.rows)
    assert_frame_equal(expected, dh.pandas)
@pytest.mark.skipif(MISSING_XARRAY, reason="xarray not installed")
def test_xarray_2d() -> None:
    # A 2-d DataArray without coordinates: default integer rows and
    # synthesized "x.0"/"x.1" column names.
    x_np = np.random.randn(10, 2)
    x = xr.DataArray(x_np)
    dh = IVData(x)
    assert_equal(dh.ndarray, x_np)
    assert dh.rows == list(np.arange(10))
    assert dh.cols == ["x.0", "x.1"]
    expected = pd.DataFrame(x_np, columns=dh.cols, index=dh.rows)
    assert_frame_equal(expected, dh.pandas)
    # With named coordinates, both the time index and the variable names
    # are taken from the DataArray.
    index = pd.date_range("2017-01-01", periods=10)
    x = xr.DataArray(x_np, [("time", index), ("variables", ["apple", "banana"])])
    dh = IVData(x)
    assert_equal(dh.ndarray, x_np)
    assert_series_equal(pd.Series(dh.rows), pd.Series(list(index)))
    assert dh.cols == ["apple", "banana"]
    expected = pd.DataFrame(x_np, columns=dh.cols, index=dh.rows)
    assert_frame_equal(expected, dh.pandas)
def test_invalid_types() -> None:
    # Arrays with more than two dimensions are rejected, even when the
    # extra axes are singletons.
    with pytest.raises(ValueError):
        IVData(np.empty((1, 1, 1)))
    with pytest.raises(ValueError):
        IVData(np.empty((10, 2, 2)))
    # Merely exposing an ``ndim`` attribute is not enough to be accepted:
    # unknown wrapper types raise TypeError.
    with pytest.raises(TypeError):
        class AnotherClass(object):
            _ndim = 2
            @property
            def ndim(self) -> int:
                return self._ndim
        IVData(AnotherClass())
def test_string_cat_equiv() -> None:
    """Object-dtype strings and categoricals produce identical handler output."""
    letters = pd.Series(["a", "b", "a", "b", "c", "d", "a", "b"])
    numbers = pd.Series(np.arange(8.0))
    fruit = pd.Series(
        ["apple", "banana", "apple", "banana", "cherry", "date", "apple", "banana"]
    )
    frame = pd.DataFrame({"string": letters, "number": numbers, "other_string": fruit})
    plain = IVData(frame)
    # Re-encode one column as categorical; the expanded output must not change.
    as_cat = frame.copy()
    as_cat["string"] = as_cat["string"].astype("category")
    categorical = IVData(as_cat)
    assert_frame_equal(plain.pandas, categorical.pandas)
def test_existing_datahandler() -> None:
    """Wrapping an IVData in IVData yields a distinct but equal handler."""
    values = np.empty((10, 2))
    idx = pd.date_range("2017-01-01", periods=10)
    frame = pd.DataFrame(values, columns=["a", "b"], index=idx)
    first = IVData(frame)
    second = IVData(first)
    # New object, identical contents and metadata.
    assert first is not second
    assert first.cols == second.cols
    assert first.rows == second.rows
    assert first.ndim == second.ndim
    assert_equal(first.ndarray, second.ndarray)
    assert_frame_equal(first.pandas, second.pandas)
def test_categorical() -> None:
    # A categorical column in a DataFrame expands to dummy columns; the
    # first level ("a") is dropped as the reference category, and numeric
    # columns pass through unchanged.
    index = pd.date_range("2017-01-01", periods=10)
    cat = pd.Categorical(["a", "b", "a", "b", "a", "a", "b", "c", "c", "a"])
    num = np.empty(10)
    df = pd.DataFrame(dict(cat=cat, num=num), index=index)
    dh = IVData(df)
    assert dh.ndim == 2
    assert dh.shape == (10, 3)
    assert sorted(dh.cols) == sorted(["cat.b", "cat.c", "num"])
    assert dh.rows == list(index)
    assert_equal(dh.pandas["num"].values, num)
    assert_equal(dh.pandas["cat.b"].values, (cat == "b").astype(float))
    assert_equal(dh.pandas["cat.c"].values, (cat == "c").astype(float))
def test_categorical_series() -> None:
    """A categorical Series expands to dummies, dropping the first level."""
    idx = pd.date_range("2017-01-01", periods=10)
    levels = pd.Categorical(["a", "b", "a", "b", "a", "a", "b", "c", "c", "a"])
    series = pd.Series(levels, name="cat", index=idx)
    handler = IVData(series)
    assert handler.ndim == 2
    assert handler.shape == (10, 2)
    # Level "a" is dropped as the base category.
    assert sorted(handler.cols) == sorted(["cat.b", "cat.c"])
    assert handler.rows == list(idx)
    assert_equal(handler.pandas["cat.b"].values, (levels == "b").astype(float))
    assert_equal(handler.pandas["cat.c"].values, (levels == "c").astype(float))
def test_categorical_no_conversion() -> None:
    """With convert_dummies=False the categorical column is kept as-is."""
    idx = pd.date_range("2017-01-01", periods=10)
    levels = pd.Categorical(["a", "b", "a", "b", "a", "a", "b", "c", "c", "a"])
    series = pd.Series(levels, index=idx, name="cat")
    handler = IVData(series, convert_dummies=False)
    # Single undummified column, original row labels.
    assert handler.ndim == 2
    assert handler.shape == (10, 1)
    assert handler.cols == ["cat"]
    assert handler.rows == list(idx)
    assert_frame_equal(handler.pandas, pd.DataFrame(series))
def test_categorical_keep_first() -> None:
    # With drop_first=False every level gets its own dummy column,
    # including the first level "a".
    index = pd.date_range("2017-01-01", periods=10)
    cat = pd.Categorical(["a", "b", "a", "b", "a", "a", "b", "c", "c", "a"])
    num = np.empty(10)
    df = pd.DataFrame(dict(cat=cat, num=num), index=index)
    dh = IVData(df, drop_first=False)
    assert dh.ndim == 2
    assert dh.shape == (10, 4)
    assert sorted(dh.cols) == sorted(["cat.a", "cat.b", "cat.c", "num"])
    assert dh.rows == list(index)
    assert_equal(dh.pandas["num"].values, num)
    assert_equal(dh.pandas["cat.a"].values, (cat == "a").astype(float))
    assert_equal(dh.pandas["cat.b"].values, (cat == "b").astype(float))
    assert_equal(dh.pandas["cat.c"].values, (cat == "c").astype(float))
def test_nobs_missing_error() -> None:
    # None input without an explicit nobs cannot be sized and must raise.
    with pytest.raises(ValueError):
        IVData(None)
def test_incorrect_nobs() -> None:
    # An explicit nobs that contradicts the data's actual length must raise.
    x = np.empty((10, 1))
    with pytest.raises(ValueError):
        IVData(x, nobs=100)
def test_mixed_data() -> None:
    # A series mixing numbers and strings has no usable dtype and must raise.
    s = pd.Series([1, 2, "a", -3.0])
    with pytest.raises(ValueError):
        IVData(s)
def test_duplicate_column_names() -> None:
    """Duplicate column labels are ambiguous and must be rejected.

    Note: adds the ``-> None`` return annotation for consistency with
    every other test in this module.
    """
    x = pd.DataFrame(np.ones((3, 2)), columns=["x", "x"])
    with pytest.raises(ValueError):
        IVData(x)
| [
"pandas.Series",
"numpy.testing.assert_equal",
"numpy.ones",
"numpy.arange",
"pandas.Categorical",
"numpy.random.randn",
"numpy.empty",
"pytest.raises",
"xarray.DataArray",
"pytest.mark.skipif",
"pandas.DataFrame",
"pandas.testing.assert_frame_equal",
"linearmodels.iv.data.IVData",
"pandas... | [((2017, 2082), 'pytest.mark.skipif', 'pytest.mark.skipif', (['MISSING_XARRAY'], {'reason': '"""xarray not installed"""'}), "(MISSING_XARRAY, reason='xarray not installed')\n", (2035, 2082), False, 'import pytest\n'), ((2856, 2921), 'pytest.mark.skipif', 'pytest.mark.skipif', (['MISSING_XARRAY'], {'reason': '"""xarray not installed"""'}), "(MISSING_XARRAY, reason='xarray not installed')\n", (2874, 2921), False, 'import pytest\n'), ((343, 360), 'numpy.empty', 'np.empty', (['(10, 2)'], {}), '((10, 2))\n', (351, 360), True, 'import numpy as np\n'), ((371, 380), 'linearmodels.iv.data.IVData', 'IVData', (['x'], {}), '(x)\n', (377, 380), False, 'from linearmodels.iv.data import IVData\n'), ((496, 524), 'numpy.testing.assert_equal', 'assert_equal', (['xdh.ndarray', 'x'], {}), '(xdh.ndarray, x)\n', (508, 524), False, 'from numpy.testing import assert_equal\n'), ((534, 583), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': 'xdh.cols', 'index': 'xdh.rows'}), '(x, columns=xdh.cols, index=xdh.rows)\n', (546, 583), True, 'import pandas as pd\n'), ((588, 622), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['xdh.pandas', 'df'], {}), '(xdh.pandas, df)\n', (606, 622), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((746, 758), 'numpy.empty', 'np.empty', (['(10)'], {}), '(10)\n', (754, 758), True, 'import numpy as np\n'), ((769, 778), 'linearmodels.iv.data.IVData', 'IVData', (['x'], {}), '(x)\n', (775, 778), False, 'from linearmodels.iv.data import IVData\n'), ((880, 917), 'numpy.testing.assert_equal', 'assert_equal', (['xdh.ndarray', 'x[:, None]'], {}), '(xdh.ndarray, x[:, None])\n', (892, 917), False, 'from numpy.testing import assert_equal\n'), ((927, 985), 'pandas.DataFrame', 'pd.DataFrame', (['x[:, None]'], {'columns': 'xdh.cols', 'index': 'xdh.rows'}), '(x[:, None], columns=xdh.cols, index=xdh.rows)\n', (939, 985), True, 'import pandas as pd\n'), ((990, 1024), 'pandas.testing.assert_frame_equal', 
'assert_frame_equal', (['xdh.pandas', 'df'], {}), '(xdh.pandas, df)\n', (1008, 1024), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((1105, 1122), 'numpy.empty', 'np.empty', (['(10, 2)'], {}), '((10, 2))\n', (1113, 1122), True, 'import numpy as np\n'), ((1135, 1174), 'pandas.date_range', 'pd.date_range', (['"""2017-01-01"""'], {'periods': '(10)'}), "('2017-01-01', periods=10)\n", (1148, 1174), True, 'import pandas as pd\n'), ((1185, 1233), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': "['a', 'b']", 'index': 'index'}), "(x, columns=['a', 'b'], index=index)\n", (1197, 1233), True, 'import pandas as pd\n'), ((1244, 1255), 'linearmodels.iv.data.IVData', 'IVData', (['xdf'], {}), '(xdf)\n', (1250, 1255), False, 'from linearmodels.iv.data import IVData\n'), ((1365, 1393), 'numpy.testing.assert_equal', 'assert_equal', (['xdh.ndarray', 'x'], {}), '(xdh.ndarray, x)\n', (1377, 1393), False, 'from numpy.testing import assert_equal\n'), ((1469, 1503), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['xdh.pandas', 'df'], {}), '(xdh.pandas, df)\n', (1487, 1503), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((1588, 1600), 'numpy.empty', 'np.empty', (['(10)'], {}), '(10)\n', (1596, 1600), True, 'import numpy as np\n'), ((1613, 1652), 'pandas.date_range', 'pd.date_range', (['"""2017-01-01"""'], {'periods': '(10)'}), "('2017-01-01', periods=10)\n", (1626, 1652), True, 'import pandas as pd\n'), ((1662, 1703), 'pandas.Series', 'pd.Series', (['x'], {'name': '"""charlie"""', 'index': 'index'}), "(x, name='charlie', index=index)\n", (1671, 1703), True, 'import pandas as pd\n'), ((1714, 1724), 'linearmodels.iv.data.IVData', 'IVData', (['xs'], {}), '(xs)\n', (1720, 1724), False, 'from linearmodels.iv.data import IVData\n'), ((1825, 1862), 'numpy.testing.assert_equal', 'assert_equal', (['xdh.ndarray', 'x[:, None]'], {}), '(xdh.ndarray, x[:, None])\n', (1837, 1862), False, 'from numpy.testing import 
assert_equal\n'), ((1947, 1981), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['xdh.pandas', 'df'], {}), '(xdh.pandas, df)\n', (1965, 1981), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((2124, 2143), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (2139, 2143), True, 'import numpy as np\n'), ((2152, 2170), 'xarray.DataArray', 'xr.DataArray', (['x_np'], {}), '(x_np)\n', (2164, 2170), True, 'import xarray as xr\n'), ((2180, 2206), 'linearmodels.iv.data.IVData', 'IVData', (['x', '"""some_variable"""'], {}), "(x, 'some_variable')\n", (2186, 2206), False, 'from linearmodels.iv.data import IVData\n'), ((2211, 2250), 'numpy.testing.assert_equal', 'assert_equal', (['dh.ndarray', 'x_np[:, None]'], {}), '(dh.ndarray, x_np[:, None])\n', (2223, 2250), False, 'from numpy.testing import assert_equal\n'), ((2350, 2400), 'pandas.DataFrame', 'pd.DataFrame', (['x_np'], {'columns': 'dh.cols', 'index': 'dh.rows'}), '(x_np, columns=dh.cols, index=dh.rows)\n', (2362, 2400), True, 'import pandas as pd\n'), ((2405, 2444), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['expected', 'dh.pandas'], {}), '(expected, dh.pandas)\n', (2423, 2444), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((2458, 2497), 'pandas.date_range', 'pd.date_range', (['"""2017-01-01"""'], {'periods': '(10)'}), "('2017-01-01', periods=10)\n", (2471, 2497), True, 'import pandas as pd\n'), ((2506, 2543), 'xarray.DataArray', 'xr.DataArray', (['x_np', "[('time', index)]"], {}), "(x_np, [('time', index)])\n", (2518, 2543), True, 'import xarray as xr\n'), ((2553, 2579), 'linearmodels.iv.data.IVData', 'IVData', (['x', '"""some_variable"""'], {}), "(x, 'some_variable')\n", (2559, 2579), False, 'from linearmodels.iv.data import IVData\n'), ((2584, 2623), 'numpy.testing.assert_equal', 'assert_equal', (['dh.ndarray', 'x_np[:, None]'], {}), '(dh.ndarray, x_np[:, None])\n', (2596, 2623), False, 'from numpy.testing 
import assert_equal\n'), ((2749, 2808), 'pandas.DataFrame', 'pd.DataFrame', (['x_np[:, None]'], {'columns': 'dh.cols', 'index': 'dh.rows'}), '(x_np[:, None], columns=dh.cols, index=dh.rows)\n', (2761, 2808), True, 'import pandas as pd\n'), ((2813, 2852), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['expected', 'dh.pandas'], {}), '(expected, dh.pandas)\n', (2831, 2852), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((2963, 2985), 'numpy.random.randn', 'np.random.randn', (['(10)', '(2)'], {}), '(10, 2)\n', (2978, 2985), True, 'import numpy as np\n'), ((2994, 3012), 'xarray.DataArray', 'xr.DataArray', (['x_np'], {}), '(x_np)\n', (3006, 3012), True, 'import xarray as xr\n'), ((3022, 3031), 'linearmodels.iv.data.IVData', 'IVData', (['x'], {}), '(x)\n', (3028, 3031), False, 'from linearmodels.iv.data import IVData\n'), ((3036, 3066), 'numpy.testing.assert_equal', 'assert_equal', (['dh.ndarray', 'x_np'], {}), '(dh.ndarray, x_np)\n', (3048, 3066), False, 'from numpy.testing import assert_equal\n'), ((3161, 3211), 'pandas.DataFrame', 'pd.DataFrame', (['x_np'], {'columns': 'dh.cols', 'index': 'dh.rows'}), '(x_np, columns=dh.cols, index=dh.rows)\n', (3173, 3211), True, 'import pandas as pd\n'), ((3216, 3255), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['expected', 'dh.pandas'], {}), '(expected, dh.pandas)\n', (3234, 3255), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((3269, 3308), 'pandas.date_range', 'pd.date_range', (['"""2017-01-01"""'], {'periods': '(10)'}), "('2017-01-01', periods=10)\n", (3282, 3308), True, 'import pandas as pd\n'), ((3317, 3390), 'xarray.DataArray', 'xr.DataArray', (['x_np', "[('time', index), ('variables', ['apple', 'banana'])]"], {}), "(x_np, [('time', index), ('variables', ['apple', 'banana'])])\n", (3329, 3390), True, 'import xarray as xr\n'), ((3400, 3409), 'linearmodels.iv.data.IVData', 'IVData', (['x'], {}), '(x)\n', (3406, 3409), False, 'from 
linearmodels.iv.data import IVData\n'), ((3414, 3444), 'numpy.testing.assert_equal', 'assert_equal', (['dh.ndarray', 'x_np'], {}), '(dh.ndarray, x_np)\n', (3426, 3444), False, 'from numpy.testing import assert_equal\n'), ((3570, 3620), 'pandas.DataFrame', 'pd.DataFrame', (['x_np'], {'columns': 'dh.cols', 'index': 'dh.rows'}), '(x_np, columns=dh.cols, index=dh.rows)\n', (3582, 3620), True, 'import pandas as pd\n'), ((3625, 3664), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['expected', 'dh.pandas'], {}), '(expected, dh.pandas)\n', (3643, 3664), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((4112, 4163), 'pandas.Series', 'pd.Series', (["['a', 'b', 'a', 'b', 'c', 'd', 'a', 'b']"], {}), "(['a', 'b', 'a', 'b', 'c', 'd', 'a', 'b'])\n", (4121, 4163), True, 'import pandas as pd\n'), ((4208, 4298), 'pandas.Series', 'pd.Series', (["['apple', 'banana', 'apple', 'banana', 'cherry', 'date', 'apple', 'banana']"], {}), "(['apple', 'banana', 'apple', 'banana', 'cherry', 'date', 'apple',\n 'banana'])\n", (4217, 4298), True, 'import pandas as pd\n'), ((4318, 4380), 'pandas.DataFrame', 'pd.DataFrame', (["{'string': s1, 'number': s2, 'other_string': s3}"], {}), "({'string': s1, 'number': s2, 'other_string': s3})\n", (4330, 4380), True, 'import pandas as pd\n'), ((4390, 4400), 'linearmodels.iv.data.IVData', 'IVData', (['df'], {}), '(df)\n', (4396, 4400), False, 'from linearmodels.iv.data import IVData\n'), ((4496, 4510), 'linearmodels.iv.data.IVData', 'IVData', (['df_cat'], {}), '(df_cat)\n', (4502, 4510), False, 'from linearmodels.iv.data import IVData\n'), ((4515, 4559), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['dh.pandas', 'dh_cat.pandas'], {}), '(dh.pandas, dh_cat.pandas)\n', (4533, 4559), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((4611, 4628), 'numpy.empty', 'np.empty', (['(10, 2)'], {}), '((10, 2))\n', (4619, 4628), True, 'import numpy as np\n'), ((4641, 4680), 
'pandas.date_range', 'pd.date_range', (['"""2017-01-01"""'], {'periods': '(10)'}), "('2017-01-01', periods=10)\n", (4654, 4680), True, 'import pandas as pd\n'), ((4691, 4739), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': "['a', 'b']", 'index': 'index'}), "(x, columns=['a', 'b'], index=index)\n", (4703, 4739), True, 'import pandas as pd\n'), ((4750, 4761), 'linearmodels.iv.data.IVData', 'IVData', (['xdf'], {}), '(xdf)\n', (4756, 4761), False, 'from linearmodels.iv.data import IVData\n'), ((4773, 4784), 'linearmodels.iv.data.IVData', 'IVData', (['xdh'], {}), '(xdh)\n', (4779, 4784), False, 'from linearmodels.iv.data import IVData\n'), ((4882, 4921), 'numpy.testing.assert_equal', 'assert_equal', (['xdh.ndarray', 'xdh2.ndarray'], {}), '(xdh.ndarray, xdh2.ndarray)\n', (4894, 4921), False, 'from numpy.testing import assert_equal\n'), ((4959, 5002), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['xdh.pandas', 'xdh2.pandas'], {}), '(xdh.pandas, xdh2.pandas)\n', (4977, 5002), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((5049, 5088), 'pandas.date_range', 'pd.date_range', (['"""2017-01-01"""'], {'periods': '(10)'}), "('2017-01-01', periods=10)\n", (5062, 5088), True, 'import pandas as pd\n'), ((5099, 5165), 'pandas.Categorical', 'pd.Categorical', (["['a', 'b', 'a', 'b', 'a', 'a', 'b', 'c', 'c', 'a']"], {}), "(['a', 'b', 'a', 'b', 'a', 'a', 'b', 'c', 'c', 'a'])\n", (5113, 5165), True, 'import pandas as pd\n'), ((5176, 5188), 'numpy.empty', 'np.empty', (['(10)'], {}), '(10)\n', (5184, 5188), True, 'import numpy as np\n'), ((5257, 5267), 'linearmodels.iv.data.IVData', 'IVData', (['df'], {}), '(df)\n', (5263, 5267), False, 'from linearmodels.iv.data import IVData\n'), ((5425, 5467), 'numpy.testing.assert_equal', 'assert_equal', (["dh.pandas['num'].values", 'num'], {}), "(dh.pandas['num'].values, num)\n", (5437, 5467), False, 'from numpy.testing import assert_equal\n'), ((5665, 5704), 'pandas.date_range', 
'pd.date_range', (['"""2017-01-01"""'], {'periods': '(10)'}), "('2017-01-01', periods=10)\n", (5678, 5704), True, 'import pandas as pd\n'), ((5715, 5781), 'pandas.Categorical', 'pd.Categorical', (["['a', 'b', 'a', 'b', 'a', 'a', 'b', 'c', 'c', 'a']"], {}), "(['a', 'b', 'a', 'b', 'a', 'a', 'b', 'c', 'c', 'a'])\n", (5729, 5781), True, 'import pandas as pd\n'), ((5790, 5829), 'pandas.Series', 'pd.Series', (['cat'], {'name': '"""cat"""', 'index': 'index'}), "(cat, name='cat', index=index)\n", (5799, 5829), True, 'import pandas as pd\n'), ((5839, 5848), 'linearmodels.iv.data.IVData', 'IVData', (['s'], {}), '(s)\n', (5845, 5848), False, 'from linearmodels.iv.data import IVData\n'), ((6199, 6238), 'pandas.date_range', 'pd.date_range', (['"""2017-01-01"""'], {'periods': '(10)'}), "('2017-01-01', periods=10)\n", (6212, 6238), True, 'import pandas as pd\n'), ((6249, 6315), 'pandas.Categorical', 'pd.Categorical', (["['a', 'b', 'a', 'b', 'a', 'a', 'b', 'c', 'c', 'a']"], {}), "(['a', 'b', 'a', 'b', 'a', 'a', 'b', 'c', 'c', 'a'])\n", (6263, 6315), True, 'import pandas as pd\n'), ((6324, 6363), 'pandas.Series', 'pd.Series', (['cat'], {'index': 'index', 'name': '"""cat"""'}), "(cat, index=index, name='cat')\n", (6333, 6363), True, 'import pandas as pd\n'), ((6373, 6405), 'linearmodels.iv.data.IVData', 'IVData', (['s'], {'convert_dummies': '(False)'}), '(s, convert_dummies=False)\n', (6379, 6405), False, 'from linearmodels.iv.data import IVData\n'), ((6534, 6549), 'pandas.DataFrame', 'pd.DataFrame', (['s'], {}), '(s)\n', (6546, 6549), True, 'import pandas as pd\n'), ((6554, 6587), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['dh.pandas', 'df'], {}), '(dh.pandas, df)\n', (6572, 6587), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((6645, 6684), 'pandas.date_range', 'pd.date_range', (['"""2017-01-01"""'], {'periods': '(10)'}), "('2017-01-01', periods=10)\n", (6658, 6684), True, 'import pandas as pd\n'), ((6695, 6761), 
'pandas.Categorical', 'pd.Categorical', (["['a', 'b', 'a', 'b', 'a', 'a', 'b', 'c', 'c', 'a']"], {}), "(['a', 'b', 'a', 'b', 'a', 'a', 'b', 'c', 'c', 'a'])\n", (6709, 6761), True, 'import pandas as pd\n'), ((6772, 6784), 'numpy.empty', 'np.empty', (['(10)'], {}), '(10)\n', (6780, 6784), True, 'import numpy as np\n'), ((6853, 6881), 'linearmodels.iv.data.IVData', 'IVData', (['df'], {'drop_first': '(False)'}), '(df, drop_first=False)\n', (6859, 6881), False, 'from linearmodels.iv.data import IVData\n'), ((7048, 7090), 'numpy.testing.assert_equal', 'assert_equal', (["dh.pandas['num'].values", 'num'], {}), "(dh.pandas['num'].values, num)\n", (7060, 7090), False, 'from numpy.testing import assert_equal\n'), ((7450, 7467), 'numpy.empty', 'np.empty', (['(10, 1)'], {}), '((10, 1))\n', (7458, 7467), True, 'import numpy as np\n'), ((7573, 7601), 'pandas.Series', 'pd.Series', (["[1, 2, 'a', -3.0]"], {}), "([1, 2, 'a', -3.0])\n", (7582, 7601), True, 'import pandas as pd\n'), ((2648, 2666), 'pandas.Series', 'pd.Series', (['dh.rows'], {}), '(dh.rows)\n', (2657, 2666), True, 'import pandas as pd\n'), ((3469, 3487), 'pandas.Series', 'pd.Series', (['dh.rows'], {}), '(dh.rows)\n', (3478, 3487), True, 'import pandas as pd\n'), ((3710, 3735), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3723, 3735), False, 'import pytest\n'), ((3782, 3807), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3795, 3807), False, 'import pytest\n'), ((3855, 3879), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3868, 3879), False, 'import pytest\n'), ((4183, 4197), 'numpy.arange', 'np.arange', (['(8.0)'], {}), '(8.0)\n', (4192, 4197), True, 'import numpy as np\n'), ((7357, 7382), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7370, 7382), False, 'import pytest\n'), ((7392, 7404), 'linearmodels.iv.data.IVData', 'IVData', (['None'], {}), '(None)\n', (7398, 7404), False, 'from linearmodels.iv.data 
import IVData\n'), ((7477, 7502), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7490, 7502), False, 'import pytest\n'), ((7512, 7531), 'linearmodels.iv.data.IVData', 'IVData', (['x'], {'nobs': '(100)'}), '(x, nobs=100)\n', (7518, 7531), False, 'from linearmodels.iv.data import IVData\n'), ((7611, 7636), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7624, 7636), False, 'import pytest\n'), ((7646, 7655), 'linearmodels.iv.data.IVData', 'IVData', (['s'], {}), '(s)\n', (7652, 7655), False, 'from linearmodels.iv.data import IVData\n'), ((7714, 7729), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (7721, 7729), True, 'import numpy as np\n'), ((7760, 7785), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7773, 7785), False, 'import pytest\n'), ((7795, 7804), 'linearmodels.iv.data.IVData', 'IVData', (['x'], {}), '(x)\n', (7801, 7804), False, 'from linearmodels.iv.data import IVData\n'), ((477, 490), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (486, 490), True, 'import numpy as np\n'), ((861, 874), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (870, 874), True, 'import numpy as np\n'), ((1403, 1452), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': 'xdh.cols', 'index': 'xdh.rows'}), '(x, columns=xdh.cols, index=xdh.rows)\n', (1415, 1452), True, 'import pandas as pd\n'), ((1872, 1930), 'pandas.DataFrame', 'pd.DataFrame', (['x[:, None]'], {'columns': 'xdh.cols', 'index': 'xdh.rows'}), '(x[:, None], columns=xdh.cols, index=xdh.rows)\n', (1884, 1930), True, 'import pandas as pd\n'), ((2278, 2291), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2287, 2291), True, 'import numpy as np\n'), ((3094, 3107), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3103, 3107), True, 'import numpy as np\n'), ((3752, 3771), 'numpy.empty', 'np.empty', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (3760, 3771), True, 'import numpy as np\n'), ((3824, 3844), 
'numpy.empty', 'np.empty', (['(10, 2, 2)'], {}), '((10, 2, 2))\n', (3832, 3844), True, 'import numpy as np\n')] |
# utility functions such as statistics
import skimage.io as io
import json
import argparse
import numpy as np
import os
def get_subset_stats(json_path):
    """
    Print cancerous / non-cancerous scan counts for the train and test subsets.

    Args:
        json_path: path to the subset index JSON file; each entry maps an id
            to a dict with at least 'subset' ('train'/'test') and 'cancer'
            (bool) keys.
    """
    with open(json_path) as json_file:
        data_index = json.load(json_file)
    stats = {}
    for subset in ['train', 'test']:
        # Count matches directly instead of materializing throwaway key lists.
        cancer = sum(1 for v in data_index.values()
                     if v['subset'] == subset and v['cancer'] == True)
        no_cancer = sum(1 for v in data_index.values()
                        if v['subset'] == subset and v['cancer'] == False)
        stats[subset] = {'Cancer': cancer, 'No Cancer': no_cancer}
    print("{:<8} {:<8} {:<10} {:<8}".format('Subset', 'Total', 'Cancerous', 'Non-cancerous'))
    for k, v in stats.items():
        cancer = v['Cancer']
        non_cancer = v['No Cancer']
        print("{:<8} {:<8} {:<10} {:<8}".format(k, cancer+non_cancer,cancer, non_cancer))
def metrics_summary(metric_path):
    """
    Print a per-model summary table of segmentation evaluation metrics.

    Reads every JSON metrics file in `metric_path` and reports dice scores,
    precision, recall, true-positive overlap ratio, and false positive /
    false negative rates.

    Args:
        metric_path: A path to metric json file directory
    """
    def ratio(num, den):
        # Guard against empty confusion-matrix cells; the original only
        # guarded precision and TP overlap, so recall/FPR/FNR could crash.
        return num / den if den else 0

    print(
        "{:<12} {:<15} {:<5} {:<5} {:<5} {:<5} {:<5} {:<5} {:<5} {:<5}".format('Model', 'Dataset', 'avg_dice', 'c_dice',
                                                                              'n_dice', 'precision', 'recall',
                                                                              'overlap', 'FPR', 'FNR'))
    for file in os.listdir(metric_path):
        file_path = os.path.join(metric_path, file)
        with open(file_path) as json_file:
            metrics = json.load(json_file)
        dataset = metrics['train']['dataset']
        if dataset is None:  # was `== None`
            dataset = "original"
        model = metrics['train']['model']
        avg_dice = metrics['test']['average_dice_score']
        cancer_dice = metrics['test']['average_cancer_dice_score']
        no_cancer_dice = metrics['test']['average_non_cancer_dice_score']
        # Confusion matrix cells (gt = ground truth, pd = prediction).
        FP = metrics['test']['gt_n_pd_c']
        TP = metrics['test']['gt_c_pd_c_overlap'] + metrics['test']['gt_c_pd_c_no_overlap']
        FN = metrics['test']['gt_c_pd_no_c']
        TN = metrics['test']['gt_n_pd_n']
        precision = ratio(TP, TP + FP)
        recall = ratio(TP, TP + FN)
        # Fraction of true positives whose predicted mask overlaps the truth.
        TP_with_overlap = ratio(metrics['test']['gt_c_pd_c_overlap'], TP)
        false_positive = ratio(FP, FP + TN)
        false_negative = ratio(FN, FN + TP)
        print("{:<12} {:<15} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f}".format(model, dataset,
                                                                                             avg_dice,
                                                                                             cancer_dice,
                                                                                             no_cancer_dice,
                                                                                             precision,
                                                                                             recall,
                                                                                             TP_with_overlap,
                                                                                             false_positive,
                                                                                             false_negative
                                                                                             ))
# outline, gray2rgb, overlay_plot are adapted from: https://github.com/mateuszbuda/brain-segmentation-pytorch/blob/master/utils.py
def outline(image, mask, color):
    """
    Paint the boundary pixels of a binary mask onto an image.

    Args:
        image: RGB image to draw on (modified in place).
        mask: label array; rounded to binary before tracing.
        color: RGB color applied to boundary pixels.
    Return:
        image: the same image with the mask outline colored.
    """
    mask = np.round(mask)
    for y, x in zip(*np.nonzero(mask)):
        # A pixel is on the boundary when its 3x3 neighborhood is mixed
        # (mean strictly between 0 and 1).
        neighborhood = mask[max(0, y - 1) : y + 2, max(0, x - 1) : x + 2]
        if 0.0 < np.mean(neighborhood) < 1.0:
            image[max(0, y) : y + 1, max(0, x) : x + 1] = color
    return image
def gray2rgb(image):
    """
    Convert a one-channel image to an RGB uint8 image via min-max scaling.

    Args:
        image: 2D array (one channel).
    Return:
        (rows, cols, 3) uint8 array with the scaled intensity replicated
        across the three channels.
    """
    # BUG FIX: the original used `image +=` / `image /=` on the caller's
    # array, silently mutating it, and `/=` raised TypeError for integer
    # input. Work on a float copy instead.
    image = np.array(image, dtype=float)
    w, h = image.shape
    image += np.abs(np.min(image))
    image_max = np.abs(np.max(image))
    if image_max > 0:
        image /= image_max
    ret = np.empty((w, h, 3), dtype=np.uint8)
    ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = image * 255
    return ret
def overlay_plot(img, y_true, y_pred, index, args, save_plot=False):
    """
    Build an RGB overlay of an image with ground-truth and predicted outlines.

    Args:
        img: one-channel image batch; only img[0] is used.
        y_true: ground-truth label batch (y_true[0], drawn in red).
        y_pred: predicted label batch (y_pred[0], drawn in green).
        index: index used in the saved file name.
        args: parsed arguments; args.plot_path is the output directory.
        save_plot: if True, the overlay is written to disk.
    Return:
        image: the RGB overlay.
    """
    rgb = gray2rgb(img[0])
    rgb = outline(rgb, y_true[0], color=[255, 0, 0])
    rgb = outline(rgb, y_pred[0], color=[0, 255, 0])
    if save_plot == True:
        filename = "img-{}.png".format(index)
        io.imsave(os.path.join(args.plot_path, filename), rgb)
    return rgb
if __name__ == "__main__":
    # CLI entry point: dispatch to one of the utility functions via --method.
    # Unrecognized --method values are silently ignored.
    parser = argparse.ArgumentParser(
        description="Utility funcitons for statistics on the dataset or analysis of metrics"
    )
    parser.add_argument(
        "--method",
        type=str,
        default=None,
        help="util function to be executed",
    )
    parser.add_argument(
        "--jsonfile", type=str, default="./data/data_index_subsets.json",
        help="root folder with json with assigned subsets"
    )
    parser.add_argument(
        "--metric_path", type=str, default="./save/metrics",
        help="root folder with json with assigned subsets"
    )
    # argparse converts the dash, so this is args.plot_path (consumed by
    # overlay_plot, not dispatched below).
    parser.add_argument(
        "--plot-path", type=str, default="./save/plots",
        help="root folder to save plots"
    )
    args = parser.parse_args()
    if args.method == 'subset_stats':
        get_subset_stats(args.jsonfile)
    elif args.method == 'metrics_summary':
        metrics_summary(args.metric_path)
| [
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"numpy.max",
"numpy.empty",
"skimage.io.imsave",
"numpy.nonzero",
"numpy.min",
"json.load",
"numpy.round"
] | [((1507, 1530), 'os.listdir', 'os.listdir', (['metric_path'], {}), '(metric_path)\n', (1517, 1530), False, 'import os\n'), ((4299, 4313), 'numpy.round', 'np.round', (['mask'], {}), '(mask)\n', (4307, 4313), True, 'import numpy as np\n'), ((4327, 4343), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (4337, 4343), True, 'import numpy as np\n'), ((4940, 4975), 'numpy.empty', 'np.empty', (['(w, h, 3)'], {'dtype': 'np.uint8'}), '((w, h, 3), dtype=np.uint8)\n', (4948, 4975), True, 'import numpy as np\n'), ((5899, 6013), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Utility funcitons for statistics on the dataset or analysis of metrics"""'}), "(description=\n 'Utility funcitons for statistics on the dataset or analysis of metrics')\n", (5922, 6013), False, 'import argparse\n'), ((346, 366), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (355, 366), False, 'import json\n'), ((1552, 1583), 'os.path.join', 'os.path.join', (['metric_path', 'file'], {}), '(metric_path, file)\n', (1564, 1583), False, 'import os\n'), ((4828, 4841), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (4834, 4841), True, 'import numpy as np\n'), ((4866, 4879), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4872, 4879), True, 'import numpy as np\n'), ((5767, 5805), 'os.path.join', 'os.path.join', (['args.plot_path', 'filename'], {}), '(args.plot_path, filename)\n', (5779, 5805), False, 'import os\n'), ((5814, 5840), 'skimage.io.imsave', 'io.imsave', (['filepath', 'image'], {}), '(filepath, image)\n', (5823, 5840), True, 'import skimage.io as io\n'), ((1649, 1669), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1658, 1669), False, 'import json\n')] |
import numpy as np
from tsfuse.transformers.uniqueness import *
from tsfuse.data import Collection
def test_has_duplicate_true():
    # Repeated value present -> True.
    data = Collection.from_array([1, 2, 3, 3])
    output = HasDuplicate().transform(data)
    np.testing.assert_equal(output.values, True)
def test_has_duplicate_false():
    # All values distinct -> False.
    data = Collection.from_array([1, 2, 3, 4])
    output = HasDuplicate().transform(data)
    np.testing.assert_equal(output.values, False)
def test_has_duplicate_min_true():
    # Minimum occurs twice -> True.
    data = Collection.from_array([1, 1, 2, 3])
    output = HasDuplicateMin().transform(data)
    np.testing.assert_equal(output.values, True)
def test_has_duplicate_min_false():
    # Only the maximum is duplicated -> False.
    data = Collection.from_array([2, 3, 4, 4])
    output = HasDuplicateMin().transform(data)
    np.testing.assert_equal(output.values, False)
def test_has_duplicate_max_true():
    # Maximum occurs twice -> True.
    data = Collection.from_array([2, 3, 4, 4])
    output = HasDuplicateMax().transform(data)
    np.testing.assert_equal(output.values, True)
def test_has_duplicate_max_false():
    # Only the minimum is duplicated -> False.
    data = Collection.from_array([1, 1, 2, 3])
    output = HasDuplicateMax().transform(data)
    np.testing.assert_equal(output.values, False)
def test_has_duplicate_empty():
    # A single NaN has no duplicates.
    data = Collection.from_array([np.nan])
    output = HasDuplicate().transform(data)
    np.testing.assert_equal(output.values, False)
def test_number_of_unique_values_rel():
    # Four distinct values out of four -> ratio 1.0.
    data = Collection.from_array([1, 2, 3, 4])
    output = NumberUniqueValues(rel=True).transform(data)
    np.testing.assert_equal(output.values, 1.0)
def test_number_of_unique_values_abs():
    # Four distinct values -> absolute count 4.
    data = Collection.from_array([1, 2, 3, 4])
    output = NumberUniqueValues(rel=False).transform(data)
    np.testing.assert_equal(output.values, 4)
def test_number_of_unique_values_1_rel():
    # One distinct value out of four -> ratio 0.25.
    data = Collection.from_array([2, 2, 2, 2])
    output = NumberUniqueValues(rel=True).transform(data)
    np.testing.assert_equal(output.values, 0.25)
def test_number_of_unique_values_1_abs():
    # One distinct value -> absolute count 1.
    data = Collection.from_array([2, 2, 2, 2])
    output = NumberUniqueValues(rel=False).transform(data)
    np.testing.assert_equal(output.values, 1)
def test_number_of_unique_values_0_rel():
    # All-NaN input -> NaN result.
    data = Collection.from_array([np.nan])
    output = NumberUniqueValues(rel=True).transform(data)
    np.testing.assert_equal(output.values, np.nan)
def test_number_of_unique_values_0_abs():
    # All-NaN input -> NaN result.
    data = Collection.from_array([np.nan])
    output = NumberUniqueValues(rel=False).transform(data)
    np.testing.assert_equal(output.values, np.nan)
def test_sum_of_reoccurring_data_poins():
    # Duplicated data points are 1, 1, 3, 3 -> sum 8.
    data = Collection.from_array([1, 1, 2, 3, 3, 4])
    output = SumReoccurringDataPoints().transform(data)
    np.testing.assert_equal(output.values, 8)
def test_sum_of_reoccurring_data_points_0():
    # No duplicates -> sum 0.
    data = Collection.from_array([1, 2, 3, 4])
    output = SumReoccurringDataPoints().transform(data)
    np.testing.assert_equal(output.values, 0)
def test_sum_of_reoccurring_values():
    # Duplicated values are 1 and 3 (counted once each) -> sum 4.
    data = Collection.from_array([1, 1, 2, 3, 3, 4])
    output = SumReoccurringValues().transform(data)
    np.testing.assert_equal(output.values, 4)
def test_sum_of_reoccurring_values_0():
    # No duplicates -> sum 0.
    data = Collection.from_array([1, 2, 3, 4])
    output = SumReoccurringValues().transform(data)
    np.testing.assert_equal(output.values, 0)
| [
"numpy.testing.assert_equal",
"tsfuse.data.Collection.from_array"
] | [((141, 176), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[1, 2, 3, 3]'], {}), '([1, 2, 3, 3])\n', (162, 176), False, 'from tsfuse.data import Collection\n'), ((229, 266), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(True)'], {}), '(actual, True)\n', (252, 266), True, 'import numpy as np\n'), ((309, 344), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (330, 344), False, 'from tsfuse.data import Collection\n'), ((397, 435), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(False)'], {}), '(actual, False)\n', (420, 435), True, 'import numpy as np\n'), ((481, 516), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[1, 1, 2, 3]'], {}), '([1, 1, 2, 3])\n', (502, 516), False, 'from tsfuse.data import Collection\n'), ((572, 609), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(True)'], {}), '(actual, True)\n', (595, 609), True, 'import numpy as np\n'), ((656, 691), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[2, 3, 4, 4]'], {}), '([2, 3, 4, 4])\n', (677, 691), False, 'from tsfuse.data import Collection\n'), ((747, 785), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(False)'], {}), '(actual, False)\n', (770, 785), True, 'import numpy as np\n'), ((831, 866), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[2, 3, 4, 4]'], {}), '([2, 3, 4, 4])\n', (852, 866), False, 'from tsfuse.data import Collection\n'), ((922, 959), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(True)'], {}), '(actual, True)\n', (945, 959), True, 'import numpy as np\n'), ((1006, 1041), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[1, 1, 2, 3]'], {}), '([1, 1, 2, 3])\n', (1027, 1041), False, 'from tsfuse.data import Collection\n'), ((1097, 1135), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(False)'], {}), 
'(actual, False)\n', (1120, 1135), True, 'import numpy as np\n'), ((1178, 1209), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[np.nan]'], {}), '([np.nan])\n', (1199, 1209), False, 'from tsfuse.data import Collection\n'), ((1262, 1300), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(False)'], {}), '(actual, False)\n', (1285, 1300), True, 'import numpy as np\n'), ((1351, 1386), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1372, 1386), False, 'from tsfuse.data import Collection\n'), ((1453, 1489), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(1.0)'], {}), '(actual, 1.0)\n', (1476, 1489), True, 'import numpy as np\n'), ((1540, 1575), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1561, 1575), False, 'from tsfuse.data import Collection\n'), ((1643, 1677), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(4)'], {}), '(actual, 4)\n', (1666, 1677), True, 'import numpy as np\n'), ((1730, 1765), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[2, 2, 2, 2]'], {}), '([2, 2, 2, 2])\n', (1751, 1765), False, 'from tsfuse.data import Collection\n'), ((1832, 1869), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(0.25)'], {}), '(actual, 0.25)\n', (1855, 1869), True, 'import numpy as np\n'), ((1922, 1957), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[2, 2, 2, 2]'], {}), '([2, 2, 2, 2])\n', (1943, 1957), False, 'from tsfuse.data import Collection\n'), ((2025, 2059), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(1)'], {}), '(actual, 1)\n', (2048, 2059), True, 'import numpy as np\n'), ((2112, 2143), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[np.nan]'], {}), '([np.nan])\n', (2133, 2143), False, 'from tsfuse.data import Collection\n'), ((2210, 2249), 
'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', 'np.nan'], {}), '(actual, np.nan)\n', (2233, 2249), True, 'import numpy as np\n'), ((2302, 2333), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[np.nan]'], {}), '([np.nan])\n', (2323, 2333), False, 'from tsfuse.data import Collection\n'), ((2401, 2440), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', 'np.nan'], {}), '(actual, np.nan)\n', (2424, 2440), True, 'import numpy as np\n'), ((2493, 2534), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[1, 1, 2, 3, 3, 4]'], {}), '([1, 1, 2, 3, 3, 4])\n', (2514, 2534), False, 'from tsfuse.data import Collection\n'), ((2599, 2633), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(8)'], {}), '(actual, 8)\n', (2622, 2633), True, 'import numpy as np\n'), ((2689, 2724), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2710, 2724), False, 'from tsfuse.data import Collection\n'), ((2789, 2823), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(0)'], {}), '(actual, 0)\n', (2812, 2823), True, 'import numpy as np\n'), ((2872, 2913), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[1, 1, 2, 3, 3, 4]'], {}), '([1, 1, 2, 3, 3, 4])\n', (2893, 2913), False, 'from tsfuse.data import Collection\n'), ((2974, 3008), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(4)'], {}), '(actual, 4)\n', (2997, 3008), True, 'import numpy as np\n'), ((3059, 3094), 'tsfuse.data.Collection.from_array', 'Collection.from_array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (3080, 3094), False, 'from tsfuse.data import Collection\n'), ((3155, 3189), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual', '(0)'], {}), '(actual, 0)\n', (3178, 3189), True, 'import numpy as np\n')] |
"""
Centrographic measures for point patterns
TODO
- testing
- documentation
"""
__author__ = "<NAME> <EMAIL>"
__all__ = ['mbr', 'hull', 'mean_center', 'weighted_mean_center',
'manhattan_median', 'std_distance', 'euclidean_median', 'ellipse',
'skyum', 'dtot',"_circle"]
import sys
import numpy as np
import warnings
import copy
from math import pi as PI
from scipy.spatial import ConvexHull
from pysal.lib.cg import get_angle_between, Ray, is_clockwise
from scipy.spatial import distance as dist
from scipy.optimize import minimize
not_clockwise = lambda x: not is_clockwise(x)
MAXD = sys.float_info.max
MIND = sys.float_info.min
def mbr(points):
    """
    Find minimum bounding rectangle of a point array.

    Parameters
    ----------
    points : arraylike
        (n,2), (x,y) coordinates of a series of event points.

    Returns
    -------
    min_x : float
        leftmost value of the vertices of minimum bounding rectangle.
    min_y : float
        downmost value of the vertices of minimum bounding rectangle.
    max_x : float
        rightmost value of the vertices of minimum bounding rectangle.
    max_y : float
        upmost value of the vertices of minimum bounding rectangle.
    """
    points = np.asarray(points)
    if points.size == 0:
        # Preserve the original degenerate result for empty input.
        return MAXD, MAXD, MIND, MIND
    # Vectorized column-wise extrema replace the original Python loop.
    min_x, min_y = points.min(axis=0)
    max_x, max_y = points.max(axis=0)
    return min_x, min_y, max_x, max_y
def hull(points):
    """
    Find convex hull of a point array.

    Parameters
    ----------
    points: arraylike
        (n,2), (x,y) coordinates of a series of event points.

    Returns
    -------
    _ : array
        (h,2), points defining the hull in counterclockwise order.
    """
    pts = np.asarray(points)
    convex = ConvexHull(pts)
    # ConvexHull.vertices indexes the hull points in counterclockwise order.
    return pts[convex.vertices]
def mean_center(points):
    """
    Find mean center of a point array.

    Parameters
    ----------
    points: arraylike
        (n,2), (x,y) coordinates of a series of event points.

    Returns
    -------
    _ : array
        (2,), (x,y) coordinates of the mean center.
    """
    return np.asarray(points).mean(axis=0)
def weighted_mean_center(points, weights):
    """
    Find weighted mean center of a marked point pattern.

    Parameters
    ----------
    points : arraylike
        (n,2), (x,y) coordinates of a series of event points.
    weights : arraylike
        a series of attribute values of length n.

    Returns
    -------
    _ : array
        (2,), (x,y) coordinates of the weighted mean center.
    """
    pts = np.asarray(points)
    wts = np.asarray(weights)
    # Normalize the weights so they sum to one, then take the weighted sum.
    frac = (wts * 1.0 / wts.sum()).reshape(1, len(pts))
    return np.dot(frac, pts)[0]
def manhattan_median(points):
    """
    Find manhattan median of a point array.

    Parameters
    ----------
    points : arraylike
        (n,2), (x,y) coordinates of a series of event points.

    Returns
    -------
    _ : array
        (2,), (x,y) coordinates of the manhattan median.
    """
    points = np.asarray(points)
    # The coordinate-wise median is unique only for odd-sized patterns.
    if len(points) % 2 == 0:
        warnings.warn("Manhattan Median is not unique for even point patterns.")
    return np.median(points, axis=0)
def std_distance(points):
    """
    Calculate standard distance of a point array.

    Parameters
    ----------
    points : arraylike
        (n,2), (x,y) coordinates of a series of event points.

    Returns
    -------
    _ : float
        standard distance.
    """
    points = np.asarray(points)
    count = points.shape[0]
    center = points.mean(axis=0)
    # Per-coordinate variance via E[x^2] - (E[x])^2, summed over x and y.
    variance = (points * points).sum(axis=0) / count - center * center
    return np.sqrt(variance.sum())
def ellipse(points):
"""
Calculate parameters of standard deviational ellipse for a point pattern.
Parameters
----------
points : arraylike
(n,2), (x,y) coordinates of a series of event points.
Returns
-------
_ : float
semi-major axis.
_ : float
semi-minor axis.
theta : float
clockwise rotation angle of the ellipse.
Notes
-----
Implements approach from:
https://www.icpsr.umich.edu/CrimeStat/files/CrimeStatChapter.4.pdf
"""
points = np.asarray(points)
n, k = points.shape
x = points[:, 0]
y = points[:, 1]
xd = x - x.mean()
yd = y - y.mean()
xss = (xd * xd).sum()
yss = (yd * yd).sum()
cv = (xd * yd).sum()
num = (xss - yss) + np.sqrt((xss - yss)**2 + 4 * (cv)**2)
den = 2 * cv
theta = np.arctan(num / den)
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
n_2 = n - 2
sd_x = (2 * (xd * cos_theta - yd * sin_theta)**2).sum() / n_2
sd_y = (2 * (xd * sin_theta - yd * cos_theta)**2).sum() / n_2
return np.sqrt(sd_x), np.sqrt(sd_y), theta
def dtot(coord, points):
    """
    Sum of Euclidean distances between event points and a selected point.

    Parameters
    ----------
    coord : arraylike
        (x,y) coordinates of a point.
    points : arraylike
        (n,2), (x,y) coordinates of a series of event points.

    Returns
    -------
    d : float
        sum of Euclidean distances.
    """
    pts = np.asarray(points)
    dx = pts[:, 0] - coord[0]
    dy = pts[:, 1] - coord[1]
    return np.sqrt(dx * dx + dy * dy).sum()
def euclidean_median(points):
    """
    Calculate the Euclidean median for a point pattern.

    Parameters
    ----------
    points: arraylike
        (n,2), (x,y) coordinates of a series of event points.

    Returns
    -------
    _ : array
        (2,), (x,y) coordinates of the Euclidean median.
    """
    points = np.asarray(points)
    # Minimize the sum of distances numerically, seeded at the mean center.
    guess = mean_center(points)
    return minimize(dtot, guess, args=(points,))['x']
def skyum(points, not_hull=True):
    """
    Implements Skyum (1990)'s algorithm for the minimum bounding circle in R^2.

    0. Store points clockwise.
    1. Find p in S that maximizes angle(prec(p), p, succ(p) THEN radius(prec(p),
    p, succ(p)). This is also called the lexicographic maximum, and is the last
    entry of a list of (radius, angle) in lexicographical order.
    2a. If angle(prec(p), p, succ(p)) <= 90 degrees, then finish.
    2b. If not, remove p from set.

    Parameters
    ----------
    points : arraylike
        (n,2) point coordinates; only the convex hull vertices are used.
    not_hull : bool
        NOTE(review): currently unused -- confirm whether it was meant to
        skip the hull computation.

    Returns
    -------
    tuple of the minimum bounding circle as (radius, (cx, cy)), the
    remaining hull points, the list of (removed point, iteration) pairs,
    and the final three-point candidate constraining the circle.
    """
    # Work on the convex hull only, stored clockwise.
    points = hull(points).tolist()
    if not_clockwise(points):
        points.reverse()
        if not_clockwise(points):
            raise Exception('Points are neither clockwise nor counterclockwise')
    POINTS = copy.deepcopy(points)  # NOTE(review): kept but never read afterwards
    removed = []
    i=0
    while True:
        # Vertex angle and circumscribed circle at every remaining hull point.
        angles = [_angle(_prec(p, points), p, _succ(p, points)) for p in points]
        circles = [_circle(_prec(p, points), p, _succ(p, points)) for p in points]
        radii = [c[0] for c in circles]
        # np.lexsort sorts by the LAST key first: angles are primary,
        # radii break ties.
        lexord = np.lexsort((radii, angles)) #confusing as hell defaults...
        lexmax = lexord[-1]
        candidate = (_prec(points[lexmax], points),
                     points[lexmax],
                     _succ(points[lexmax], points))
        if angles[lexmax] <= PI/2.0:
            # No obtuse angle remains: the circle through the candidate
            # triple is the minimum bounding circle.
            #print("Constrained by points: {}".format(candidate))
            return _circle(*candidate), points, removed, candidate
        else:
            # Drop the vertex with the (lexicographically) largest angle
            # and record when it was removed.
            try:
                removed.append((points.pop(lexmax), i))
            except IndexError:
                raise Exception("Construction of Minimum Bounding Circle failed!")
        i+=1
def _angle(p, q, r):
    """
    Return the positive angle formed at vertex q by the points p, q, r.
    """
    leg_a = Ray(q, p)
    leg_b = Ray(q, r)
    return np.abs(get_angle_between(leg_a, leg_b))
def _prec(p,l):
"""
retrieve the predecessor of p in list l
"""
pos = l.index(p)
if pos-1 < 0:
return l[-1]
else:
return l[pos-1]
def _succ(p,l):
"""
retrieve the successor of p in list l
"""
pos = l.index(p)
if pos+1 >= len(l):
return l[0]
else:
return l[pos+1]
def _circle(p, q, r, dmetric=dist.euclidean):
    """
    Returns (radius, (center_x, center_y)) of the circumscribed circle by the
    triangle pqr.

    Handles the degenerate cases where the angle at q is 0 or pi; this does
    not assume that p != q != r.
    """
    px, py = p
    qx, qy = q
    rx, ry = r
    angle_at_q = np.abs(_angle(p, q, r))
    if np.allclose(angle_at_q, PI):
        # Collinear with q between p and r: the segment pr is a diameter.
        radius = dmetric(p, r) / 2.
        center_x = (px + rx) / 2.
        center_y = (py + ry) / 2.
    elif np.allclose(angle_at_q, 0):
        # Collinear with p, r on the same side: the segment pq is a diameter.
        radius = dmetric(p, q) / 2.
        center_x = (px + qx) / 2.
        center_y = (py + qy) / 2.
    else:
        # Standard circumcenter formula for a non-degenerate triangle.
        D = 2 * (px * (qy - ry) + qx * (ry - py) + rx * (py - qy))
        center_x = ((px**2 + py**2) * (qy - ry) + (qx**2 + qy**2) * (ry - py)
                    + (rx**2 + ry**2) * (py - qy)) / float(D)
        center_y = ((px**2 + py**2) * (rx - qx) + (qx**2 + qy**2) * (px - rx)
                    + (rx**2 + ry**2) * (qx - px)) / float(D)
        radius = dmetric((center_x, center_y), p)
    return radius, (center_x, center_y)
| [
"numpy.median",
"numpy.sqrt",
"pysal.lib.cg.Ray",
"scipy.optimize.minimize",
"numpy.asarray",
"scipy.spatial.ConvexHull",
"numpy.lexsort",
"numpy.dot",
"warnings.warn",
"numpy.cos",
"copy.deepcopy",
"numpy.sin",
"pysal.lib.cg.is_clockwise",
"numpy.arctan"
] | [((1284, 1302), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (1294, 1302), True, 'import numpy as np\n'), ((1935, 1953), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (1945, 1953), True, 'import numpy as np\n'), ((1962, 1980), 'scipy.spatial.ConvexHull', 'ConvexHull', (['points'], {}), '(points)\n', (1972, 1980), False, 'from scipy.spatial import ConvexHull\n'), ((2325, 2343), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (2335, 2343), True, 'import numpy as np\n'), ((3305, 3323), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (3315, 3323), True, 'import numpy as np\n'), ((3458, 3483), 'numpy.median', 'np.median', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (3467, 3483), True, 'import numpy as np\n'), ((3791, 3809), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (3801, 3809), True, 'import numpy as np\n'), ((4497, 4515), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (4507, 4515), True, 'import numpy as np\n'), ((4794, 4814), 'numpy.arctan', 'np.arctan', (['(num / den)'], {}), '(num / den)\n', (4803, 4814), True, 'import numpy as np\n'), ((4831, 4844), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4837, 4844), True, 'import numpy as np\n'), ((4861, 4874), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4867, 4874), True, 'import numpy as np\n'), ((5479, 5497), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (5489, 5497), True, 'import numpy as np\n'), ((5954, 5972), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (5964, 5972), True, 'import numpy as np\n'), ((6015, 6052), 'scipy.optimize.minimize', 'minimize', (['dtot', 'start'], {'args': '(points,)'}), '(dtot, start, args=(points,))\n', (6023, 6052), False, 'from scipy.optimize import minimize\n'), ((6781, 6802), 'copy.deepcopy', 'copy.deepcopy', (['points'], {}), '(points)\n', (6794, 6802), False, 'import copy\n'), ((591, 606), 'pysal.lib.cg.is_clockwise', 
'is_clockwise', (['x'], {}), '(x)\n', (603, 606), False, 'from pysal.lib.cg import get_angle_between, Ray, is_clockwise\n'), ((2830, 2848), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (2840, 2848), True, 'import numpy as np\n'), ((2850, 2869), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (2860, 2869), True, 'import numpy as np\n'), ((2949, 2966), 'numpy.dot', 'np.dot', (['w', 'points'], {}), '(w, points)\n', (2955, 2966), True, 'import numpy as np\n'), ((3430, 3446), 'warnings.warn', 'warnings.warn', (['s'], {}), '(s)\n', (3443, 3446), False, 'import warnings\n'), ((4727, 4766), 'numpy.sqrt', 'np.sqrt', (['((xss - yss) ** 2 + 4 * cv ** 2)'], {}), '((xss - yss) ** 2 + 4 * cv ** 2)\n', (4734, 4766), True, 'import numpy as np\n'), ((5034, 5047), 'numpy.sqrt', 'np.sqrt', (['sd_x'], {}), '(sd_x)\n', (5041, 5047), True, 'import numpy as np\n'), ((5049, 5062), 'numpy.sqrt', 'np.sqrt', (['sd_y'], {}), '(sd_y)\n', (5056, 5062), True, 'import numpy as np\n'), ((7065, 7092), 'numpy.lexsort', 'np.lexsort', (['(radii, angles)'], {}), '((radii, angles))\n', (7075, 7092), True, 'import numpy as np\n'), ((5572, 5598), 'numpy.sqrt', 'np.sqrt', (['(xd * xd + yd * yd)'], {}), '(xd * xd + yd * yd)\n', (5579, 5598), True, 'import numpy as np\n'), ((7794, 7803), 'pysal.lib.cg.Ray', 'Ray', (['q', 'p'], {}), '(q, p)\n', (7797, 7803), False, 'from pysal.lib.cg import get_angle_between, Ray, is_clockwise\n'), ((7803, 7812), 'pysal.lib.cg.Ray', 'Ray', (['q', 'r'], {}), '(q, r)\n', (7806, 7812), False, 'from pysal.lib.cg import get_angle_between, Ray, is_clockwise\n')] |
# ======================================================================================================================
# * Weighted Holistic Atom Localization and Entity Shape (WHALES) descriptors *
# v. 1, May 2018
# ----------------------------------------------------------------------------------------------------------------------
# This file contains all the necessary functions to calculate WHALES descriptors for the
# molecules contained in an rdkit supplier.
#
# <NAME>, May 2018, ETH Zurich & University of Milano-Bicocca, <EMAIL>
# please cite as:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>
# "Scaffold hopping from natural products to synthetic mimetics by holistic molecular similarity",
# Nature Communications Chemistry 1, 44, 2018.
# ======================================================================================================================
import time
import numpy as np
import pandas as ps
import rdkit.Chem as Chem
import lcm
import mol_properties
# ----------------------------------------------------------------------------------------------------------------------
def whales_from_mol(mol, charge_threshold=0, do_charge=True, property_name=''):
    """
    Compute the WHALES descriptor vector for a single rdkit molecule.

    Args:
        mol: rdkit molecule (or None for a failed import).
        charge_threshold: atoms with |partial charge| below this value are
            discarded before computing descriptors (passed to do_lcd).
        do_charge: whether partial charges should be computed/looked up.
        property_name: name of the precomputed atomic property to use.

    Returns:
        x: (33,) descriptor vector, filled with -999.0 on failure.
        lab: list of descriptor labels, or None when the molecule could not
            be loaded or no charges were available.
    """
    # BUG FIX: 'lab' was referenced in the return statement but never
    # assigned on the two error paths, raising UnboundLocalError.
    lab = None
    # check for correct molecule import, throw an error if import/sanitization fail
    mol, err = import_mol(mol)
    errors = 0
    if err == 1:
        x = np.full((33,), -999.0)
        errors += err
        print('Molecule not loaded.')
    else:
        # coordinates and partial charges (checks for computed charges)
        coords, w, err = mol_properties.get_coordinates_and_prop(mol, property_name, do_charge)
        if err == 0:  # no errors in charge
            # does descriptors
            x, lab = do_lcd(coords, w, charge_threshold)
        else:
            x = np.full((33,), -999.0)
            errors += 1
            print('No computed charges.')
    return x, lab
def import_mol(mol):
    """
    Sanitize an rdkit molecule (kekulization excluded).

    Args:
        mol: rdkit Mol, or None for a failed upstream import.

    Returns:
        (mol, err): the (sanitized) molecule and an error flag, 1 when mol
        is None and 0 otherwise.

    Raises:
        ValueError: if sanitization fails.
    """
    # options for sanitization: everything except kekulization
    san_opt = Chem.SanitizeFlags.SANITIZE_ALL ^ Chem.SanitizeFlags.SANITIZE_KEKULIZE
    err = 0
    if mol is None:
        err = 1
    else:
        # sanitize
        sanit_fail = Chem.SanitizeMol(mol, catchErrors=True, sanitizeOps=san_opt)
        if sanit_fail:
            # BUG FIX: the original also set `err = 1` after this raise,
            # which was unreachable dead code and has been removed.
            raise ValueError(sanit_fail)
    return mol, err
# ----------------------------------------------------------------------------------------------------------------------
def do_lcd(coords, w, thr):
    """
    Core function for computing 3D LCD descriptors, starting from the
    coordinates and the partial charges.

    :param coords: molecular 3D coordinate matrix (n_at x 3)
    :param w: partial charges (n_at x 1)
    :param thr: threshold used to retain atoms (e.g., 0.001)
    :return:
        x_all: descriptors for the molecule (1 x p)
        lab_all: descriptor labels (1 x p)
    """
    # Atom-level Mahalanobis description, sign-adjusted by charge, then
    # aggregated into molecule-level percentile descriptors.
    atomic = lcm.lmahal(coords, w)
    atomic = apply_sign(w, atomic, thr)
    return extract_lcm(atomic)
# ----------------------------------------------------------------------------------------------------------------------
def apply_sign(w, res, thr=0):
    """
    Applies the sign to negatively charged atoms and filters weak atoms.
    :param w: partial charge, shaped (n_at, 1)
    :param res: computed atomic descriptors (sign flip happens in place)
    :param thr: threshold to consider atoms as negatively charged (default is 0); other atoms are removed
    :return: computed atomic descriptors with adjusted sign
    """
    # rows whose charge is negative get their descriptors negated (in place)
    negative_rows, _ = np.where(w < 0)
    res[negative_rows, :] *= -1
    # rows whose |charge| falls below the threshold are discarded
    weak_rows, _ = np.where(abs(w) < thr)
    return np.delete(res, weak_rows, 0)
# ----------------------------------------------------------------------------------------------------------------------
def extract_lcm(data, start=0, end=100, step=10, lab_string=''):
    """
    Extracts descriptors referred to the whole molecule from numbers referred to atoms, e.g., R and I.
    ====================================================================================================================
    :param:
        data (n_atom x p): atomic description
        start (int): minimum percentile (default = minimum value)
        end (int): maximum percentile (default = maximum value)
        step (int): step for percentiles generation (default, 10 corresponds to deciles)
        lab_string(str): additional string to be added to differentiate weighting schemes
    :returns
        x(1 x p1): molecular description based on percentiles
        labels(1 x p1): descriptor labels
    ====================================================================================================================
    """
    # percentile levels according to the specified settings (deciles by default)
    levels = list(range(start, end + 1, step))
    per_level = np.percentile(data, levels, axis=0)
    # flatten column-by-column so the ordering R, I, I/R is preserved,
    # then round to the third decimal place
    x = np.round(np.concatenate((per_level[:, 0], per_level[:, 1], per_level[:, 2]), axis=0), 3)
    # one label per (descriptor family, percentile level) pair
    labels = [
        prefix + lab_string + str(int(level / 10))
        for prefix in ('R_', 'I_', 'IR_')
        for level in levels
    ]
    return x, labels
| [
"mol_properties.get_coordinates_and_prop",
"numpy.where",
"numpy.delete",
"rdkit.Chem.SanitizeMol",
"numpy.concatenate",
"numpy.full",
"lcm.lmahal",
"numpy.round"
] | [((2986, 3007), 'lcm.lmahal', 'lcm.lmahal', (['coords', 'w'], {}), '(coords, w)\n', (2996, 3007), False, 'import lcm\n'), ((3663, 3678), 'numpy.where', 'np.where', (['(w < 0)'], {}), '(w < 0)\n', (3671, 3678), True, 'import numpy as np\n'), ((3797, 3817), 'numpy.delete', 'np.delete', (['res', 'a', '(0)'], {}), '(res, a, 0)\n', (3806, 3817), True, 'import numpy as np\n'), ((5000, 5051), 'numpy.concatenate', 'np.concatenate', (['(x[:, 0], x[:, 1], x[:, 2])'], {'axis': '(0)'}), '((x[:, 0], x[:, 1], x[:, 2]), axis=0)\n', (5014, 5051), True, 'import numpy as np\n'), ((5153, 5167), 'numpy.round', 'np.round', (['x', '(3)'], {}), '(x, 3)\n', (5161, 5167), True, 'import numpy as np\n'), ((1375, 1397), 'numpy.full', 'np.full', (['(33,)', '(-999.0)'], {}), '((33,), -999.0)\n', (1382, 1397), True, 'import numpy as np\n'), ((1565, 1635), 'mol_properties.get_coordinates_and_prop', 'mol_properties.get_coordinates_and_prop', (['mol', 'property_name', 'do_charge'], {}), '(mol, property_name, do_charge)\n', (1604, 1635), False, 'import mol_properties\n'), ((2166, 2226), 'rdkit.Chem.SanitizeMol', 'Chem.SanitizeMol', (['mol'], {'catchErrors': '(True)', 'sanitizeOps': 'san_opt'}), '(mol, catchErrors=True, sanitizeOps=san_opt)\n', (2182, 2226), True, 'import rdkit.Chem as Chem\n'), ((1798, 1820), 'numpy.full', 'np.full', (['(33,)', '(-999.0)'], {}), '((33,), -999.0)\n', (1805, 1820), True, 'import numpy as np\n')] |
"""Plot energy dispersion example."""
import matplotlib.pyplot as plt
import astropy.units as u
import numpy as np
from gammapy.irf import EnergyDispersion
# 101 logarithmically spaced energy bin edges between 0.1 and 100 TeV
energy_edges = np.logspace(-1, 2, 101) * u.TeV
# Gaussian energy dispersion with 30 % resolution on a square true/reco grid
energy_dispersion = EnergyDispersion.from_gauss(
    e_true=energy_edges,
    e_reco=energy_edges,
    sigma=0.3,
)
energy_dispersion.plot_matrix()
plt.show()
| [
"gammapy.irf.EnergyDispersion.from_gauss",
"numpy.logspace",
"matplotlib.pyplot.show"
] | [((219, 289), 'gammapy.irf.EnergyDispersion.from_gauss', 'EnergyDispersion.from_gauss', ([], {'e_true': 'ebounds', 'e_reco': 'ebounds', 'sigma': '(0.3)'}), '(e_true=ebounds, e_reco=ebounds, sigma=0.3)\n', (246, 289), False, 'from gammapy.irf import EnergyDispersion\n'), ((419, 429), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (427, 429), True, 'import matplotlib.pyplot as plt\n'), ((167, 190), 'numpy.logspace', 'np.logspace', (['(-1)', '(2)', '(101)'], {}), '(-1, 2, 101)\n', (178, 190), True, 'import numpy as np\n')] |
#!/usr/bin/env/python
from typing import Tuple, List, Any, Sequence
import tensorflow as tf
import time
import os
import json
import numpy as np
import pickle
import random
import utils
from utils import MLP, dataset_info, ThreadedIterator, graph_to_adj_mat, SMALL_NUMBER, LARGE_NUMBER, graph_to_adj_mat
import csv
class ChemModel(object):
    """Base class for a graph variational auto-encoder over molecule/graph
    datasets (TensorFlow 1.x style: explicit graph + session).

    Subclasses must override the methods that merely raise
    ``Exception("Models have to implement ...")`` — graph preparation,
    encoder/decoder construction, latent sampling, loss, minibatch iteration
    and generation.  The constructor wires together parameter collection,
    data loading, TF graph/session construction and weight init/restore.
    """
    @classmethod
    def default_params(cls):
        # Base class defines no defaults; subclasses override and extend.
        return {
        }
    def __init__(self, args):
        """Build the model from a dict-like ``args`` of command-line options."""
        self.args = args
        data_dir = ''
        if '--data_dir' in args and args['--data_dir'] is not None:
            data_dir = args['--data_dir']
        self.data_dir = data_dir
        if 'dataset' in args:
            self.dataset = dataset = args['dataset']
        else:
            self.dataset = dataset = "ba" # "qm9"#args.get('--dataset')
        # CSV sink for per-graph latent statistics; header is written up front.
        self.results_file_path = "latent_vars.csv"
        with open(self.results_file_path,"w") as f:
            f.write("m,n,z0_mean,z0_var,z1_mean,z1_var,z2_mean,z2_var,z3_mean,z3_var,z4_mean,z4_var\n")
        self.num_edge_types = 1
        # Collect parameters:
        self.params = params = self.default_params()
        self.params['dataset'] = dataset
        self.params['train_file'] ='data/molecules_train_%s.json' % self.dataset
        self.params['valid_file'] = 'data/molecules_valid_%s.json' % self.dataset
        # Per-hyperparameter command-line overrides (only applied when present).
        if 'batch_size' in args: self.params['batch_size'] = args['batch_size']
        if 'num_epochs' in args: self.params['num_epochs'] = self.params['epoch_to_generate'] = args['num_epochs']
        if 'hidden_size' in args: self.params['hidden_size'] = args['hidden_size']
        if 'lr' in args: self.params['lr'] = args['lr']
        if 'kl_trade_off_lambda' in args: self.params['kl_trade_off_lambda'] = args['kl_trade_off_lambda']
        if 'optimization_step' in args: self.params['optimization_step'] = args['optimization_step']
        self.params['num_symbols'] = 1
        # Unique run id (timestamp + pid) used to name log/model files.
        self.run_id = "_".join([time.strftime("%Y-%m-%d-%H-%M-%S"), str(os.getpid())])
        log_dir = args.get('--log_dir') or '.'
        self.log_file = os.path.join(log_dir, "%s_log_%s.json" % (self.run_id, dataset))
        self.best_model_file = os.path.join(log_dir, "%s_model.pickle" % self.run_id)
        with open(os.path.join(log_dir, "%s_params_%s.json" % (self.run_id, dataset)), "w") as f:
            json.dump(params, f)
        print("Run %s starting with following parameters:\n%s" % (self.run_id, json.dumps(self.params)))
        random.seed(params['random_seed'])
        np.random.seed(params['random_seed'])
        # Load data:
        # NOTE(review): `self.max_num_vertices` appears twice in this tuple
        # assignment; the second slot was presumably meant to be a different
        # attribute. Harmless as written, but worth confirming the intent.
        self.max_num_vertices, self.max_num_vertices, self.annotation_size = 0,0,0
        self.train_data = self.load_data(params['train_file'], is_training_data=True)
        self.valid_data = self.load_data(params['valid_file'], is_training_data=False)
        # Build the actual model
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.graph = tf.Graph()
        # self.sess = tf.InteractiveSession(graph=self.graph, config=config)
        # from tensorflow.python import debug as tf_debug
        # self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)
        self.sess = tf.Session(graph=self.graph, config=config)
        #self.sess = tf.Session()
        # self.sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        with self.graph.as_default():
            tf.set_random_seed(params['random_seed'])
            self.placeholders = {}
            self.weights = {}
            self.ops = {}
            self.make_model()
            self.make_train_step()
            # Restore/initialize variables:
            restore_file = args.get('--restore')
            if restore_file is not None:
                self.restore_model(restore_file)
            else:
                self.initialize_model()
    def load_data(self, file_name, is_training_data: bool):
        """Load a JSON graph dataset and update dataset-wide statistics
        (``max_num_vertices``, ``num_edge_types``, ``annotation_size``)
        before delegating to the subclass's ``process_raw_graphs``.
        """
        full_path = os.path.join(self.data_dir, file_name)
        print("Loading data from %s" % full_path)
        with open(full_path, 'r') as f:
            data = json.load(f)
        # Optionally truncate the dataset for quick debugging runs.
        restrict = self.args.get("--restrict_data")
        if restrict is not None and restrict > 0:
            data = data[:restrict]
        # Get some common data out:
        num_fwd_edge_types = len(utils.bond_dict) - 1
        for g in data:
            self.max_num_vertices = max(self.max_num_vertices, max([v for e in g['graph'] for v in [e[0], e[2]]]))
        self.num_edge_types = max(self.num_edge_types, num_fwd_edge_types * (1 if self.params['tie_fwd_bkwd'] else 2))
        self.annotation_size = max(self.annotation_size, len(data[0]["node_features"][0]))
        return self.process_raw_graphs(data, is_training_data, file_name)
    @staticmethod
    def graph_string_to_array(graph_string: str) -> List[List[int]]:
        """Parse a newline-separated, space-delimited string into int rows."""
        return [[int(v) for v in s.split(' ')]
                for s in graph_string.split('\n')]
    def process_raw_graphs(self, raw_data, is_training_data, file_name, bucket_sizes=None):
        raise Exception("Models have to implement process_raw_graphs!")
    def make_model(self):
        """Assemble the model graph: placeholders, node embedding, optional
        message passing (with/without residual connections), latent mean/
        logvariance, sampling, decoder logits and the training loss."""
        self.placeholders['target_values'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],
                                                            name='target_values')
        self.placeholders['target_mask'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],
                                                          name='target_mask')
        self.placeholders['num_graphs'] = tf.placeholder(tf.int64, [], name='num_graphs')
        self.placeholders['out_layer_dropout_keep_prob'] = tf.placeholder(tf.float32, [],
                                                                          name='out_layer_dropout_keep_prob')
        # whether this session is for generating new graphs or not
        self.placeholders['is_generative'] = tf.placeholder(tf.bool, [], name='is_generative')
        with tf.variable_scope("graph_model"):
            self.prepare_specific_graph_model()
            # Initial state: embedding
            initial_state = self.get_node_embedding_state(self.placeholders['initial_node_representation'])
            # This does the actual graph work:
            if self.params['use_graph']:
                if self.params["residual_connection_on"]:
                    self.ops['final_node_representations'] = self.compute_final_node_representations_with_residual(
                        initial_state,
                        tf.transpose(self.placeholders['adjacency_matrix'], [1, 0, 2, 3]), "_encoder")
                else:
                    self.ops['final_node_representations'] = self.compute_final_node_representations_without_residual(
                        initial_state,
                        tf.transpose(self.placeholders['adjacency_matrix'], [1, 0, 2, 3]),
                        self.weights['edge_weights_encoder'],
                        self.weights['edge_biases_encoder'],
                        self.weights['node_gru_encoder'], "gru_scope_encoder")
            else:
                self.ops['final_node_representations'] = initial_state
        # Calculate p(z|x)'s mean and log variance
        self.ops['mean'], self.ops['logvariance'] = self.compute_mean_and_logvariance()
        # Sample from a gaussian distribution according to the mean and log variance
        self.ops['z_sampled'] = self.sample_with_mean_and_logvariance()
        # Construct logit matrices for both edges and edge types
        self.construct_logit_matrices()
        self.ops['loss'] = self.construct_loss()
    def make_train_step(self):
        """Create the Adam optimizer, clip gradients by norm, and register
        the training op plus (clipped) gradients for display."""
        trainable_vars = self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        optimizer = tf.train.AdamOptimizer(self.params['learning_rate'])
        grads_and_vars = optimizer.compute_gradients(self.ops['loss'], var_list=trainable_vars)
        clipped_grads = []
        for grad, var in grads_and_vars:
            if grad is not None:
                clipped_grads.append((tf.clip_by_norm(grad, self.params['clamp_gradient_norm']), var))
            else:
                clipped_grads.append((grad, var))
        grads_for_display = []
        for grad, var in grads_and_vars:
            if grad is not None:
                grads_for_display.append((tf.clip_by_norm(grad, self.params['clamp_gradient_norm']), var))
        self.ops['grads'] = grads_for_display
        self.ops['train_step'] = optimizer.apply_gradients(clipped_grads)
        # Initialize newly-introduced variables:
        self.sess.run(tf.local_variables_initializer())
    def gated_regression(self, last_h, regression_gate, regression_transform):
        raise Exception("Models have to implement gated_regression!")
    def prepare_specific_graph_model(self) -> None:
        raise Exception("Models have to implement prepare_specific_graph_model!")
    def compute_mean_and_logvariance(self):
        raise Exception("Models have to implement compute_mean_and_logvariance!")
    def sample_with_mean_and_logvariance(self):
        raise Exception("Models have to implement sample_with_mean_and_logvariance!")
    def construct_logit_matrices(self):
        raise Exception("Models have to implement construct_logit_matrices!")
    def construct_loss(self):
        raise Exception("Models have to implement construct_loss!")
    def make_minibatch_iterator(self, data: Any, is_training: bool):
        raise Exception("Models have to implement make_minibatch_iterator!")
    """
    def save_intermediate_results(self, adjacency_matrix, edge_type_prob, label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels, mean, logvariance):
        with open('intermediate_results_%s' % self.params["dataset"], 'wb') as out_file:
            pickle.dump([adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels, mean, logvariance], out_file, pickle.HIGHEST_PROTOCOL)
    """
    def save_probs(self, all_results):
        """Pickle the per-epoch probability matrices to disk."""
        with open('epoch_prob_matices_%s' % self.params["dataset"], 'wb') as out_file:
            pickle.dump([all_results], out_file, pickle.HIGHEST_PROTOCOL)
    def run_epoch(self, epoch_name: str, epoch_num, data, is_training: bool):
        """Run one pass over ``data``; returns (mean loss, graphs per second).

        Minibatches are produced on a background thread via ThreadedIterator.
        """
        loss = 0
        start_time = time.time()
        processed_graphs = 0
        batch_iterator = ThreadedIterator(self.make_minibatch_iterator(data, is_training), max_queue_size=5)
        for step, batch_data in enumerate(batch_iterator):
            num_graphs = batch_data[self.placeholders['num_graphs']]
            processed_graphs += num_graphs
            batch_data[self.placeholders['is_generative']] = False
            # Randomly sample from normal distribution
            batch_data[self.placeholders['z_prior']] = utils.generate_std_normal( \
                self.params['batch_size'], batch_data[self.placeholders['num_vertices']], self.params['hidden_size'])
            if is_training:
                batch_data[self.placeholders['out_layer_dropout_keep_prob']] = self.params['out_layer_dropout_keep_prob']
                fetch_list = [self.ops['loss'], #0 - float
                              self.ops['train_step'], #1 - None
                              self.ops["edge_loss"], #2 - shape (batch_size,)
                              self.ops['kl_loss'], #3 - shape (batch_size,)
                              self.placeholders['target_values'], #4
                              self.ops['mean'], #5 shape (batch_size*10, 10)
                              self.ops['logvariance'], #6 shape (batch_size*10, 10)
                              self.ops['grads'], #7 - length 154 (????)
                              self.ops['mean_edge_loss'], #8 - float
                              self.ops['mean_kl_loss'], #9 - float
                              ]
            else:
                # validation: no dropout, only the mean edge loss is fetched
                batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
                fetch_list = [self.ops['mean_edge_loss']]
            result = self.sess.run(fetch_list, feed_dict=batch_data)
            """try:
                if is_training:
                    self.save_intermediate_results(batch_data[self.placeholders['adjacency_matrix']],
                    result[11], result[12], result[4], result[5], result[9], result[10], result[6], result[7], result[13], result[14])
            except IndexError:
                pass"""
            node_seqs = batch_data[self.placeholders['node_sequence']] # len batch size
            Ms = batch_data[self.placeholders['target_values']] # shape (1,batch_size)
            batch_size = len(node_seqs)
            # for gidx in range(batch_size):
            #     M = batch_data[self.placeholders['target_values']][0][gidx]
            #
            #     row = []#[m,n,]
            #     with open("losses_experiments.csv", "a", newline='') as fp:
            #         wr = csv.writer(fp, dialect='excel')
            #         wr.writerow(row)
            batch_loss = result[0]
            loss += batch_loss * num_graphs
            print("Running %s, batch %i (has %i graphs). Loss so far: %.4f" % (epoch_name,
                                                                               step,
                                                                               num_graphs,
                                                                               loss / processed_graphs), end='\r')
        if processed_graphs != 0:
            loss = loss / processed_graphs
        instance_per_sec = processed_graphs / (time.time() - start_time)
        return loss, instance_per_sec
    def generate_new_graphs(self, data):
        raise Exception("Models have to implement generate_new_graphs!")
    def train(self):
        """Main loop: run training and validation epochs, append results to
        the JSON log file, and checkpoint the model after every epoch."""
        log_to_save = []
        total_time_start = time.time()
        with self.graph.as_default():
            for epoch in range(1, self.params['num_epochs'] + 1):
                if not self.params['generation']:
                    print("== Epoch %i" % epoch)
                    train_loss, train_speed = self.run_epoch("epoch %i (training)" % epoch, epoch,
                                                             self.train_data, True)
                    print("\r\x1b[K Train: loss: %.5f| instances/sec: %.2f" % (train_loss, train_speed))
                    valid_loss, valid_speed = self.run_epoch("epoch %i (validation)" % epoch, epoch,
                                                             self.valid_data, False)
                    print("\r\x1b[K Valid: loss: %.5f | instances/sec: %.2f" % (valid_loss, valid_speed))
                    epoch_time = time.time() - total_time_start
                    log_entry = {'epoch': epoch, 'time': epoch_time, 'train_results': (train_loss, train_speed),}
                    log_to_save.append(log_entry)
                    with open(self.log_file, 'w') as f:
                        json.dump(log_to_save, f, indent=4)
                self.save_model(str(epoch) + ("_%s.pickle" % (self.params["dataset"])))
                # Run epochs for graph generation
                # if epoch >= self.params['epoch_to_generate']:
                #     self.generate_new_graphs(self.train_data)
    def save_model(self, path: str) -> None:
        """Pickle all global variable values (plus params) to ``path``."""
        weights_to_save = {}
        for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
            assert variable.name not in weights_to_save
            weights_to_save[variable.name] = self.sess.run(variable)
        data_to_save = {"params": self.params,"weights": weights_to_save}
        with open(path, 'wb') as out_file:
            pickle.dump(data_to_save, out_file, pickle.HIGHEST_PROTOCOL)
    def initialize_model(self) -> None:
        """Initialize all global and local variables in the session."""
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        self.sess.run(init_op)
    def restore_model(self, path: str) -> None:
        """Restore weights from a checkpoint written by ``save_model``.

        Variables absent from the checkpoint are freshly initialized; saved
        weights not used by the current model are reported but ignored.
        """
        print("Restoring weights from file %s." % path)
        with open(path, 'rb') as in_file:
            data_to_load = pickle.load(in_file)
        variables_to_initialize = []
        with tf.name_scope("restore"):
            restore_ops = []
            used_vars = set()
            for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
                used_vars.add(variable.name)
                if variable.name in data_to_load['weights']:
                    restore_ops.append(variable.assign(data_to_load['weights'][variable.name]))
                else:
                    print('Freshly initializing %s since no saved value was found.' % variable.name)
                    variables_to_initialize.append(variable)
            for var_name in data_to_load['weights']:
                if var_name not in used_vars:
                    print('Saved weights for %s not used by model.' % var_name)
            restore_ops.append(tf.variables_initializer(variables_to_initialize))
            self.sess.run(restore_ops) | [
"tensorflow.local_variables_initializer",
"tensorflow.transpose",
"tensorflow.set_random_seed",
"tensorflow.variables_initializer",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.placeholder",
"json.dumps",
"numpy.random.seed",
"os.getpid",
"tensorflow.ConfigProto",
"tensorflow.train.Ad... | [((2081, 2145), 'os.path.join', 'os.path.join', (['log_dir', "('%s_log_%s.json' % (self.run_id, dataset))"], {}), "(log_dir, '%s_log_%s.json' % (self.run_id, dataset))\n", (2093, 2145), False, 'import os\n'), ((2177, 2231), 'os.path.join', 'os.path.join', (['log_dir', "('%s_model.pickle' % self.run_id)"], {}), "(log_dir, '%s_model.pickle' % self.run_id)\n", (2189, 2231), False, 'import os\n'), ((2477, 2511), 'random.seed', 'random.seed', (["params['random_seed']"], {}), "(params['random_seed'])\n", (2488, 2511), False, 'import random\n'), ((2520, 2557), 'numpy.random.seed', 'np.random.seed', (["params['random_seed']"], {}), "(params['random_seed'])\n", (2534, 2557), True, 'import numpy as np\n'), ((2888, 2904), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2902, 2904), True, 'import tensorflow as tf\n'), ((2973, 2983), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2981, 2983), True, 'import tensorflow as tf\n'), ((3209, 3252), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph', 'config': 'config'}), '(graph=self.graph, config=config)\n', (3219, 3252), True, 'import tensorflow as tf\n'), ((3923, 3961), 'os.path.join', 'os.path.join', (['self.data_dir', 'file_name'], {}), '(self.data_dir, file_name)\n', (3935, 3961), False, 'import os\n'), ((5536, 5583), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[]'], {'name': '"""num_graphs"""'}), "(tf.int64, [], name='num_graphs')\n", (5550, 5583), True, 'import tensorflow as tf\n'), ((5643, 5709), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""out_layer_dropout_keep_prob"""'}), "(tf.float32, [], name='out_layer_dropout_keep_prob')\n", (5657, 5709), True, 'import tensorflow as tf\n'), ((5896, 5945), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool', '[]'], {'name': '"""is_generative"""'}), "(tf.bool, [], name='is_generative')\n", (5910, 5945), True, 'import tensorflow as tf\n'), ((7752, 7804), 
'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (["self.params['learning_rate']"], {}), "(self.params['learning_rate'])\n", (7774, 7804), True, 'import tensorflow as tf\n'), ((10355, 10366), 'time.time', 'time.time', ([], {}), '()\n', (10364, 10366), False, 'import time\n'), ((13878, 13889), 'time.time', 'time.time', ([], {}), '()\n', (13887, 13889), False, 'import time\n'), ((2343, 2363), 'json.dump', 'json.dump', (['params', 'f'], {}), '(params, f)\n', (2352, 2363), False, 'import json\n'), ((3402, 3443), 'tensorflow.set_random_seed', 'tf.set_random_seed', (["params['random_seed']"], {}), "(params['random_seed'])\n", (3420, 3443), True, 'import tensorflow as tf\n'), ((4072, 4084), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4081, 4084), False, 'import json\n'), ((5960, 5992), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""graph_model"""'], {}), "('graph_model')\n", (5977, 5992), True, 'import tensorflow as tf\n'), ((8579, 8611), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (8609, 8611), True, 'import tensorflow as tf\n'), ((10176, 10237), 'pickle.dump', 'pickle.dump', (['[all_results]', 'out_file', 'pickle.HIGHEST_PROTOCOL'], {}), '([all_results], out_file, pickle.HIGHEST_PROTOCOL)\n', (10187, 10237), False, 'import pickle\n'), ((10853, 10985), 'utils.generate_std_normal', 'utils.generate_std_normal', (["self.params['batch_size']", "batch_data[self.placeholders['num_vertices']]", "self.params['hidden_size']"], {}), "(self.params['batch_size'], batch_data[self.\n placeholders['num_vertices']], self.params['hidden_size'])\n", (10878, 10985), False, 'import utils\n'), ((15711, 15771), 'pickle.dump', 'pickle.dump', (['data_to_save', 'out_file', 'pickle.HIGHEST_PROTOCOL'], {}), '(data_to_save, out_file, pickle.HIGHEST_PROTOCOL)\n', (15722, 15771), False, 'import pickle\n'), ((15840, 15873), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (15871, 
15873), True, 'import tensorflow as tf\n'), ((15875, 15907), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (15905, 15907), True, 'import tensorflow as tf\n'), ((16114, 16134), 'pickle.load', 'pickle.load', (['in_file'], {}), '(in_file)\n', (16125, 16134), False, 'import pickle\n'), ((16186, 16210), 'tensorflow.name_scope', 'tf.name_scope', (['"""restore"""'], {}), "('restore')\n", (16199, 16210), True, 'import tensorflow as tf\n'), ((1955, 1989), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M-%S"""'], {}), "('%Y-%m-%d-%H-%M-%S')\n", (1968, 1989), False, 'import time\n'), ((2251, 2318), 'os.path.join', 'os.path.join', (['log_dir', "('%s_params_%s.json' % (self.run_id, dataset))"], {}), "(log_dir, '%s_params_%s.json' % (self.run_id, dataset))\n", (2263, 2318), False, 'import os\n'), ((13625, 13636), 'time.time', 'time.time', ([], {}), '()\n', (13634, 13636), False, 'import time\n'), ((16958, 17007), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['variables_to_initialize'], {}), '(variables_to_initialize)\n', (16982, 17007), True, 'import tensorflow as tf\n'), ((1995, 2006), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2004, 2006), False, 'import os\n'), ((2443, 2466), 'json.dumps', 'json.dumps', (['self.params'], {}), '(self.params)\n', (2453, 2466), False, 'import json\n'), ((6516, 6581), 'tensorflow.transpose', 'tf.transpose', (["self.placeholders['adjacency_matrix']", '[1, 0, 2, 3]'], {}), "(self.placeholders['adjacency_matrix'], [1, 0, 2, 3])\n", (6528, 6581), True, 'import tensorflow as tf\n'), ((6799, 6864), 'tensorflow.transpose', 'tf.transpose', (["self.placeholders['adjacency_matrix']", '[1, 0, 2, 3]'], {}), "(self.placeholders['adjacency_matrix'], [1, 0, 2, 3])\n", (6811, 6864), True, 'import tensorflow as tf\n'), ((8042, 8099), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['grad', "self.params['clamp_gradient_norm']"], {}), "(grad, self.params['clamp_gradient_norm'])\n", (8057, 
8099), True, 'import tensorflow as tf\n'), ((8323, 8380), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['grad', "self.params['clamp_gradient_norm']"], {}), "(grad, self.params['clamp_gradient_norm'])\n", (8338, 8380), True, 'import tensorflow as tf\n'), ((14710, 14721), 'time.time', 'time.time', ([], {}), '()\n', (14719, 14721), False, 'import time\n'), ((14987, 15022), 'json.dump', 'json.dump', (['log_to_save', 'f'], {'indent': '(4)'}), '(log_to_save, f, indent=4)\n', (14996, 15022), False, 'import json\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
from pandas.api.types import is_string_dtype, is_numeric_dtype
import logging
import os
import os.path as osp
import numpy as np
import json
from ray.tune.util import flatten_dict
logger = logging.getLogger(__name__)
def _parse_results(res_path):
    """Parse the last line of a result file as a flattened JSON dict.

    Returns an empty dict when the file is empty or cannot be parsed.
    """
    res_dict = {}
    try:
        with open(res_path) as f:
            # Get last line in file (results are appended one JSON per line)
            last_line = None
            for line in f:
                last_line = line
        # BUGFIX: the original left `line` unbound for an empty file and
        # relied on the resulting NameError being caught below; handle the
        # empty case explicitly instead.
        if last_line is None:
            logger.warning("Result file %s is empty." % res_path)
        else:
            res_dict = flatten_dict(json.loads(last_line.strip()))
    except Exception:
        logger.exception("Importing %s failed...Perhaps empty?" % res_path)
    return res_dict
def _parse_configs(cfg_path):
    """Parse a params.json config file into a flattened dict.

    Returns an empty dict when the file is missing or unparsable.
    """
    # BUGFIX: `cfg_dict` was only assigned inside the try-block, so a parse
    # failure reached `return cfg_dict` with the name unbound and raised
    # UnboundLocalError, masking the logged error. Initialize it first.
    cfg_dict = {}
    try:
        with open(cfg_path) as f:
            cfg_dict = flatten_dict(json.load(f))
    except Exception:
        logger.exception("Config parsing failed.")
    return cfg_dict
def _resolve(directory, result_fname):
    """Merge the parsed result and config of one experiment directory.

    Result keys take precedence over config keys; returns None on failure.
    """
    try:
        result_path = osp.join(directory, result_fname)
        parsed_result = _parse_results(result_path)
        config_path = osp.join(directory, "params.json")
        merged = _parse_configs(config_path)
        merged.update(parsed_result)
        return merged
    except Exception:
        return None
def load_results_to_df(directory, result_name="result.json"):
    """Collect every experiment under `directory` into a DataFrame.

    An experiment directory is any directory containing `result_name`;
    unparsable experiments (where `_resolve` returns a falsy value) are
    dropped.
    """
    experiment_dirs = [
        dirpath
        for dirpath, dirs, files in os.walk(directory)
        for fname in files
        if fname == result_name
    ]
    rows = [_resolve(exp_dir, result_name) for exp_dir in experiment_dirs]
    rows = [row for row in rows if row]
    return pd.DataFrame(rows)
def generate_plotly_dim_dict(df, field):
    """Build a plotly parallel-coordinates dimension dict for one column.

    Numeric columns are passed through as-is; string columns are encoded as
    integer codes with matching tick labels. Any other dtype raises.
    """
    column = df[field]
    dim_dict = {"label": field}
    if is_numeric_dtype(column):
        dim_dict["values"] = column
        return dim_dict
    if is_string_dtype(column):
        texts = column.unique()
        # encode each entry as the index of its first occurrence in `texts`
        dim_dict["values"] = [
            np.argwhere(texts == entry).flatten()[0] for entry in column
        ]
        dim_dict["tickvals"] = list(range(len(texts)))
        dim_dict["ticktext"] = texts
        return dim_dict
    raise Exception("Unidentifiable Type")
| [
"logging.getLogger",
"pandas.api.types.is_numeric_dtype",
"pandas.api.types.is_string_dtype",
"os.path.join",
"json.load",
"numpy.argwhere",
"pandas.DataFrame",
"os.walk"
] | [((321, 348), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (338, 348), False, 'import logging\n'), ((1563, 1581), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1575, 1581), True, 'import pandas as pd\n'), ((1703, 1727), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['column'], {}), '(column)\n', (1719, 1727), False, 'from pandas.api.types import is_string_dtype, is_numeric_dtype\n'), ((988, 1021), 'os.path.join', 'osp.join', (['directory', 'result_fname'], {}), '(directory, result_fname)\n', (996, 1021), True, 'import os.path as osp\n'), ((1080, 1114), 'os.path.join', 'osp.join', (['directory', '"""params.json"""'], {}), "(directory, 'params.json')\n", (1088, 1114), True, 'import os.path as osp\n'), ((1774, 1797), 'pandas.api.types.is_string_dtype', 'is_string_dtype', (['column'], {}), '(column)\n', (1789, 1797), False, 'from pandas.api.types import is_string_dtype, is_numeric_dtype\n'), ((1387, 1405), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1394, 1405), False, 'import os\n'), ((813, 825), 'json.load', 'json.load', (['f'], {}), '(f)\n', (822, 825), False, 'import json\n'), ((1874, 1897), 'numpy.argwhere', 'np.argwhere', (['(texts == x)'], {}), '(texts == x)\n', (1885, 1897), True, 'import numpy as np\n')] |
"""
Functions related to STAAR
"""
import numpy as np
from scipy.stats import cauchy
# Standard Cauchy distribution used to turn the combined statistic back
# into a p-value.
c = cauchy()
def cct(pvals, weights=None):
    """
    Python port of the CCT function as defined in the STAAR R-package (https://github.com/xihaoli/STAAR/blob/2f67fafec591a45e81a54eca24564b09ce90e252/R/CCT.R)
    An analytical p-value combination method using the Cauchy distribution.
    takes in a numeric vector of p-values, a numeric vector of non-negative weights, and return the aggregated p-value using Cauchy method.
    :param np.ndarray pvals: Numpy array containing the p-values
    :param np.ndarray weights: Numpy array containing the weights (need not
        be normalized; the caller's array is never modified)
    :return: The aggregated p-value
    :rtype: float
    <NAME>., & <NAME>. (2020). Cauchy combination test: a powerful test with analytic p-value calculation under arbitrary dependency structures.
    <NAME>., et al. (2019). Acat: A fast and powerful p value combination method for rare-variant analysis in sequencing studies.
    """
    # check for NA
    assert not np.isnan(pvals).any(), 'Error: Cannot have nan in the p-values!'
    # check range
    assert not (((pvals < 0).sum() + (pvals > 1).sum()) > 0), "Error: All p-values must be between 0 and 1"
    # check for p-values that are either exactly 0 or 1
    is_zero = (pvals == 0.).any()
    is_one = (pvals == 1.).any()
    assert not (is_zero & is_one), 'Error: Cannot have both 0 and 1 p-values'
    if is_zero:
        return 0
    if is_one:
        print('Warning: there are p-values that are exactly 1!')
        return 1.
    # check the validity of weights
    if weights is None:
        # default: equal weights summing to 1
        weights = np.full(len(pvals), 1. / len(pvals))
    else:
        assert len(weights) == len(pvals), 'Error: length of weights should be the same as that of the p-values!'
        assert not ((weights < 0).any()), 'Error: all weights must be positive!'
        # BUGFIX: the original `weights /= weights.sum()` normalized the
        # caller's array in place (and raised for integer arrays); normalize
        # into a fresh float array instead.
        weights = np.asarray(weights, dtype=float) / np.sum(weights)
    # very small p-values are transformed via 1/(pi*p) to avoid tan() overflow
    is_small = pvals < 1e-16
    if not is_small.any():
        cct_stat = (weights * np.tan((0.5 - pvals) * np.pi)).sum()
    else:
        cct_stat = (weights[is_small] / pvals[is_small] / np.pi).sum()
        cct_stat += (weights[~is_small] * np.tan((0.5 - pvals[~is_small]) * np.pi)).sum()
    # for very large statistics the Cauchy survival function is ~ 1/(pi*x)
    if (cct_stat > 1e15):
        pval = (1. / cct_stat) / np.pi
    else:
        pval = c.sf(cct_stat)
    return pval
| [
"scipy.stats.cauchy",
"numpy.isnan",
"numpy.ones_like",
"numpy.tan"
] | [((94, 102), 'scipy.stats.cauchy', 'cauchy', ([], {}), '()\n', (100, 102), False, 'from scipy.stats import cauchy\n'), ((1630, 1649), 'numpy.ones_like', 'np.ones_like', (['pvals'], {}), '(pvals)\n', (1642, 1649), True, 'import numpy as np\n'), ((1025, 1040), 'numpy.isnan', 'np.isnan', (['pvals'], {}), '(pvals)\n', (1033, 1040), True, 'import numpy as np\n'), ((2042, 2071), 'numpy.tan', 'np.tan', (['((0.5 - pvals) * np.pi)'], {}), '((0.5 - pvals) * np.pi)\n', (2048, 2071), True, 'import numpy as np\n'), ((2202, 2242), 'numpy.tan', 'np.tan', (['((0.5 - pvals[~is_small]) * np.pi)'], {}), '((0.5 - pvals[~is_small]) * np.pi)\n', (2208, 2242), True, 'import numpy as np\n')] |
import matlab.engine
import argparse
import torch
from torch.autograd import Variable
import numpy as np
import time, math, glob
import scipy.io as sio
import cv2
# Command-line interface for the evaluation script.
parser = argparse.ArgumentParser(description="PyTorch EDSR Eval")
parser.add_argument("--cuda", action="store_true", help="use cuda?")
parser.add_argument("--model", default="checkpoint/model_edsr.pth", type=str, help="model path")
parser.add_argument("--dataset", default="Set5", type=str, help="dataset name, Default: Set5")
parser.add_argument("--scale", default=4, type=int, help="scale factor, Default: 4")
def PSNR(pred, gt, shave_border=0):
    """Peak signal-to-noise ratio between a prediction and its ground truth.

    Both images are cropped by `shave_border` pixels on every side before
    comparison (standard practice in SR evaluation, since border pixels are
    unreliable). Returns 100 for a perfect match, otherwise the PSNR in dB
    assuming an 8-bit (0-255) intensity range.
    """
    h, w = pred.shape[:2]
    crop_pred = pred[shave_border:h - shave_border, shave_border:w - shave_border]
    crop_gt = gt[shave_border:h - shave_border, shave_border:w - shave_border]
    diff = crop_pred - crop_gt
    rmse = math.sqrt(np.mean(diff * diff))
    # Identical images would divide by zero below; report the conventional cap.
    if rmse == 0:
        return 100
    return 20 * math.log10(255.0 / rmse)
opt = parser.parse_args()
cuda = opt.cuda
# MATLAB engine is used only for its rgb2ycbcr conversion further below.
eng = matlab.engine.start_matlab()

if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

# The checkpoint stores the network under the "model" key.
model = torch.load(opt.model)["model"]

image_list = glob.glob(opt.dataset+"/*.*")

# Running accumulators, averaged over the dataset after the loop.
avg_psnr_predicted = 0.0
avg_psnr_bicubic = 0.0
avg_elapsed_time = 0.0

for image_name in image_list:
    print("Processing ", image_name)
    # Each .mat file holds the ground-truth Y channel, the bicubic-upscaled
    # Y channel (baseline), and the low-resolution RGB input.
    im_gt_y = sio.loadmat(image_name)['im_gt_y']
    im_b_y = sio.loadmat(image_name)['im_b_y']
    im_l = sio.loadmat(image_name)['im_l']

    im_gt_y = im_gt_y.astype(float)
    im_b_y = im_b_y.astype(float)
    im_l = im_l.astype(float)

    # Baseline: PSNR of bicubic upscaling vs ground truth.
    psnr_bicubic = PSNR(im_gt_y, im_b_y,shave_border=opt.scale)
    avg_psnr_bicubic += psnr_bicubic

    # HWC -> CHW, add batch dim, scale to [0, 1] for the network.
    im_input = im_l.astype(np.float32).transpose(2,0,1)
    im_input = im_input.reshape(1,im_input.shape[0],im_input.shape[1],im_input.shape[2])
    im_input = Variable(torch.from_numpy(im_input/255.).float())

    if cuda:
        model = model.cuda()
        im_input = im_input.cuda()
    else:
        model = model.cpu()

    # Time only the forward pass.
    start_time = time.time()
    HR_4x = model(im_input)
    elapsed_time = time.time() - start_time
    avg_elapsed_time += elapsed_time

    HR_4x = HR_4x.cpu()

    # Back to HWC in [0, 255] for color-space conversion.
    im_h = HR_4x.data[0].numpy().astype(np.float32)

    im_h = im_h*255.
    im_h = np.clip(im_h, 0., 255.)
    im_h = im_h.transpose(1,2,0).astype(np.float32)

    # Convert RGB -> YCbCr with MATLAB (matches the reference evaluation);
    # MATLAB returns column-major data, hence the order='F' reshape.
    im_h_matlab = matlab.double((im_h / 255.).tolist())
    im_h_ycbcr = eng.rgb2ycbcr(im_h_matlab)
    im_h_ycbcr = np.array(im_h_ycbcr._data).reshape(im_h_ycbcr.size, order='F').astype(np.float32) * 255.
    # PSNR is evaluated on the luma (Y) channel only.
    im_h_y = im_h_ycbcr[:,:,0]

    psnr_predicted = PSNR(im_gt_y, im_h_y,shave_border=opt.scale)
    avg_psnr_predicted += psnr_predicted

print("Scale=", opt.scale)
print("Dataset=", opt.dataset)
print("PSNR_predicted=", avg_psnr_predicted/len(image_list))
print("PSNR_bicubic=", avg_psnr_bicubic/len(image_list))
print("It takes average {}s for processing".format(avg_elapsed_time/len(image_list)))
| [
"numpy.clip",
"numpy.mean",
"argparse.ArgumentParser",
"torch.load",
"scipy.io.loadmat",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"math.log10",
"time.time",
"glob.glob"
] | [((182, 238), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch EDSR Eval"""'}), "(description='PyTorch EDSR Eval')\n", (205, 238), False, 'import argparse\n'), ((1233, 1264), 'glob.glob', 'glob.glob', (["(opt.dataset + '/*.*')"], {}), "(opt.dataset + '/*.*')\n", (1242, 1264), False, 'import time, math, glob\n'), ((1186, 1207), 'torch.load', 'torch.load', (['opt.model'], {}), '(opt.model)\n', (1196, 1207), False, 'import torch\n'), ((2120, 2131), 'time.time', 'time.time', ([], {}), '()\n', (2129, 2131), False, 'import time, math, glob\n'), ((2362, 2387), 'numpy.clip', 'np.clip', (['im_h', '(0.0)', '(255.0)'], {}), '(im_h, 0.0, 255.0)\n', (2369, 2387), True, 'import numpy as np\n'), ((881, 900), 'numpy.mean', 'np.mean', (['(imdff ** 2)'], {}), '(imdff ** 2)\n', (888, 900), True, 'import numpy as np\n'), ((958, 982), 'math.log10', 'math.log10', (['(255.0 / rmse)'], {}), '(255.0 / rmse)\n', (968, 982), False, 'import time, math, glob\n'), ((1084, 1109), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1107, 1109), False, 'import torch\n'), ((1426, 1449), 'scipy.io.loadmat', 'sio.loadmat', (['image_name'], {}), '(image_name)\n', (1437, 1449), True, 'import scipy.io as sio\n'), ((1475, 1498), 'scipy.io.loadmat', 'sio.loadmat', (['image_name'], {}), '(image_name)\n', (1486, 1498), True, 'import scipy.io as sio\n'), ((1521, 1544), 'scipy.io.loadmat', 'sio.loadmat', (['image_name'], {}), '(image_name)\n', (1532, 1544), True, 'import scipy.io as sio\n'), ((2181, 2192), 'time.time', 'time.time', ([], {}), '()\n', (2190, 2192), False, 'import time, math, glob\n'), ((1937, 1971), 'torch.from_numpy', 'torch.from_numpy', (['(im_input / 255.0)'], {}), '(im_input / 255.0)\n', (1953, 1971), False, 'import torch\n'), ((2561, 2587), 'numpy.array', 'np.array', (['im_h_ycbcr._data'], {}), '(im_h_ycbcr._data)\n', (2569, 2587), True, 'import numpy as np\n')] |
import subprocess
import numpy as np
from matplotlib import pyplot as plt
import os
# Run the Go simulation; it writes its results to out.csv in the working dir.
# FIX: the command was built as f'go run main.go'.replace('\\', '/') — the
# f-prefix had no placeholders and the replace could never match (the literal
# contains no backslashes), so both were dead weight. The resulting value is
# byte-identical.
cmd = 'go run main.go'
print(cmd)
subprocess.check_output(cmd, shell=True)

# Columns: 0-1 true track, 2-3 noisy measurements, 4-5 CKF state estimates.
data = np.genfromtxt('out.csv', delimiter=",")
print(data)

plt.plot(data[:, 0], data[:, 1], label="Track")
plt.plot(data[:, 2], data[:, 3], marker=".", label="Measure")
plt.plot(data[:, 4], data[:, 5], marker="*", label="CKF")
plt.title("Cubature Kalman Filter")
plt.legend(loc=2)
plt.tight_layout()
plt.xlabel("x(m)")
plt.ylabel("y(m)")
plt.grid(True)
plt.show()

# Clean up the intermediate CSV produced by the Go program.
os.remove('out.csv')
"subprocess.check_output",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.remove",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"numpy.genfromtxt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((140, 180), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (163, 180), False, 'import subprocess\n'), ((189, 228), 'numpy.genfromtxt', 'np.genfromtxt', (['"""out.csv"""'], {'delimiter': '""","""'}), "('out.csv', delimiter=',')\n", (202, 228), True, 'import numpy as np\n'), ((243, 290), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 0]', 'data[:, 1]'], {'label': '"""Track"""'}), "(data[:, 0], data[:, 1], label='Track')\n", (251, 290), True, 'from matplotlib import pyplot as plt\n'), ((291, 352), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 2]', 'data[:, 3]'], {'marker': '"""."""', 'label': '"""Measure"""'}), "(data[:, 2], data[:, 3], marker='.', label='Measure')\n", (299, 352), True, 'from matplotlib import pyplot as plt\n'), ((353, 410), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 4]', 'data[:, 5]'], {'marker': '"""*"""', 'label': '"""CKF"""'}), "(data[:, 4], data[:, 5], marker='*', label='CKF')\n", (361, 410), True, 'from matplotlib import pyplot as plt\n'), ((411, 446), 'matplotlib.pyplot.title', 'plt.title', (['"""Cubature Kalman Filter"""'], {}), "('Cubature Kalman Filter')\n", (420, 446), True, 'from matplotlib import pyplot as plt\n'), ((447, 464), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (457, 464), True, 'from matplotlib import pyplot as plt\n'), ((465, 483), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (481, 483), True, 'from matplotlib import pyplot as plt\n'), ((484, 502), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x(m)"""'], {}), "('x(m)')\n", (494, 502), True, 'from matplotlib import pyplot as plt\n'), ((503, 521), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y(m)"""'], {}), "('y(m)')\n", (513, 521), True, 'from matplotlib import pyplot as plt\n'), ((522, 536), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (530, 536), True, 'from matplotlib import pyplot as plt\n'), ((538, 548), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (546, 548), True, 'from matplotlib import pyplot as plt\n'), ((551, 571), 'os.remove', 'os.remove', (['"""out.csv"""'], {}), "('out.csv')\n", (560, 571), False, 'import os\n')] |
# Authors: <NAME> <<EMAIL>>
# License: BSD
import glob
import os.path as op
import numpy as np
import pytest
from mne import what, create_info
from mne.datasets import testing
from mne.io import RawArray
from mne.preprocessing import ICA
from mne.utils import requires_sklearn
data_path = testing.data_path(download=False)


@pytest.mark.slowtest
@requires_sklearn
@testing.requires_testing_data
def test_what(tmp_path, verbose_debug):
    """Test that mne.what recognizes the file kind from the filename suffix."""
    import warnings

    # ICA: fit a throwaway ICA on random data, save it, and check the saved
    # file is recognized as 'ica'.
    ica = ICA(max_iter=1)
    raw = RawArray(np.random.RandomState(0).randn(3, 10),
                   create_info(3, 1000., 'eeg'))
    # FIX: `pytest.warns(None)` raises TypeError under pytest >= 7; use the
    # stdlib recorder to tolerate (without asserting) convergence warnings.
    with warnings.catch_warnings(record=True):  # convergence sometimes
        ica.fit(raw)
    fname = op.join(str(tmp_path), 'x-ica.fif')
    ica.save(fname)
    assert what(fname) == 'ica'

    # Files shipped with the testing dataset: every suffix must map to the
    # expected kind from `want_dict`.
    fnames = glob.glob(
        op.join(data_path, 'MEG', 'sample', '*.fif'))
    fnames += glob.glob(
        op.join(data_path, 'subjects', 'sample', 'bem', '*.fif'))
    fnames = sorted(fnames)
    want_dict = dict(eve='events', ave='evoked', cov='cov', inv='inverse',
                     fwd='forward', trans='transform', proj='proj',
                     raw='raw', meg='raw', sol='bem solution',
                     bem='bem surfaces', src='src', dense='bem surfaces',
                     sparse='bem surfaces', head='bem surfaces',
                     fiducials='fiducials')
    for fname in fnames:
        # The kind is encoded in the last dash-separated token of the stem
        # (or its last underscore-separated part for long tokens).
        kind = op.splitext(fname)[0].split('-')[-1]
        if len(kind) > 5:
            kind = kind.split('_')[-1]
        this = what(fname)
        assert this == want_dict[kind]

    # A file with an unrecognized extension reports 'unknown'.
    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave_xfit.dip')
    assert what(fname) == 'unknown'
| [
"mne.datasets.testing.data_path",
"mne.create_info",
"os.path.join",
"os.path.splitext",
"mne.what",
"mne.preprocessing.ICA",
"numpy.random.RandomState",
"pytest.warns"
] | [((293, 326), 'mne.datasets.testing.data_path', 'testing.data_path', ([], {'download': '(False)'}), '(download=False)\n', (310, 326), False, 'from mne.datasets import testing\n'), ((485, 500), 'mne.preprocessing.ICA', 'ICA', ([], {'max_iter': '(1)'}), '(max_iter=1)\n', (488, 500), False, 'from mne.preprocessing import ICA\n'), ((1606, 1671), 'os.path.join', 'op.join', (['data_path', '"""MEG"""', '"""sample"""', '"""sample_audvis-ave_xfit.dip"""'], {}), "(data_path, 'MEG', 'sample', 'sample_audvis-ave_xfit.dip')\n", (1613, 1671), True, 'import os.path as op\n'), ((578, 607), 'mne.create_info', 'create_info', (['(3)', '(1000.0)', '"""eeg"""'], {}), "(3, 1000.0, 'eeg')\n", (589, 607), False, 'from mne import what, create_info\n'), ((617, 635), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (629, 635), False, 'import pytest\n'), ((762, 773), 'mne.what', 'what', (['fname'], {}), '(fname)\n', (766, 773), False, 'from mne import what, create_info\n'), ((832, 876), 'os.path.join', 'op.join', (['data_path', '"""MEG"""', '"""sample"""', '"""*.fif"""'], {}), "(data_path, 'MEG', 'sample', '*.fif')\n", (839, 876), True, 'import os.path as op\n'), ((911, 967), 'os.path.join', 'op.join', (['data_path', '"""subjects"""', '"""sample"""', '"""bem"""', '"""*.fif"""'], {}), "(data_path, 'subjects', 'sample', 'bem', '*.fif')\n", (918, 967), True, 'import os.path as op\n'), ((1543, 1554), 'mne.what', 'what', (['fname'], {}), '(fname)\n', (1547, 1554), False, 'from mne import what, create_info\n'), ((1683, 1694), 'mne.what', 'what', (['fname'], {}), '(fname)\n', (1687, 1694), False, 'from mne import what, create_info\n'), ((520, 544), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (541, 544), True, 'import numpy as np\n'), ((1426, 1444), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (1437, 1444), True, 'import os.path as op\n')] |
import pandas as pd
import pickle
import numpy as np
from keras import backend as K
import tensorflow as tf
import os
from tune_hyperparameters import TuneNeuralNet
# Load training data and stopwords
# Load the pickled training DataFrame and the stopword list used by the tuner.
train_data = pd.read_pickle('../../../data/train_data.pkl')
with open('../../../data/stopwords.pkl', 'rb') as f:
    stopwords = pickle.load(f)

# This is the number of physical cores
NUM_PARALLEL_EXEC_UNITS = 12
# Pin TF's thread pools to the physical core count; must be configured
# before any graph/session work happens.
config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=NUM_PARALLEL_EXEC_UNITS, inter_op_parallelism_threads=2,
                                  allow_soft_placement=True, device_count={'CPU': NUM_PARALLEL_EXEC_UNITS})
session = tf.compat.v1.Session(config=config)
# Make Keras use the configured session for every model it builds.
K.set_session(session)
# OpenMP / MKL threading knobs — set before the first parallel region runs.
os.environ["OMP_NUM_THREADS"] = "12"
os.environ["KMP_BLOCKTIME"] = "30"
os.environ["KMP_SETTINGS"] = "1"
os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"

# Hyperparameter grid: vectorizer settings (ngram range, document-frequency
# cutoffs) plus network width and optimizer choice.
nn_params = {
    'ngram_range':[(1,1),(1,2),(2,2)],
    'max_df':np.linspace(0, 1, 5),
    'min_df':np.linspace(0, 1, 5),
    'h1_nodes':[128, 512, 1024, 2048, 3200],
    'optimizer':['Adam','RMSprop','Adadelta']
}

# May need to edit batch_size to a smaller size to lessen memory load.
# Tune over both count and tf-idf vectorizations, then persist the scores.
tune_nn = TuneNeuralNet(train_data, 3, stopwords, 'nn')
tune_nn.tune_parameters(nn_params, 'count')
tune_nn.tune_parameters(nn_params, 'tfidf')
tune_nn.save_scores_csv('nn')
| [
"pandas.read_pickle",
"tensorflow.compat.v1.ConfigProto",
"tune_hyperparameters.TuneNeuralNet",
"keras.backend.set_session",
"pickle.load",
"numpy.linspace",
"tensorflow.compat.v1.Session"
] | [((214, 260), 'pandas.read_pickle', 'pd.read_pickle', (['"""../../../data/train_data.pkl"""'], {}), "('../../../data/train_data.pkl')\n", (228, 260), True, 'import pandas as pd\n'), ((423, 616), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {'intra_op_parallelism_threads': 'NUM_PARALLEL_EXEC_UNITS', 'inter_op_parallelism_threads': '(2)', 'allow_soft_placement': '(True)', 'device_count': "{'CPU': NUM_PARALLEL_EXEC_UNITS}"}), "(intra_op_parallelism_threads=\n NUM_PARALLEL_EXEC_UNITS, inter_op_parallelism_threads=2,\n allow_soft_placement=True, device_count={'CPU': NUM_PARALLEL_EXEC_UNITS})\n", (447, 616), True, 'import tensorflow as tf\n'), ((653, 688), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (673, 688), True, 'import tensorflow as tf\n'), ((690, 712), 'keras.backend.set_session', 'K.set_session', (['session'], {}), '(session)\n', (703, 712), True, 'from keras import backend as K\n'), ((1185, 1230), 'tune_hyperparameters.TuneNeuralNet', 'TuneNeuralNet', (['train_data', '(3)', 'stopwords', '"""nn"""'], {}), "(train_data, 3, stopwords, 'nn')\n", (1198, 1230), False, 'from tune_hyperparameters import TuneNeuralNet\n'), ((330, 344), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (341, 344), False, 'import pickle\n'), ((953, 973), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (964, 973), True, 'import numpy as np\n'), ((988, 1008), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (999, 1008), True, 'import numpy as np\n')] |
from typing import Any, List, Optional
import numpy as np
from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges
class MolFeatureExtractionError(Exception):
    """Raised when molecular features cannot be extracted (e.g. the molecule
    exceeds the configured maximum atom count)."""
    pass
def one_hot(x: Any, allowable_set: List[Any]) -> List[int]:
    """One hot encode labels.

    If label `x` is not included in the set, it is mapped onto the last
    element of the list (conventionally an ``'other'`` catch-all entry).

    Params:
    -------
    x: Any
        Label to one hot encode.

    allowable_set: list of Any
        All possible values the label can have.

    Returns:
    --------
    vec: list of int
        One hot encoded vector of the features with the label `x` as
        the `True` label.

    Examples:
    ---------
    ```python
    >>> one_hot(x='Si', allowable_set=['C', 'O', 'N', 'S', 'Cl', 'F',
    ...         'Br', 'P', 'I', 'Si', 'B', 'Na', 'Sn', 'Se', 'other'])
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
    ```
    """
    # BUG FIX: the original wrote `x = allowable_set[:-1]` (a slice), which
    # made the fallback compare a *list* against every element, yielding an
    # all-zero vector for unknown labels instead of activating the final
    # catch-all slot as documented. Use the last element itself.
    if x not in allowable_set:
        x = allowable_set[-1]
    return [int(x == s) for s in allowable_set]
def check_num_atoms(mol: rdchem.Mol, max_num_atoms: Optional[int]=-1) -> None:
    """Check number of atoms in `mol` does not exceed `max_num_atoms`.

    Raises `MolFeatureExtractionError` when the molecule holds more atoms
    than allowed; a negative `max_num_atoms` disables the check entirely.

    Params:
    -------
    mol: rdkit.Chem.rdchem.Mol
        The molecule to check.

    max_num_atoms: int, optional , default=-1
        Maximum allowed number of atoms in a molecule. If negative,
        check passes unconditionally.
    """
    atom_count = mol.GetNumAtoms()
    if 0 <= max_num_atoms < atom_count:
        message = "Atoms in mol (N={}) exceeds num_max_atoms (N={}).".format(
            atom_count, max_num_atoms)
        raise MolFeatureExtractionError(message)
def construct_mol_features(mol: rdchem.Mol, out_size: Optional[int]=-1) -> np.ndarray:
    """Returns the atom features of all the atoms in the molecule.

    Params:
    -------
    mol: rdkit.Chem.rdchem.Mol
        Molecule of interest.

    out_size: int, optional, default=-1
        The size of the returned array. If this option is negative, it
        does not take any effect. Otherwise, it must be larger than or
        equal to the number of atoms in the input molecule. If so, the
        end of the array is padded with zeros.

    Returns:
    --------
    mol_feats: np.ndarray, shape=(n,m)
        Where `n` is the total number of atoms within the molecule, and
        `m` is the number of feats.
    """
    # Calculate charges and chirality of atoms within molecule
    rdPartialCharges.ComputeGasteigerCharges(mol) # stored under _GasteigerCharge
    rdmolops.AssignStereochemistry(mol) # stored under _CIPCode, see doc for more info

    # Retrieve atom index locations of matches
    HYDROGEN_DONOR = rdmolfiles.MolFromSmarts("[$([N;!H0;v3,v4&+1]),$([O,S;H1;+0])" +
                                             ",n&H1&+0]")
    # FIX: renamed local `HYROGEN_ACCEPTOR` -> `HYDROGEN_ACCEPTOR` (typo).
    HYDROGEN_ACCEPTOR = rdmolfiles.MolFromSmarts("[$([O,S;H1;v2;!$(*-*=[O,N,P,S])])" +
        ",$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=[O,N,P,S])]),n&H0&+0," +
        "$([o,s;+0;!$([o,s]:n);!$([o,s]:c:n)])]")
    ACIDIC = rdmolfiles.MolFromSmarts("[$([C,S](=[O,S,P])-[O;H1,-1])]")
    BASIC = rdmolfiles.MolFromSmarts("[#7;+,$([N;H2&+0][$([C,a]);!$([C,a](=O))])" +
        ",$([N;H1&+0]([$([C,a]);!$([C,a](=O))])[$([C,a]);!$([C,a](=O))])," +
        "$([N;H0&+0]([C;!$(C(=O))])([C;!$(C(=O))])[C;!$(C(=O))])]")
    hydrogen_donor_match = sum(mol.GetSubstructMatches(HYDROGEN_DONOR), ())
    hydrogen_acceptor_match = sum(mol.GetSubstructMatches(HYDROGEN_ACCEPTOR), ())
    acidic_match = sum(mol.GetSubstructMatches(ACIDIC), ())
    basic_match = sum(mol.GetSubstructMatches(BASIC), ())

    # Get ring info
    ring = mol.GetRingInfo()

    mol_feats = []
    n_atoms = mol.GetNumAtoms()
    for atom_idx in range(n_atoms):
        atom = mol.GetAtomWithIdx(atom_idx)

        atom_feats = []
        # Element symbol, degree, hybridization, implicit valence, charge
        atom_feats += one_hot(atom.GetSymbol(), ['C', 'O', 'N', 'S', 'Cl', 'F', 'Br', 'P',
                                                 'I', 'Si', 'B', 'Na', 'Sn', 'Se', 'other'])
        atom_feats += one_hot(atom.GetDegree(), [1,2,3,4,5,6])
        atom_feats += one_hot(atom.GetHybridization(), list(rdchem.HybridizationType.names.values()))
        atom_feats += one_hot(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6])
        atom_feats += one_hot(atom.GetFormalCharge(), [-3, -2, -1, 0, 1, 2, 3])
        # Gasteiger charge can be NaN for some structures; fall back to 0.
        g_charge = float(atom.GetProp("_GasteigerCharge"))
        atom_feats += [g_charge] if not np.isnan(g_charge) else [0.]
        atom_feats += [atom.GetIsAromatic()]
        # Ring membership for ring sizes 3-8
        atom_feats += [ring.IsAtomInRingOfSize(atom_idx, size) for size in range(3,9)]
        atom_feats += one_hot(atom.GetTotalNumHs(), [0, 1, 2, 3, 4])

        # Chirality: _CIPCode is only present on assigned stereocenters
        try:
            atom_feats += one_hot(atom.GetProp('_CIPCode'), ["R", "S"]) + [atom.HasProp("_ChiralityPossible")]
        except:
            atom_feats += [False, False] + [atom.HasProp("_ChiralityPossible")]
        # Hydrogen bonding
        atom_feats += [atom_idx in hydrogen_donor_match]
        atom_feats += [atom_idx in hydrogen_acceptor_match]
        # Is Acidic/Basic
        atom_feats += [atom_idx in acidic_match]
        atom_feats += [atom_idx in basic_match]

        mol_feats.append(atom_feats)

    # FIX: `np.float` was removed in NumPy 1.24; it was an alias for the
    # builtin float (= float64), so `np.float64` preserves the dtype exactly.
    if out_size < 0:
        return np.array(mol_feats, dtype=np.float64)
    elif out_size >= n_atoms:
        # 'empty' padding for `mol_feats`. Generate(s) feature matrix of same size for all mols
        # NOTE: len(mol_feats[0]) is the number of feats
        padded_mol_feats = np.zeros((out_size, len(mol_feats[0])), dtype=np.float64)
        padded_mol_feats[:n_atoms] = np.array(mol_feats, dtype=np.float64)
        return padded_mol_feats
    else:
        raise ValueError('`out_size` (N={}) must be negative or larger than or '
                         'equal to the number of atoms in the input molecules (N={}).'.format(out_size, n_atoms))
def construct_adj_matrix(mol: rdchem.Mol,
                         out_size: Optional[int]=-1,
                         add_self_loops: Optional[bool]=True,
                         normalize: Optional[bool]=True) -> np.ndarray:
    """Returns the adjacency matrix of the molecule.

    Normalization of the matrix is highly recommened. When we apply a
    layer propogation rule defined by,

    .. ::math: `f(H^{(l)}, A) = \\sigma(A H^{(l)} W^{(l)})

    multiplication with `A` will completely change the scale of the
    features vectors, which we can observe by looking into the eigenvals
    of A. By performing :math: `D^{-1}A`, where `D` is the diagonal
    degree node matrix, the rows become normalized to 1. However, in
    practice, it is better to use symmetric normalization (i.e.
    :math:`D^{-\\frac{1/2}} \\hat{A} D^{-\\frac{1/2}}) as that has been
    observed to yield better results.

    Additionally, when multiplying by `A`, for every node, we sum up
    all the feature vectors of all neighboring nodes but not the node
    itself (unless there are self-loops in the graph). We can "fix" this
    by adding self-loops in the graph: aka add an identity matrix `I` to `A`.

    See https://tkipf.github.io/graph-convolutional-networks/ for a
    more in-depth overview.

    Params:
    -------
    mol: rdkit.Chem.rdchem.Mol
        Molecule of interest.

    out_size: int, optional, default=-1
        The size of the returned array. If this option is negative, it
        does not take any effect. Otherwise, it must be larger than or
        equal to the number of atoms in the input molecule. If so, the
        end of the array is padded with zeros.

    add_self_loops: bool, optional, default=True
        Whether or not to add the `I` matrix (aka self-connections).
        If normalize is True, this option is ignored.

    normalize: bool, optional, default=True
        Whether or not to normalize the matrix. If `True`, the diagonal
        elements are filled with 1, and symmetric normalization is
        performed: :math:`D^{-\\frac{1/2}} * \\hat{A} * D^{-\\frac{1/2}}`

    Returns:
    --------
    adj: np.ndarray
        Adjacency matrix of input molecule. If `out_size` is non-negative,
        the returned matrix is equal to that value. Otherwise, it is
        equal to the number of atoms in the the molecule.
    """
    adj = rdmolops.GetAdjacencyMatrix(mol)
    s1, s2 = adj.shape # shape=(n_atoms, n_atoms)

    # Normalize using D^(-1/2) * A_hat * D^(-1/2)
    if normalize:
        adj = adj + np.eye(s1)          # A_hat = A + I
        degree = np.array(adj.sum(1))
        deg_inv_sqrt = np.power(degree, -0.5)
        deg_inv_sqrt[np.isinf(deg_inv_sqrt)] = 0.  # isolated nodes: 0^-0.5 -> inf
        deg_inv_sqrt = np.diag(deg_inv_sqrt)
        # BUG FIX: the original assigned `adj = deg_inv_sqrt`, discarding the
        # adjacency matrix entirely and returning only D^(-1/2). Apply the
        # symmetric normalization documented above instead.
        adj = deg_inv_sqrt.dot(adj).dot(deg_inv_sqrt)
    elif add_self_loops:
        adj = adj + np.eye(s1)

    if out_size < 0:
        return adj
    elif out_size >= s1:
        # 'empty' padding for `adj`. Useful to generate adj matrix of same size for all mols
        # FIX: `np.float` was removed in NumPy 1.24 -> use np.float64.
        padded_adj = np.zeros(shape=(out_size, out_size), dtype=np.float64)
        padded_adj[:s1, :s2] = adj
        return padded_adj
    else:
        raise ValueError('`out_size` (N={}) must be negative or larger than or equal to the '
                         'number of atoms in the input molecules (N={}).'.format(out_size, s1))
def construct_pos_matrix(mol: rdchem.Mol, out_size: Optional[int]=-1) -> np.ndarray:
    """Construct relative positions from each atom within the molecule.

    Params:
    -------
    mol: rdkit.Chem.rdchem.Mol
        Molecule of interest.

    out_size: int, optional, default=-1
        The size of the returned array. If this option is negative, it
        does not take any effect. Otherwise, it must be larger than or
        equal to the number of atoms in the input molecule. If so, the
        end of the array is padded with zeros.

    Returns:
    --------
    pos_matrix: np.ndarray, shape=(n,n,3)
        Relative position (XYZ) coordinates from one atom the others in
        the mol.

    Examples:
    ---------
    ```python
    >>> from rdkit import Chem
    >>> from rdkit.Chem import AllChem
    >>> smiles = 'N[C@@]([H])([C@]([H])(O2)C)C(=O)N[C@@]([H])(CC(=O)N)C(=O)N[C@@]([H])([C@]([H])' \
                 '(O)C)C(=O)N[C@@]([H])(Cc1ccc(O)cc1)C(=O)2'
    >>> mol = Chem.MolFromSmiles(smiles)
    >>> mol = Chem.AddHs(mol, addCoords=True)
    >>> AllChem.EmbedMolecule(mol, AllChem.ETKDG())
    >>> mol = Chem.RemoveHs(mol)
    >>> pos_matrix = construct_pos_matrix(mol, out_size=-1)
    >>> pos_matrix.shape
    (34,34,3)

    >>> pos_matrix = construct_pos_matrix(mol, out_size=49)
    >>> pos_matrix.shape
    (49,49,3)
    ```
    """
    # Obtain initial distance geometry between atoms, if unavilable
    if mol.GetNumConformers() == 0:
        mol = rdmolops.AddHs(mol, addCoords=True)
        rdDistGeom.EmbedMolecule(mol, rdDistGeom.ETKDG())
        mol = rdmolops.RemoveHs(mol)
    coords = mol.GetConformer().GetPositions() # shape=(N,3)
    N = mol.GetNumAtoms()

    # Determine appropiate output size to generate feature matrix of same size for all mols.
    if out_size < 0:
        size = N
    elif out_size >= N:
        size = out_size
    else:
        raise ValueError('`out_size` (N={}) is smaller than number of atoms in mol (N={})'.
                         format(out_size, N))

    # FIX: `np.float` was removed in NumPy 1.24; `np.float64` is the exact
    # dtype the old alias resolved to.
    pos_matrix = np.zeros(shape=(size, size, 3), dtype=np.float64)
    for atom_idx in range(N):
        atom_pos = coords[atom_idx] # central atom of interest
        for neighbor_idx in range(N):
            neigh_pos = coords[neighbor_idx] # neighboring atom
            pos_matrix[atom_idx, neighbor_idx] = atom_pos - neigh_pos # dist between neighbor -> center
    return pos_matrix
| [
"numpy.eye",
"rdkit.Chem.rdPartialCharges.ComputeGasteigerCharges",
"numpy.power",
"rdkit.Chem.rdmolops.AssignStereochemistry",
"rdkit.Chem.rdmolops.RemoveHs",
"rdkit.Chem.rdmolops.GetAdjacencyMatrix",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"rdkit.Chem.rdchem.HybridizationType.names.values",
... | [((2724, 2769), 'rdkit.Chem.rdPartialCharges.ComputeGasteigerCharges', 'rdPartialCharges.ComputeGasteigerCharges', (['mol'], {}), '(mol)\n', (2764, 2769), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((2806, 2841), 'rdkit.Chem.rdmolops.AssignStereochemistry', 'rdmolops.AssignStereochemistry', (['mol'], {}), '(mol)\n', (2836, 2841), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((2958, 3035), 'rdkit.Chem.rdmolfiles.MolFromSmarts', 'rdmolfiles.MolFromSmarts', (["('[$([N;!H0;v3,v4&+1]),$([O,S;H1;+0])' + ',n&H1&+0]')"], {}), "('[$([N;!H0;v3,v4&+1]),$([O,S;H1;+0])' + ',n&H1&+0]')\n", (2982, 3035), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((3068, 3248), 'rdkit.Chem.rdmolfiles.MolFromSmarts', 'rdmolfiles.MolFromSmarts', (["('[$([O,S;H1;v2;!$(*-*=[O,N,P,S])])' +\n ',$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=[O,N,P,S])]),n&H0&+0,' +\n '$([o,s;+0;!$([o,s]:n);!$([o,s]:c:n)])]')"], {}), "('[$([O,S;H1;v2;!$(*-*=[O,N,P,S])])' +\n ',$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=[O,N,P,S])]),n&H0&+0,' +\n '$([o,s;+0;!$([o,s]:n);!$([o,s]:c:n)])]')\n", (3092, 3248), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((3272, 3330), 'rdkit.Chem.rdmolfiles.MolFromSmarts', 'rdmolfiles.MolFromSmarts', (['"""[$([C,S](=[O,S,P])-[O;H1,-1])]"""'], {}), "('[$([C,S](=[O,S,P])-[O;H1,-1])]')\n", (3296, 3330), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((3343, 3551), 'rdkit.Chem.rdmolfiles.MolFromSmarts', 'rdmolfiles.MolFromSmarts', (["('[#7;+,$([N;H2&+0][$([C,a]);!$([C,a](=O))])' +\n ',$([N;H1&+0]([$([C,a]);!$([C,a](=O))])[$([C,a]);!$([C,a](=O))]),' +\n '$([N;H0&+0]([C;!$(C(=O))])([C;!$(C(=O))])[C;!$(C(=O))])]')"], {}), "('[#7;+,$([N;H2&+0][$([C,a]);!$([C,a](=O))])' +\n ',$([N;H1&+0]([$([C,a]);!$([C,a](=O))])[$([C,a]);!$([C,a](=O))]),' +\n 
'$([N;H0&+0]([C;!$(C(=O))])([C;!$(C(=O))])[C;!$(C(=O))])]')\n", (3367, 3551), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((8470, 8502), 'rdkit.Chem.rdmolops.GetAdjacencyMatrix', 'rdmolops.GetAdjacencyMatrix', (['mol'], {}), '(mol)\n', (8497, 8502), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((11463, 11510), 'numpy.zeros', 'np.zeros', ([], {'shape': '(size, size, 3)', 'dtype': 'np.float'}), '(shape=(size, size, 3), dtype=np.float)\n', (11471, 11510), True, 'import numpy as np\n'), ((5467, 5502), 'numpy.array', 'np.array', (['mol_feats'], {'dtype': 'np.float'}), '(mol_feats, dtype=np.float)\n', (5475, 5502), True, 'import numpy as np\n'), ((8715, 8737), 'numpy.power', 'np.power', (['degree', '(-0.5)'], {}), '(degree, -0.5)\n', (8723, 8737), True, 'import numpy as np\n'), ((8811, 8832), 'numpy.diag', 'np.diag', (['deg_inv_sqrt'], {}), '(deg_inv_sqrt)\n', (8818, 8832), True, 'import numpy as np\n'), ((10899, 10934), 'rdkit.Chem.rdmolops.AddHs', 'rdmolops.AddHs', (['mol'], {'addCoords': '(True)'}), '(mol, addCoords=True)\n', (10913, 10934), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((11007, 11029), 'rdkit.Chem.rdmolops.RemoveHs', 'rdmolops.RemoveHs', (['mol'], {}), '(mol)\n', (11024, 11029), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((5806, 5841), 'numpy.array', 'np.array', (['mol_feats'], {'dtype': 'np.float'}), '(mol_feats, dtype=np.float)\n', (5814, 5841), True, 'import numpy as np\n'), ((8643, 8653), 'numpy.eye', 'np.eye', (['s1'], {}), '(s1)\n', (8649, 8653), True, 'import numpy as np\n'), ((8759, 8781), 'numpy.isinf', 'np.isinf', (['deg_inv_sqrt'], {}), '(deg_inv_sqrt)\n', (8767, 8781), True, 'import numpy as np\n'), ((9096, 9148), 'numpy.zeros', 'np.zeros', ([], {'shape': '(out_size, out_size)', 'dtype': 'np.float'}), '(shape=(out_size, out_size), 
dtype=np.float)\n', (9104, 9148), True, 'import numpy as np\n'), ((10973, 10991), 'rdkit.Chem.rdDistGeom.ETKDG', 'rdDistGeom.ETKDG', ([], {}), '()\n', (10989, 10991), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((4352, 4391), 'rdkit.Chem.rdchem.HybridizationType.names.values', 'rdchem.HybridizationType.names.values', ([], {}), '()\n', (4389, 4391), False, 'from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges\n'), ((4653, 4671), 'numpy.isnan', 'np.isnan', (['g_charge'], {}), '(g_charge)\n', (4661, 4671), True, 'import numpy as np\n'), ((8905, 8915), 'numpy.eye', 'np.eye', (['s1'], {}), '(s1)\n', (8911, 8915), True, 'import numpy as np\n')] |
import tensorflow as tf
from sparkflow.pipeline_util import PysparkReaderWriter
import numpy as np
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCol, HasPredictionCol, HasLabelCol
from pyspark.ml.base import Estimator
from pyspark.ml import Model
from pyspark.ml.util import Identifiable, MLReadable, MLWritable
from pyspark import keyword_only
from sparkflow.HogwildSparkModel import HogwildSparkModel
from sparkflow.ml_util import convert_weights_to_json, predict_func
from pyspark import SparkContext
import json
def build_optimizer(optimizer_name, learning_rate, optimizer_options):
    """Instantiate a tf.train optimizer by name.

    When `optimizer_options` is None, a default option dict is built from
    `learning_rate` (plus a 0.9 momentum for the 'momentum' optimizer).
    Unrecognized names fall back to plain gradient descent.
    """
    registry = {
        'adam': tf.train.AdamOptimizer,
        'rmsprop': tf.train.RMSPropOptimizer,
        'momentum': tf.train.MomentumOptimizer,
        'adadelta': tf.train.AdadeltaOptimizer,
        'adagrad': tf.train.AdagradOptimizer,
        'gradient_descent': tf.train.GradientDescentOptimizer,
        'adagrad_da': tf.train.AdagradDAOptimizer,
        'ftrl': tf.train.FtrlOptimizer,
        'proximal_adagrad': tf.train.ProximalAdagradOptimizer,
        'proximal_gradient_descent': tf.train.ProximalGradientDescentOptimizer
    }
    if optimizer_options is None:
        # Default options; user-supplied dicts are passed through untouched
        # (note: in that case `learning_rate` is ignored by design).
        optimizer_options = {
            "learning_rate": learning_rate,
            "use_locking": False
        }
        if optimizer_name == 'momentum':
            optimizer_options['momentum'] = 0.9
    chosen = registry.get(optimizer_name, registry['gradient_descent'])
    return chosen(**optimizer_options)
def handle_data(data, inp_col, label_col):
    """Extract the feature column from a row as a numpy array.

    Returns just the features when `label_col` is None, otherwise a
    (features, label) pair.
    """
    features = np.asarray(data[inp_col])
    if label_col is None:
        return features
    return features, data[label_col]
class SparkAsyncDLModel(Model, HasInputCol, HasPredictionCol, PysparkReaderWriter, MLReadable, MLWritable, Identifiable):
    """Fitted Spark ML Model that scores a DataFrame with a serialized
    Tensorflow graph (JSON topology + weight string) via `predict_func`."""

    # Serialized Tensorflow graph topology and trained weights.
    modelJson = Param(Params._dummy(), "modelJson", "", typeConverter=TypeConverters.toString)
    modelWeights = Param(Params._dummy(), "modelWeights", "", typeConverter=TypeConverters.toString)
    # Names of the graph's output/input tensors used at inference time.
    tfOutput = Param(Params._dummy(), "tfOutput", "", typeConverter=TypeConverters.toString)
    tfInput = Param(Params._dummy(), "tfInput", "", typeConverter=TypeConverters.toString)
    # Optional dropout tensor name and whether dropout stays active at predict time.
    tfDropout = Param(Params._dummy(), "tfDropout", "", typeConverter=TypeConverters.toString)
    toKeepDropout = Param(Params._dummy(), "toKeepDropout", "", typeConverter=TypeConverters.toBoolean)

    @keyword_only
    def __init__(self,
                 inputCol=None,
                 modelJson=None,
                 modelWeights=None,
                 tfInput=None,
                 tfOutput=None,
                 tfDropout=None,
                 toKeepDropout=None,
                 predictionCol=None):
        """Set Param defaults, then apply the keyword arguments captured by
        @keyword_only (available as self._input_kwargs)."""
        super(SparkAsyncDLModel, self).__init__()
        self._setDefault(modelJson=None, inputCol='encoded',
                         predictionCol='predicted', tfOutput=None, tfInput=None,
                         modelWeights=None, tfDropout=None, toKeepDropout=False)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self,
                  inputCol=None,
                  modelJson=None,
                  modelWeights=None,
                  tfInput=None,
                  tfOutput=None,
                  tfDropout=None,
                  toKeepDropout=None,
                  predictionCol=None):
        """Set the given Params on this instance (keyword-only)."""
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _transform(self, dataset):
        """Run inference over `dataset` partition-by-partition and return a
        DataFrame with the prediction column appended."""
        inp = self.getOrDefault(self.inputCol)
        out = self.getOrDefault(self.predictionCol)
        mod_json = self.getOrDefault(self.modelJson)
        mod_weights = self.getOrDefault(self.modelWeights)
        tf_input = self.getOrDefault(self.tfInput)
        tf_output = self.getOrDefault(self.tfOutput)
        tf_dropout = self.getOrDefault(self.tfDropout)
        to_keep_dropout = self.getOrDefault(self.toKeepDropout)
        # predict_func rebuilds the TF graph on each executor partition.
        return dataset.rdd.mapPartitions(lambda x: predict_func(x, mod_json, out, mod_weights, inp, tf_output, tf_input, tf_dropout, to_keep_dropout)).toDF()
class SparkAsyncDL(Estimator, HasInputCol, HasPredictionCol, HasLabelCol,PysparkReaderWriter, MLReadable, MLWritable, Identifiable):
    """Spark ML Estimator that trains a Tensorflow graph asynchronously (Hogwild-style).

    _fit distributes training over the dataframe partitions and returns a
    SparkAsyncDLModel holding the trained weights.
    """
    # Spark ML Param declarations; see the __init__ docstring for the
    # meaning of each corresponding constructor argument.
    tensorflowGraph = Param(Params._dummy(), "tensorflowGraph", "", typeConverter=TypeConverters.toString)
    tfInput = Param(Params._dummy(), "tfInput", "", typeConverter=TypeConverters.toString)
    tfOutput = Param(Params._dummy(), "tfOutput", "", typeConverter=TypeConverters.toString)
    tfLabel = Param(Params._dummy(), "tfLabel", "", typeConverter=TypeConverters.toString)
    tfOptimizer = Param(Params._dummy(), "tfOptimizer", "", typeConverter=TypeConverters.toString)
    tfLearningRate = Param(Params._dummy(), "tfLearningRate", "", typeConverter=TypeConverters.toFloat)
    iters = Param(Params._dummy(), "iters", "", typeConverter=TypeConverters.toInt)
    partitions = Param(Params._dummy(), "partitions", "", typeConverter=TypeConverters.toInt)
    miniBatchSize = Param(Params._dummy(), "miniBatchSize", "", typeConverter=TypeConverters.toInt)
    miniStochasticIters = Param(Params._dummy(), "miniStochasticIters", "", typeConverter=TypeConverters.toInt)
    verbose = Param(Params._dummy(), "verbose", "", typeConverter=TypeConverters.toInt)
    acquireLock = Param(Params._dummy(), "acquireLock", "", typeConverter=TypeConverters.toBoolean)
    shufflePerIter = Param(Params._dummy(), "shufflePerIter", "", typeConverter=TypeConverters.toBoolean)
    tfDropout = Param(Params._dummy(), "tfDropout", "", typeConverter=TypeConverters.toString)
    toKeepDropout = Param(Params._dummy(), "toKeepDropout", "", typeConverter=TypeConverters.toBoolean)
    partitionShuffles = Param(Params._dummy(), "partitionShuffles", "", typeConverter=TypeConverters.toInt)
    optimizerOptions = Param(Params._dummy(), "optimizerOptions", "", typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self,
                 inputCol=None,
                 tensorflowGraph=None,
                 tfInput=None,
                 tfLabel=None,
                 tfOutput=None,
                 tfOptimizer=None,
                 tfLearningRate=None,
                 iters=None,
                 predictionCol=None,
                 partitions=None,
                 miniBatchSize = None,
                 miniStochasticIters=None,
                 acquireLock=None,
                 shufflePerIter=None,
                 tfDropout=None,
                 toKeepDropout=None,
                 verbose=None,
                 labelCol=None,
                 partitionShuffles=None,
                 optimizerOptions=None):
        """
        :param inputCol: Spark dataframe inputCol. Similar to other spark ml inputCols
        :param tensorflowGraph: The protobuf tensorflow graph. You can use the utility function in graph_utils
        to generate the graph for you
        :param tfInput: The tensorflow input. This points us to the input variable name that you would like to use
        for training
        :param tfLabel: The tensorflow label. This is the variable name for the label.
        :param tfOutput: The tensorflow raw output. This is for your loss function.
        :param tfOptimizer: The optimization function you would like to use for training. Defaults to adam
        :param tfLearningRate: Learning rate of the optimization function
        :param iters: number of iterations of training
        :param predictionCol: The prediction column name on the spark dataframe for transformations
        :param partitions: Number of partitions to use for training (recommended on partition per instance)
        :param miniBatchSize: size of the mini batch. A size of -1 means train on all rows
        :param miniStochasticIters: If using a mini batch, you can choose number of mini iters you would like to do with the
        batch size above per epoch. A value of -1 means that you would like to run mini-batches on all data in the partition
        :param acquireLock: If you do not want to utilize hogwild training, this will set a lock
        :param shufflePerIter: Specifies if you want to shuffle the features after each iteration
        :param tfDropout: Specifies the dropout variable. This is important for predictions
        :param toKeepDropout: Due to conflicting TF implementations, this specifies whether the dropout function means
        to keep a percentage of values or to drop a percentage of values.
        :param verbose: Specifies log level of training results
        :param labelCol: Label column for training
        :param partitionShuffles: This will shuffle your data after iterations are completed, then run again. For example,
        if you have 2 partition shuffles and 100 iterations, it will run 100 iterations then reshuffle and run 100 iterations again.
        The repartition hits performance and should be used with care.
        :param optimizerOptions: Json options to apply to tensorflow optimizers.
        """
        super(SparkAsyncDL, self).__init__()
        self._setDefault(inputCol='transformed', tensorflowGraph='',
                         tfInput='x:0', tfLabel=None, tfOutput='out/Sigmoid:0',
                         tfOptimizer='adam', tfLearningRate=.01, partitions=5,
                         miniBatchSize=128, miniStochasticIters=-1,
                         shufflePerIter=True, tfDropout=None, acquireLock=False, verbose=0,
                         iters=1000, toKeepDropout=False, predictionCol='predicted', labelCol=None,
                         partitionShuffles=1, optimizerOptions=None)
        # @keyword_only stashes the constructor kwargs on self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self,
                  inputCol=None,
                  tensorflowGraph=None,
                  tfInput=None,
                  tfLabel=None,
                  tfOutput=None,
                  tfOptimizer=None,
                  tfLearningRate=None,
                  iters=None,
                  predictionCol=None,
                  partitions=None,
                  miniBatchSize = None,
                  miniStochasticIters=None,
                  acquireLock=None,
                  shufflePerIter=None,
                  tfDropout=None,
                  toKeepDropout=None,
                  verbose=None,
                  labelCol=None,
                  partitionShuffles=None,
                  optimizerOptions=None):
        """Set the given Params in a single call (standard pyspark.ml pattern)."""
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    # --- Plain accessors for the Params declared above ---
    def getTensorflowGraph(self):
        return self.getOrDefault(self.tensorflowGraph)
    def getIters(self):
        return self.getOrDefault(self.iters)
    def getTfInput(self):
        return self.getOrDefault(self.tfInput)
    def getTfLabel(self):
        return self.getOrDefault(self.tfLabel)
    def getTfOutput(self):
        return self.getOrDefault(self.tfOutput)
    def getTfOptimizer(self):
        return self.getOrDefault(self.tfOptimizer)
    def getTfLearningRate(self):
        return self.getOrDefault(self.tfLearningRate)
    def getPartitions(self):
        return self.getOrDefault(self.partitions)
    def getMiniBatchSize(self):
        return self.getOrDefault(self.miniBatchSize)
    def getMiniStochasticIters(self):
        return self.getOrDefault(self.miniStochasticIters)
    def getVerbose(self):
        return self.getOrDefault(self.verbose)
    def getAqcuireLock(self):
        # NOTE(review): method name misspells "Acquire"; kept for API compatibility.
        return self.getOrDefault(self.acquireLock)
    def getShufflePerIter(self):
        return self.getOrDefault(self.shufflePerIter)
    def getTfDropout(self):
        return self.getOrDefault(self.tfDropout)
    def getToKeepDropout(self):
        return self.getOrDefault(self.toKeepDropout)
    def getPartitionShuffles(self):
        return self.getOrDefault(self.partitionShuffles)
    def getOptimizerOptions(self):
        return self.getOrDefault(self.optimizerOptions)
    def _fit(self, dataset):
        """Train the graph on *dataset* and return a fitted SparkAsyncDLModel."""
        # Resolve all Params into plain locals (closures must not capture self).
        inp_col = self.getInputCol()
        graph_json = self.getTensorflowGraph()
        iters = self.getIters()
        label = self.getLabelCol()
        prediction = self.getPredictionCol()
        tf_input = self.getTfInput()
        tf_label = self.getTfLabel()
        tf_output = self.getTfOutput()
        optimizer_options = self.getOptimizerOptions()
        if optimizer_options is not None:
            optimizer_options = json.loads(optimizer_options)
        tf_optimizer = build_optimizer(self.getTfOptimizer(), self.getTfLearningRate(), optimizer_options)
        partitions = self.getPartitions()
        acquire_lock = self.getAqcuireLock()
        mbs = self.getMiniBatchSize()
        msi = self.getMiniStochasticIters()
        verbose = self.getVerbose()
        spi = self.getShufflePerIter()
        tf_dropout = self.getTfDropout()
        to_keep_dropout = self.getToKeepDropout()
        partition_shuffles = self.getPartitionShuffles()
        # Convert each Row to (features, label) tuples / feature arrays.
        df = dataset.rdd.map(lambda x: handle_data(x, inp_col, label))
        # Only coalesce downward; never increase the partition count here.
        df = df.coalesce(partitions) if partitions < df.getNumPartitions() else df
        spark_model = HogwildSparkModel(
            tensorflowGraph=graph_json,
            iters=iters,
            tfInput=tf_input,
            tfLabel=tf_label,
            optimizer=tf_optimizer,
            master_url=SparkContext._active_spark_context.getConf().get("spark.driver.host").__str__() + ":5000",
            acquire_lock=acquire_lock,
            mini_batch=mbs,
            mini_stochastic_iters=msi,
            shuffle=spi,
            verbose=verbose,
            partition_shuffles=partition_shuffles
        )
        weights = spark_model.train(df)
        json_weights = convert_weights_to_json(weights)
        return SparkAsyncDLModel(
            inputCol=inp_col,
            modelJson=graph_json,
            modelWeights=json_weights,
            tfOutput=tf_output,
            tfInput=tf_input,
            tfDropout=tf_dropout,
            toKeepDropout=to_keep_dropout,
            predictionCol=prediction
        )
| [
"json.loads",
"pyspark.SparkContext._active_spark_context.getConf",
"pyspark.ml.param.Params._dummy",
"numpy.asarray",
"sparkflow.ml_util.convert_weights_to_json",
"sparkflow.ml_util.predict_func"
] | [((1728, 1753), 'numpy.asarray', 'np.asarray', (['data[inp_col]'], {}), '(data[inp_col])\n', (1738, 1753), True, 'import numpy as np\n'), ((1765, 1790), 'numpy.asarray', 'np.asarray', (['data[inp_col]'], {}), '(data[inp_col])\n', (1775, 1790), True, 'import numpy as np\n'), ((1955, 1970), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (1968, 1970), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((2053, 2068), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (2066, 2068), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((2150, 2165), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (2163, 2165), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((2242, 2257), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (2255, 2257), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((2335, 2350), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (2348, 2350), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((2434, 2449), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (2447, 2449), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((4355, 4370), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (4368, 4370), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((4454, 4469), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (4467, 4469), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((4546, 4561), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (4559, 4561), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((4638, 4653), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (4651, 4653), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((4733, 4748), 
'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (4746, 4748), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((4835, 4850), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (4848, 4850), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((4930, 4945), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (4943, 4945), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((5019, 5034), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (5032, 5034), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((5116, 5131), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (5129, 5131), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((5222, 5237), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (5235, 5237), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((5322, 5337), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (5335, 5337), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((5414, 5429), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (5427, 5429), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((5517, 5532), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (5530, 5532), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((5618, 5633), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (5631, 5633), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((5717, 5732), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (5730, 5732), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((5825, 5840), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (5838, 5840), False, 'from pyspark.ml.param import Param, 
Params, TypeConverters\n'), ((5932, 5947), 'pyspark.ml.param.Params._dummy', 'Params._dummy', ([], {}), '()\n', (5945, 5947), False, 'from pyspark.ml.param import Param, Params, TypeConverters\n'), ((13795, 13827), 'sparkflow.ml_util.convert_weights_to_json', 'convert_weights_to_json', (['weights'], {}), '(weights)\n', (13818, 13827), False, 'from sparkflow.ml_util import convert_weights_to_json, predict_func\n'), ((12510, 12539), 'json.loads', 'json.loads', (['optimizer_options'], {}), '(optimizer_options)\n', (12520, 12539), False, 'import json\n'), ((4084, 4186), 'sparkflow.ml_util.predict_func', 'predict_func', (['x', 'mod_json', 'out', 'mod_weights', 'inp', 'tf_output', 'tf_input', 'tf_dropout', 'to_keep_dropout'], {}), '(x, mod_json, out, mod_weights, inp, tf_output, tf_input,\n tf_dropout, to_keep_dropout)\n', (4096, 4186), False, 'from sparkflow.ml_util import convert_weights_to_json, predict_func\n'), ((13420, 13464), 'pyspark.SparkContext._active_spark_context.getConf', 'SparkContext._active_spark_context.getConf', ([], {}), '()\n', (13462, 13464), False, 'from pyspark import SparkContext\n')] |
import json
import logging
import os
import sys
from argparse import ArgumentParser
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
from transformers import AutoTokenizer
from src.data.bitext import WMT14TransformersDataset
from src.models.nli_trainer import TransformersNLITrainer
# Command-line interface for the NLI/bitext classification experiment.
parser = ArgumentParser()
# Experiment output directory and pretrained transformer to fine-tune.
parser.add_argument("--experiment_dir", type=str, default="debug")
parser.add_argument("--pretrained_name_or_path", type=str, default="xlm-roberta-base")
parser.add_argument("--model_type", type=str, default="xlm-roberta",
                    choices=["bert", "roberta", "xlm-roberta"])
# Swap the order of the two sentences in each pair when set.
parser.add_argument("--reverse_order", action="store_true")
parser.add_argument("--optimized_metric", default="accuracy",
                    choices=["loss", "accuracy", "binary_f1"])
# Dataset splits (tsv files of the WMT14 de-en bitext).
parser.add_argument("--train_path", type=str, help="Path to training set of WMT14 (tsv)",
                    default="id_wmt14_de-en_bitext_train_mixeddir.tsv")
parser.add_argument("--dev_path", type=str, help="Path to dev set of WMT14 (tsv)",
                    default="id_wmt14_de-en_bitext_train_mixeddir.tsv")
parser.add_argument("--test_path", type=str, help="Path to test set of WMT14 (tsv)",
                    default="id_wmt14_de-en_bitext_train_mixeddir.tsv")
# Training hyperparameters.
parser.add_argument("--num_epochs", type=int, default=2)
parser.add_argument("--max_seq_len", type=int, default=129)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--learning_rate", type=float, default=2e-5)
parser.add_argument("--early_stopping_rounds", type=int, default=5)
parser.add_argument("--validate_every_n_examples", type=int, default=20_000)
# nrows limits how many training rows are read (None = all); use_cpu disables CUDA.
parser.add_argument("--nrows", type=int, default=None)
parser.add_argument("--use_cpu", action="store_true")
if __name__ == "__main__":
args = parser.parse_args()
if not os.path.exists(args.experiment_dir):
os.makedirs(args.experiment_dir)
with open(os.path.join(args.experiment_dir, "experiment_config.json"), "w") as f:
json.dump(vars(args), fp=f, indent=4)
# Set up logging to file and stdout
logger = logging.getLogger()
logger.setLevel(logging.INFO)
for curr_handler in [logging.StreamHandler(sys.stdout),
logging.FileHandler(os.path.join(args.experiment_dir, "experiment.log"))]:
curr_handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))
logger.addHandler(curr_handler)
for k, v in vars(args).items():
v_str = str(v)
v_str = f"...{v_str[-(50 - 3):]}" if len(v_str) > 50 else v_str
logging.info(f"|{k:30s}|{v_str:50s}|")
tokenizer = AutoTokenizer.from_pretrained(args.pretrained_name_or_path)
tokenizer.save_pretrained(args.experiment_dir)
train_set = WMT14TransformersDataset(args.train_path, tokenizer=tokenizer,
max_length=args.max_seq_len, return_tensors="pt",
reverse_order=args.reverse_order,
nrows=args.nrows)
dev_set = WMT14TransformersDataset(args.dev_path, tokenizer=tokenizer,
max_length=args.max_seq_len, return_tensors="pt",
reverse_order=args.reverse_order)
test_set = WMT14TransformersDataset(args.test_path, tokenizer=tokenizer,
max_length=args.max_seq_len, return_tensors="pt",
reverse_order=args.reverse_order)
logging.info(f"Loaded {len(train_set)} training examples, "
f"{len(dev_set)} dev examples and "
f"{len(test_set) if test_set is not None else 0} test examples")
trainer = TransformersNLITrainer(args.experiment_dir,
pretrained_model_name_or_path=args.pretrained_name_or_path,
num_labels=len(train_set.label_names),
batch_size=args.batch_size,
learning_rate=args.learning_rate,
validate_every_n_steps=args.validate_every_n_examples,
early_stopping_tol=args.early_stopping_rounds,
optimized_metric=args.optimized_metric,
device=("cuda" if not args.use_cpu else "cpu"))
trainer.run(train_dataset=train_set, val_dataset=dev_set, num_epochs=args.num_epochs)
trainer = TransformersNLITrainer.from_pretrained(args.experiment_dir)
test_res = trainer.evaluate(test_set)
np_labels = test_set.labels.numpy()
model_metrics = {}
# Warning: threshold is specified for "not_translation" label, but pred=1 indicates a translation!
for curr_thresh in ["argmax", 0.75, 0.9]:
if curr_thresh == "argmax":
curr_pred = test_res["pred_label"].numpy()
else:
curr_pred = np.logical_not(
test_res["pred_proba"][:, test_set.label2idx["not_translation"]].numpy() > curr_thresh
).astype(np.int32)
conf_matrix = confusion_matrix(y_true=np_labels, y_pred=curr_pred)
plt.matshow(conf_matrix, cmap="Blues")
for (i, j), v in np.ndenumerate(conf_matrix):
plt.text(j, i, v, ha='center', va='center',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='0.3'))
plt.xticks([0, 1], test_set.label_names)
plt.yticks([0, 1], test_set.label_names)
plt.xlabel("(y_pred)")
plt.savefig(os.path.join(args.experiment_dir, f"confusion_matrix_{curr_thresh}.png"))
logging.info(f"Confusion matrix ({curr_thresh}):\n {conf_matrix}")
model_metrics[f"thresh-{curr_thresh}"] = {
"binary_accuracy": accuracy_score(y_true=np_labels, y_pred=curr_pred),
"binary_precision": precision_score(y_true=np_labels, y_pred=curr_pred,
average="binary", pos_label=0),
"binary_recall": recall_score(y_true=np_labels, y_pred=curr_pred,
average="binary", pos_label=0),
"binary_f1": f1_score(y_true=np_labels, y_pred=curr_pred,
average="binary", pos_label=0)
}
with open(os.path.join(args.experiment_dir, "metrics.json"), "w") as f_metrics:
logging.info(model_metrics)
json.dump(model_metrics, fp=f_metrics, indent=4)
logging.info(model_metrics)
| [
"logging.getLogger",
"logging.StreamHandler",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"transformers.AutoTokenizer.from_pretrained",
"logging.info",
"os.path.exists",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"numpy.ndenumerate",
"matplotlib.pyplot.yticks",... | [((399, 415), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (413, 415), False, 'from argparse import ArgumentParser\n'), ((2195, 2214), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2212, 2214), False, 'import logging\n'), ((2746, 2805), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.pretrained_name_or_path'], {}), '(args.pretrained_name_or_path)\n', (2775, 2805), False, 'from transformers import AutoTokenizer\n'), ((2874, 3047), 'src.data.bitext.WMT14TransformersDataset', 'WMT14TransformersDataset', (['args.train_path'], {'tokenizer': 'tokenizer', 'max_length': 'args.max_seq_len', 'return_tensors': '"""pt"""', 'reverse_order': 'args.reverse_order', 'nrows': 'args.nrows'}), "(args.train_path, tokenizer=tokenizer, max_length=\n args.max_seq_len, return_tensors='pt', reverse_order=args.reverse_order,\n nrows=args.nrows)\n", (2898, 3047), False, 'from src.data.bitext import WMT14TransformersDataset\n'), ((3176, 3325), 'src.data.bitext.WMT14TransformersDataset', 'WMT14TransformersDataset', (['args.dev_path'], {'tokenizer': 'tokenizer', 'max_length': 'args.max_seq_len', 'return_tensors': '"""pt"""', 'reverse_order': 'args.reverse_order'}), "(args.dev_path, tokenizer=tokenizer, max_length=\n args.max_seq_len, return_tensors='pt', reverse_order=args.reverse_order)\n", (3200, 3325), False, 'from src.data.bitext import WMT14TransformersDataset\n'), ((3414, 3564), 'src.data.bitext.WMT14TransformersDataset', 'WMT14TransformersDataset', (['args.test_path'], {'tokenizer': 'tokenizer', 'max_length': 'args.max_seq_len', 'return_tensors': '"""pt"""', 'reverse_order': 'args.reverse_order'}), "(args.test_path, tokenizer=tokenizer, max_length=\n args.max_seq_len, return_tensors='pt', reverse_order=args.reverse_order)\n", (3438, 3564), False, 'from src.data.bitext import WMT14TransformersDataset\n'), ((4652, 4711), 'src.models.nli_trainer.TransformersNLITrainer.from_pretrained', 
'TransformersNLITrainer.from_pretrained', (['args.experiment_dir'], {}), '(args.experiment_dir)\n', (4690, 4711), False, 'from src.models.nli_trainer import TransformersNLITrainer\n'), ((6644, 6671), 'logging.info', 'logging.info', (['model_metrics'], {}), '(model_metrics)\n', (6656, 6671), False, 'import logging\n'), ((1930, 1965), 'os.path.exists', 'os.path.exists', (['args.experiment_dir'], {}), '(args.experiment_dir)\n', (1944, 1965), False, 'import os\n'), ((1975, 2007), 'os.makedirs', 'os.makedirs', (['args.experiment_dir'], {}), '(args.experiment_dir)\n', (1986, 2007), False, 'import os\n'), ((2274, 2307), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (2295, 2307), False, 'import logging\n'), ((2690, 2728), 'logging.info', 'logging.info', (['f"""|{k:30s}|{v_str:50s}|"""'], {}), "(f'|{k:30s}|{v_str:50s}|')\n", (2702, 2728), False, 'import logging\n'), ((5270, 5322), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', ([], {'y_true': 'np_labels', 'y_pred': 'curr_pred'}), '(y_true=np_labels, y_pred=curr_pred)\n', (5286, 5322), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\n'), ((5331, 5369), 'matplotlib.pyplot.matshow', 'plt.matshow', (['conf_matrix'], {'cmap': '"""Blues"""'}), "(conf_matrix, cmap='Blues')\n", (5342, 5369), True, 'import matplotlib.pyplot as plt\n'), ((5395, 5422), 'numpy.ndenumerate', 'np.ndenumerate', (['conf_matrix'], {}), '(conf_matrix)\n', (5409, 5422), True, 'import numpy as np\n'), ((5574, 5614), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1]', 'test_set.label_names'], {}), '([0, 1], test_set.label_names)\n', (5584, 5614), True, 'import matplotlib.pyplot as plt\n'), ((5623, 5663), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 1]', 'test_set.label_names'], {}), '([0, 1], test_set.label_names)\n', (5633, 5663), True, 'import matplotlib.pyplot as plt\n'), ((5672, 5694), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""(y_pred)"""'], {}), "('(y_pred)')\n", (5682, 5694), True, 'import matplotlib.pyplot as plt\n'), ((5798, 5867), 'logging.info', 'logging.info', (['f"""Confusion matrix ({curr_thresh}):\n {conf_matrix}"""'], {}), '(f"""Confusion matrix ({curr_thresh}):\n {conf_matrix}""")\n', (5810, 5867), False, 'import logging\n'), ((6554, 6581), 'logging.info', 'logging.info', (['model_metrics'], {}), '(model_metrics)\n', (6566, 6581), False, 'import logging\n'), ((6590, 6638), 'json.dump', 'json.dump', (['model_metrics'], {'fp': 'f_metrics', 'indent': '(4)'}), '(model_metrics, fp=f_metrics, indent=4)\n', (6599, 6638), False, 'import json\n'), ((2023, 2082), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""experiment_config.json"""'], {}), "(args.experiment_dir, 'experiment_config.json')\n", (2035, 2082), False, 'import os\n'), ((2354, 2405), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""experiment.log"""'], {}), "(args.experiment_dir, 'experiment.log')\n", (2366, 2405), False, 'import os\n'), ((2443, 2508), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(levelname)-5.5s] %(message)s"""'], {}), "('%(asctime)s [%(levelname)-5.5s] %(message)s')\n", (2460, 2508), False, 'import logging\n'), ((5716, 5788), 'os.path.join', 'os.path.join', (['args.experiment_dir', 'f"""confusion_matrix_{curr_thresh}.png"""'], {}), "(args.experiment_dir, f'confusion_matrix_{curr_thresh}.png')\n", (5728, 5788), False, 'import os\n'), ((5948, 5998), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'np_labels', 'y_pred': 'curr_pred'}), '(y_true=np_labels, y_pred=curr_pred)\n', (5962, 5998), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\n'), ((6032, 6118), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'np_labels', 'y_pred': 'curr_pred', 'average': '"""binary"""', 'pos_label': '(0)'}), "(y_true=np_labels, y_pred=curr_pred, average='binary',\n 
pos_label=0)\n", (6047, 6118), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\n'), ((6193, 6272), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'np_labels', 'y_pred': 'curr_pred', 'average': '"""binary"""', 'pos_label': '(0)'}), "(y_true=np_labels, y_pred=curr_pred, average='binary', pos_label=0)\n", (6205, 6272), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\n'), ((6341, 6416), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'np_labels', 'y_pred': 'curr_pred', 'average': '"""binary"""', 'pos_label': '(0)'}), "(y_true=np_labels, y_pred=curr_pred, average='binary', pos_label=0)\n", (6349, 6416), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\n'), ((6476, 6525), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""metrics.json"""'], {}), "(args.experiment_dir, 'metrics.json')\n", (6488, 6525), False, 'import os\n')] |
# Copyright (c) 2020 NVIDIA Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Represent various robotic gripper hands."""
import os
import numpy as np
import trimesh
import trimesh.transformations as tra
from . import utilities
# fcl (Flexible Collision Library bindings shipped with trimesh) is optional:
# remember whether the import worked so collision geometry can be skipped.
try:
    from trimesh.collision import fcl
    fcl_import_failed = False
except Exception:
    fcl_import_failed = True
# TODO: inheritance
class Hand(object):
    """Base class for robotic gripper hands.

    Subclasses are expected to supply the mesh parts (``base``,
    ``finger_l``, ``finger_r``, ``mesh``) and the closing-ray arrays
    (``ray_origins`` as homogeneous 4-vectors, ``ray_directions`` as
    3-vectors).
    """

    def __init__(self):
        """Start with an identity offset transform."""
        self.offset = np.eye(4)

    @property
    def offset(self):
        """4x4 transform applied to the hand geometry."""
        return self._offset

    @offset.setter
    def offset(self, value):
        """Store the offset and cache its inverse."""
        self._offset = value
        self._offset_inv = tra.inverse_matrix(value)

    @property
    def offset_inv(self):
        """Cached inverse of the offset transform."""
        return self._offset_inv

    def get_closing_rays(self, transform):
        """Return (origins, directions) of the closing rays under *transform*."""
        origins = transform[:3, :].dot(self.ray_origins.T).T
        directions = transform[:3, :3].dot(self.ray_directions.T).T
        return (origins, directions)

    def get_obbs(self):
        """Oriented bounding boxes of the two fingers and the base."""
        parts = (self.finger_l, self.finger_r, self.base)
        return [part.bounding_box for part in parts]

    def show(self, show_rays=False):
        """Visualize the hand mesh, optionally with the closing rays."""
        if not show_rays:
            self.mesh.show()
            return
        origins, directions = self.get_closing_rays(np.eye(4))
        clouds = [
            trimesh.points.PointCloud(
                vertices=origins + directions * t, colors=[255, 0, 0]
            )
            for t in np.linspace(0, 0.03, 20)
        ]
        trimesh.Scene([self.mesh] + clouds).show()
class PandaGripper(Hand):
    """Class for the Panda gripper.

    Loads the palm and finger meshes, positions the fingers according to the
    joint configuration, and precomputes closing rays and (optionally) fcl
    collision geometry. All distances appear to be in meters — TODO confirm.
    """
    def __init__(
        self,
        configuration=None,
        num_contact_points_per_finger=10,
        finger_mesh_filename="data/hands/panda_gripper/finger.stl",
        palm_mesh_filename="data/hands/panda_gripper/hand.stl",
        offset=np.eye(4),  # NOTE(review): evaluated once at def time; safe only while callers never mutate it in place
        finger_scale=[1.0, 1.0, 1.0],  # NOTE(review): mutable default; safe only while it is never mutated
    ):
        """Initialize attributes."""
        # Joint limits and derived geometric constants of the Panda hand.
        self.joint_limits = [0.0, 0.04]
        self.default_pregrasp_configuration = 0.0
        self.maximum_aperture = 0.08
        self.standoff_fingertips = 0.1
        self.offset = offset
        # Box between the fingertips used to test what the grasp would enclose.
        self.closing_region = trimesh.primitives.creation.box(
            extents=[0.08, 0.01, 0.04],
            transform=tra.translation_matrix([0.0, 0.0, 0.09]),
        )
        if configuration is None:
            configuration = self.default_pregrasp_configuration
        self.configuration = configuration
        # Resolve mesh files relative to the package resource directory.
        res_path = utilities.get_resource_path()
        fn_base = os.path.join(res_path, palm_mesh_filename)
        fn_finger = os.path.join(res_path, finger_mesh_filename)
        self.base = trimesh.load(fn_base)
        # After API change:
        # https://github.com/mikedh/trimesh/issues/507
        if isinstance(self.base, trimesh.scene.Scene):
            self.base = self.base.dump().tolist()
            self.base = trimesh.util.concatenate(self.base)
        # Some exports load as a list of 5 sub-meshes; recolor and merge them.
        if isinstance(self.base, list) and len(self.base) == 5:
            for i in range(len(self.base)):
                self.base[i].visual = trimesh.visual.ColorVisuals()
                for facet in self.base[i].facets:
                    self.base.visual.face_colors[facet] = trimesh.visual.random_color()
            self.base = trimesh.util.concatenate(self.base)
        self.finger_l = trimesh.load(fn_finger)
        self.finger_l.apply_scale(finger_scale)
        # After API change:
        # https://github.com/mikedh/trimesh/issues/507
        if isinstance(self.finger_l, trimesh.scene.Scene):
            self.finger_l = self.finger_l.dump().tolist()
        # Finger may load as [finger, fingertip]; color each part, then merge.
        if isinstance(self.finger_l, list) and len(self.finger_l) == 2:
            for i in range(len(self.finger_l)):
                self.finger_l[i].visual = trimesh.visual.ColorVisuals()
            # finger - silver
            self.finger_l[0].visual.face_colors[:] = np.array(
                [192, 192, 192, 255], dtype=np.uint8
            )
            # fingertip - black
            self.finger_l[1].visual.face_colors[:] = np.array(
                [9, 9, 9, 255], dtype=np.uint8
            )
            self.finger_l = trimesh.util.concatenate(self.finger_l)
        # Right finger is a mirrored copy of the left one.
        self.finger_r = self.finger_l.copy()
        # transform fingers relative to the base
        self.finger_l.apply_transform(tra.euler_matrix(0, 0, np.pi))
        self.finger_l.apply_translation([+configuration, 0, 0.0584])
        self.finger_r.apply_translation([-configuration, 0, 0.0584])
        # generate fcl collision geometry
        # (only when the optional fcl bindings imported successfully)
        if not fcl_import_failed and fcl:
            self.fcl_objs = [
                fcl.CollisionObject(
                    fcl.Box(*part.bounding_box.primitive.extents),
                    utilities.numpy_to_fcl_transform(
                        offset @ part.bounding_box.primitive.transform
                    ),
                )
                for part in [self.finger_l, self.finger_r, self.base]
            ]
            self.fcl_transforms = [
                offset @ part.bounding_box.primitive.transform
                for part in [self.finger_l, self.finger_r, self.base]
            ]
        # generate rays for heuristics and contact tests
        # Origins are homogeneous 4-vectors spread along each finger; each
        # direction points from its finger toward the opposing one.
        self.ray_origins = []
        self.ray_directions = []
        for i in np.linspace(
            -0.01 * finger_scale[-1],
            0.02 * finger_scale[-1],
            num_contact_points_per_finger,
        ):
            self.ray_origins.append(
                offset @ np.r_[self.finger_l.bounding_box.centroid + [0, 0, i], 1]
            )
            self.ray_origins.append(
                offset @ np.r_[self.finger_r.bounding_box.centroid + [0, 0, i], 1]
            )
            tmp = (
                offset
                @ np.r_[-self.finger_l.bounding_box.primitive.transform[:3, 0], 1]
            )
            self.ray_directions.append(tmp[:3])
            tmp = (
                offset
                @ np.r_[+self.finger_r.bounding_box.primitive.transform[:3, 0], 1]
            )
            self.ray_directions.append(tmp[:3])
        self.ray_origins = np.array(self.ray_origins)
        self.ray_directions = np.array(self.ray_directions)
        # transform according to offset
        self.base.apply_transform(offset)
        self.closing_region.apply_transform(offset)
        self.finger_l.apply_transform(offset)
        self.finger_r.apply_transform(offset)
        # Combined meshes used for visualization and bulk collision checks.
        self.fingers = trimesh.util.concatenate([self.finger_l, self.finger_r])
        self.mesh = trimesh.util.concatenate([self.fingers, self.base])
        # Valid standoff distances along the approach (z) axis: from just past
        # the palm up to the fingertip.
        self.standoff_range = np.array(
            [
                max(
                    self.finger_l.bounding_box.bounds[0, 2],
                    self.base.bounding_box.bounds[1, 2],
                ),
                self.finger_l.bounding_box.bounds[1, 2],
            ]
        )
        self.standoff_range[0] += 0.001
    def get_obbs(self):
        """Get oriented bounding boxes."""
        return [
            self.finger_l.bounding_box,
            self.finger_r.bounding_box,
            self.base.bounding_box,
        ]
    def get_fcl_collision_objects(self):
        """Get objects in collision."""
        # Only available when the fcl import succeeded (see module-level guard).
        return self.fcl_objs
    def get_fcl_transforms(self):
        """Get fcl transforms."""
        return self.fcl_transforms
# Registry of named gripper configurations.  Every entry maps a lookup key to
# a constructor class ("cls") plus the keyword arguments ("params") passed to
# it by create_gripper().  All current entries use PandaGripper; they differ
# only in mesh files, mounting offset and finger scaling.
available_grippers = {
    # Default Panda gripper, no offset, collision meshes.
    "panda": {"cls": PandaGripper, "params": {}},
    # Rotated -90 deg about z (original mesh orientation).
    "panda_original": {
        "cls": PandaGripper,
        "params": {
            "offset": tra.rotation_matrix(-np.pi / 2.0, [0, 0, 1]),
        },
    },
    # Mounted relative to Franka link7: rotated -135 deg about z and
    # translated 0.107 m along z.
    "panda_franka_link7": {
        "cls": PandaGripper,
        "params": {
            "offset": tra.compose_matrix(
                angles=[0, 0, -0.75 * np.pi], translate=[0, 0, 0.107]
            ),
        },
    },
    # Same link7 mounting, fingers stretched 1.75x along z.
    "panda_franka_link7_longfingers": {
        "cls": PandaGripper,
        "params": {
            "offset": tra.compose_matrix(
                angles=[0, 0, -0.75 * np.pi], translate=[0, 0, 0.107]
            ),
            "finger_scale": [1.0, 1.0, 1.75],
        },
    },
    # Original orientation with elongated fingers.
    "panda_original_longfingers": {
        "cls": PandaGripper,
        "params": {
            "offset": tra.rotation_matrix(-np.pi / 2.0, [0, 0, 1]),
            "finger_scale": [1.0, 1.0, 1.75],
        },
    },
    # High-detail visual meshes (STL).
    "panda_visual": {
        "cls": PandaGripper,
        "params": {
            "finger_mesh_filename": "data/hands/panda_gripper/visual/finger_detail.stl",
            "palm_mesh_filename": "data/hands/panda_gripper/visual/hand_detail.stl",
        },
    },
    # High-detail visual meshes with material colors (OBJ).
    "panda_visual_colored": {
        "cls": PandaGripper,
        "params": {
            "finger_mesh_filename": "data/hands/panda_gripper/visual/finger_detail.obj",
            "palm_mesh_filename": "data/hands/panda_gripper/visual/hand_detail.obj",
        },
    },
    # Simplified tube-style meshes.
    "panda_tube": {
        "cls": PandaGripper,
        "params": {
            "finger_mesh_filename": "data/hands/panda_gripper/visual/finger_tube.stl",
            "palm_mesh_filename": "data/hands/panda_gripper/visual/base_tube.stl",
        },
    },
    # Tube meshes mounted on Franka link7.
    "panda_tube_franka_link7": {
        "cls": PandaGripper,
        "params": {
            "finger_mesh_filename": "data/hands/panda_gripper/visual/finger_tube.stl",
            "palm_mesh_filename": "data/hands/panda_gripper/visual/base_tube.stl",
            "offset": tra.compose_matrix(
                angles=[0, 0, -0.75 * np.pi], translate=[0, 0, 0.107]
            ),
        },
    },
    # Tube meshes, link7 mounting, elongated fingers.
    "panda_tube_franka_link7_longfingers": {
        "cls": PandaGripper,
        "params": {
            "finger_mesh_filename": "data/hands/panda_gripper/visual/finger_tube.stl",
            "palm_mesh_filename": "data/hands/panda_gripper/visual/base_tube.stl",
            "offset": tra.compose_matrix(
                angles=[0, 0, -0.75 * np.pi], translate=[0, 0, 0.107]
            ),
            "finger_scale": [1.0, 1.0, 1.75],
        },
    },
}
def get_available_grippers():
    """Return the names of all registered gripper configurations.

    Returns:
        list: registry keys of ``available_grippers`` in insertion order.
    """
    return [name for name in available_grippers]
def get_gripper_name(gripper):
    """Find the registry key matching a gripper instance.

    Args:
        gripper: gripper object to identify.

    Returns:
        str or None: the first key in ``available_grippers`` whose "cls"
        entry *gripper* is an instance of; ``None`` when nothing matches.
    """
    for name, entry in available_grippers.items():
        # TODO: check meshes
        if isinstance(gripper, entry["cls"]):
            return name
    return None
def create_gripper(name, configuration=None):
    """Instantiate a gripper from the ``available_grippers`` registry.

    Args:
        name (str): registry key; matched case-insensitively.
        configuration: optional configuration forwarded to the constructor.

    Returns:
        The constructed gripper instance.

    Raises:
        KeyError: if ``name.lower()`` is not a registered gripper.
    """
    entry = available_grippers[name.lower()]
    gripper_cls = entry["cls"]
    return gripper_cls(configuration=configuration, **entry["params"])
| [
"trimesh.transformations.inverse_matrix",
"trimesh.transformations.rotation_matrix",
"numpy.eye",
"trimesh.transformations.compose_matrix",
"trimesh.collision.fcl.Box",
"trimesh.points.PointCloud",
"os.path.join",
"trimesh.load",
"numpy.linspace",
"numpy.array",
"trimesh.util.concatenate",
"tr... | [((1526, 1535), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1532, 1535), True, 'import numpy as np\n'), ((2319, 2344), 'trimesh.transformations.inverse_matrix', 'tra.inverse_matrix', (['value'], {}), '(value)\n', (2337, 2344), True, 'import trimesh.transformations as tra\n'), ((3131, 3140), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3137, 3140), True, 'import numpy as np\n'), ((3808, 3850), 'os.path.join', 'os.path.join', (['res_path', 'palm_mesh_filename'], {}), '(res_path, palm_mesh_filename)\n', (3820, 3850), False, 'import os\n'), ((3871, 3915), 'os.path.join', 'os.path.join', (['res_path', 'finger_mesh_filename'], {}), '(res_path, finger_mesh_filename)\n', (3883, 3915), False, 'import os\n'), ((3936, 3957), 'trimesh.load', 'trimesh.load', (['fn_base'], {}), '(fn_base)\n', (3948, 3957), False, 'import trimesh\n'), ((4608, 4631), 'trimesh.load', 'trimesh.load', (['fn_finger'], {}), '(fn_finger)\n', (4620, 4631), False, 'import trimesh\n'), ((6553, 6650), 'numpy.linspace', 'np.linspace', (['(-0.01 * finger_scale[-1])', '(0.02 * finger_scale[-1])', 'num_contact_points_per_finger'], {}), '(-0.01 * finger_scale[-1], 0.02 * finger_scale[-1],\n num_contact_points_per_finger)\n', (6564, 6650), True, 'import numpy as np\n'), ((7367, 7393), 'numpy.array', 'np.array', (['self.ray_origins'], {}), '(self.ray_origins)\n', (7375, 7393), True, 'import numpy as np\n'), ((7424, 7453), 'numpy.array', 'np.array', (['self.ray_directions'], {}), '(self.ray_directions)\n', (7432, 7453), True, 'import numpy as np\n'), ((7705, 7761), 'trimesh.util.concatenate', 'trimesh.util.concatenate', (['[self.finger_l, self.finger_r]'], {}), '([self.finger_l, self.finger_r])\n', (7729, 7761), False, 'import trimesh\n'), ((7782, 7833), 'trimesh.util.concatenate', 'trimesh.util.concatenate', (['[self.fingers, self.base]'], {}), '([self.fingers, self.base])\n', (7806, 7833), False, 'import trimesh\n'), ((2540, 2564), 'numpy.linspace', 'np.linspace', (['(0)', '(0.03)', '(20)'], {}), 
'(0, 0.03, 20)\n', (2551, 2564), True, 'import numpy as np\n'), ((4171, 4206), 'trimesh.util.concatenate', 'trimesh.util.concatenate', (['self.base'], {}), '(self.base)\n', (4195, 4206), False, 'import trimesh\n'), ((4547, 4582), 'trimesh.util.concatenate', 'trimesh.util.concatenate', (['self.base'], {}), '(self.base)\n', (4571, 4582), False, 'import trimesh\n'), ((5158, 5204), 'numpy.array', 'np.array', (['[192, 192, 192, 255]'], {'dtype': 'np.uint8'}), '([192, 192, 192, 255], dtype=np.uint8)\n', (5166, 5204), True, 'import numpy as np\n'), ((5320, 5360), 'numpy.array', 'np.array', (['[9, 9, 9, 255]'], {'dtype': 'np.uint8'}), '([9, 9, 9, 255], dtype=np.uint8)\n', (5328, 5360), True, 'import numpy as np\n'), ((5420, 5459), 'trimesh.util.concatenate', 'trimesh.util.concatenate', (['self.finger_l'], {}), '(self.finger_l)\n', (5444, 5459), False, 'import trimesh\n'), ((5594, 5623), 'trimesh.transformations.euler_matrix', 'tra.euler_matrix', (['(0)', '(0)', 'np.pi'], {}), '(0, 0, np.pi)\n', (5610, 5623), True, 'import trimesh.transformations as tra\n'), ((8764, 8808), 'trimesh.transformations.rotation_matrix', 'tra.rotation_matrix', (['(-np.pi / 2.0)', '[0, 0, 1]'], {}), '(-np.pi / 2.0, [0, 0, 1])\n', (8783, 8808), True, 'import trimesh.transformations as tra\n'), ((8927, 9000), 'trimesh.transformations.compose_matrix', 'tra.compose_matrix', ([], {'angles': '[0, 0, -0.75 * np.pi]', 'translate': '[0, 0, 0.107]'}), '(angles=[0, 0, -0.75 * np.pi], translate=[0, 0, 0.107])\n', (8945, 9000), True, 'import trimesh.transformations as tra\n'), ((9161, 9234), 'trimesh.transformations.compose_matrix', 'tra.compose_matrix', ([], {'angles': '[0, 0, -0.75 * np.pi]', 'translate': '[0, 0, 0.107]'}), '(angles=[0, 0, -0.75 * np.pi], translate=[0, 0, 0.107])\n', (9179, 9234), True, 'import trimesh.transformations as tra\n'), ((9437, 9481), 'trimesh.transformations.rotation_matrix', 'tra.rotation_matrix', (['(-np.pi / 2.0)', '[0, 0, 1]'], {}), '(-np.pi / 2.0, [0, 0, 1])\n', (9456, 9481), 
True, 'import trimesh.transformations as tra\n'), ((10612, 10685), 'trimesh.transformations.compose_matrix', 'tra.compose_matrix', ([], {'angles': '[0, 0, -0.75 * np.pi]', 'translate': '[0, 0, 0.107]'}), '(angles=[0, 0, -0.75 * np.pi], translate=[0, 0, 0.107])\n', (10630, 10685), True, 'import trimesh.transformations as tra\n'), ((11021, 11094), 'trimesh.transformations.compose_matrix', 'tra.compose_matrix', ([], {'angles': '[0, 0, -0.75 * np.pi]', 'translate': '[0, 0, 0.107]'}), '(angles=[0, 0, -0.75 * np.pi], translate=[0, 0, 0.107])\n', (11039, 11094), True, 'import trimesh.transformations as tra\n'), ((2476, 2485), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2482, 2485), True, 'import numpy as np\n'), ((3546, 3586), 'trimesh.transformations.translation_matrix', 'tra.translation_matrix', (['[0.0, 0.0, 0.09]'], {}), '([0.0, 0.0, 0.09])\n', (3568, 3586), True, 'import trimesh.transformations as tra\n'), ((4354, 4383), 'trimesh.visual.ColorVisuals', 'trimesh.visual.ColorVisuals', ([], {}), '()\n', (4381, 4383), False, 'import trimesh\n'), ((5044, 5073), 'trimesh.visual.ColorVisuals', 'trimesh.visual.ColorVisuals', ([], {}), '()\n', (5071, 5073), False, 'import trimesh\n'), ((2625, 2690), 'trimesh.points.PointCloud', 'trimesh.points.PointCloud', ([], {'vertices': '(a + b * x)', 'colors': '[255, 0, 0]'}), '(vertices=a + b * x, colors=[255, 0, 0])\n', (2650, 2690), False, 'import trimesh\n'), ((2721, 2764), 'trimesh.Scene', 'trimesh.Scene', (['([self.mesh] + rays_as_points)'], {}), '([self.mesh] + rays_as_points)\n', (2734, 2764), False, 'import trimesh\n'), ((4492, 4521), 'trimesh.visual.random_color', 'trimesh.visual.random_color', ([], {}), '()\n', (4519, 4521), False, 'import trimesh\n'), ((5935, 5980), 'trimesh.collision.fcl.Box', 'fcl.Box', (['*part.bounding_box.primitive.extents'], {}), '(*part.bounding_box.primitive.extents)\n', (5942, 5980), False, 'from trimesh.collision import fcl\n')] |
import serial
import paho.mqtt.client as mqtt
import json
from datetime import datetime
from o2_helper import GetO2Voltage
import numpy as np
import socket
# Open the NDIR sensor's serial port at 9600 baud and discard the first two
# (possibly partial) lines before sampling starts.
ser = serial.Serial('/dev/ttyUSB0', 9600)
ser.readline()
ser.readline()
# ThingsBoard MQTT broker; the hostname doubles as the device access token.
THINGSBOARD_HOST = '192.168.0.200'
ACCESS_TOKEN = socket.gethostname()
client = mqtt.Client()
client.username_pw_set(ACCESS_TOKEN)
client.connect(THINGSBOARD_HOST, 1883, 60)
# Run the MQTT network loop in a background thread so publish() is non-blocking.
client.loop_start()
def XaXfXp(XO2,XCO2, XCH4):
    """Solve the fixed 3x3 linear system T @ [Xa, Xf, Xp] = [XO2, XCO2, XCH4].

    The coefficient matrix T is hard-coded; 0.209 matches the atmospheric O2
    fraction, so the solution presumably splits the measured gas fractions
    into air/fuel/product components -- confirm against the sensor docs.

    Returns:
        numpy.ndarray: the solution vector [Xa, Xf, Xp].
    """
    coefficients = np.array([
        [0.209, 0.0, 0.0],
        [0.0, 0.319, 0.214],
        [0.0, 1.897, 0.0],
    ])
    measured = np.array([XO2, XCO2, XCH4])
    return np.linalg.solve(coefficients, measured)
def ReadUSBData():
    """Read one sample line from the serial sensor and parse it into a dict.

    Assumes the device emits space-separated fields where fields 2-4 are hex
    peak-to-peak ADC counts, 5-6 are CH4/CO2 in ppm, and 7 is temperature --
    TODO confirm against the firmware's output format.  NOTE(review): the
    line is converted with str() on the raw bytes (producing a "b'...'"
    repr) before splitting; the field indices apparently account for that.
    """
    LINE = str(ser.readline())
    data_arr = LINE.split(' ')
    dv = {}
    # Timestamp of this sample, ISO-8601.
    dv['TimeNow'] = datetime.now().isoformat()
    # Hex ADC counts scaled to volts (16-bit full scale over 3 V).
    dv['PkPkRef'] = float.fromhex(data_arr[2])/(65536.0/3.0)
    dv['PkPk1'] = float.fromhex(data_arr[3])/(65536.0/3.0)
    dv['PkPk2'] = float.fromhex(data_arr[4])/(65536.0/3.0)
    # ppm -> fraction-style percentage (value * 100 / 1e6).
    dv['CH4'] = float(data_arr[5])*100/1000000
    dv['CO2'] = float(data_arr[6])*100/1000000
    dv['Temperature'] = float(data_arr[7])
    # O2 reading comes from a separate analog sensor helper.
    dv['O2'], dv['O2Voltage'] = GetO2Voltage()
    # Decompose the gas mix into Xa/Xf/Xp via the fixed linear model.
    dv['Xa'], dv['Xf'], dv['Xp'] = XaXfXp(dv['O2'],dv['CO2'], dv['CH4'])
    return dv
def do_loop():
    """Continuously sample the sensor, publish over MQTT and log to CSV.

    Runs forever.  Each iteration reads one sample via ReadUSBData(),
    publishes it as JSON telemetry (QoS 1), appends a formatted row to an
    hourly CSV file, and prints a summary to stdout.
    """
    y = 0
    while True:
        y += 1
        dv = ReadUSBData()
        dv['y'] = y
        client.publish('v1/devices/me/telemetry', json.dumps(dv), 1)
        outt = '%s, %5.4f, %5.4f, %5.4f, %5.4f, ' % (dv['TimeNow'], dv['PkPkRef'], dv['PkPk1'], dv['PkPk2'], -1)
        outt = outt + '%5.2f, %5.3f, %5.3f, %5.3f, %5.3f, %5.5f\n' % (dv['Temperature'], -1, dv['CO2'], dv['CH4'], dv['O2'], dv['O2Voltage'])
        # One CSV per hour: ISO timestamp truncated to 'YYYY-MM-DDTHH'.
        # `with` guarantees the file handle is closed even if the write
        # raises (the original open/write/close leaked it on error).
        with open('data/NDIRJupiterEV_' + datetime.now().isoformat()[0:13] +'.csv','a') as myfile:
            myfile.write(outt)
        print(y, " Time: CO2: %5.1f CH4: %5.1f O2: %5.1f PkPkRef: %5.3f PkPkCO2: %5.3f PkPkCH4: %5.3f " % (dv['CO2'], dv['CH4'], dv['O2'], dv['PkPkRef'], dv['PkPk1'], dv['PkPk2']))
do_loop()
| [
"o2_helper.GetO2Voltage",
"numpy.linalg.solve",
"paho.mqtt.client.Client",
"json.dumps",
"numpy.array",
"datetime.datetime.now",
"serial.Serial",
"socket.gethostname"
] | [((163, 198), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyUSB0"""', '(9600)'], {}), "('/dev/ttyUSB0', 9600)\n", (176, 198), False, 'import serial\n'), ((280, 300), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (298, 300), False, 'import socket\n'), ((310, 323), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (321, 323), True, 'import paho.mqtt.client as mqtt\n'), ((461, 520), 'numpy.array', 'np.array', (['[[0.209, 0, 0], [0, 0.319, 0.214], [0, 1.897, 0]]'], {}), '([[0.209, 0, 0], [0, 0.319, 0.214], [0, 1.897, 0]])\n', (469, 520), True, 'import numpy as np\n'), ((561, 588), 'numpy.array', 'np.array', (['[XO2, XCO2, XCH4]'], {}), '([XO2, XCO2, XCH4])\n', (569, 588), True, 'import numpy as np\n'), ((598, 619), 'numpy.linalg.solve', 'np.linalg.solve', (['T', 'X'], {}), '(T, X)\n', (613, 619), True, 'import numpy as np\n'), ((1109, 1123), 'o2_helper.GetO2Voltage', 'GetO2Voltage', ([], {}), '()\n', (1121, 1123), False, 'from o2_helper import GetO2Voltage\n'), ((734, 748), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (746, 748), False, 'from datetime import datetime\n'), ((1359, 1373), 'json.dumps', 'json.dumps', (['dv'], {}), '(dv)\n', (1369, 1373), False, 'import json\n'), ((1679, 1693), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1691, 1693), False, 'from datetime import datetime\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import os
import time
# Add this file's parent directory to sys.path so sibling packages
# (utils, config, ...) are importable when the file is run directly.
currentUrl = os.path.dirname(__file__)
parentUrl = os.path.abspath(os.path.join(currentUrl, os.pardir))
sys.path.append(parentUrl)
import argparse
import numpy as np
import torch
import torch.utils.data
from torch.utils.data import DataLoader
from torchvision.transforms import functional as F
import matplotlib.pyplot as plt
import cv2
from glob import glob
import os.path as osp
from utils.preprocessing import load_img, load_skeleton, get_bbox, process_bbox, augmentation, transform_input_to_output_space, trans_point2d
from utils.transforms import world2cam, cam2pixel, pixel2cam
from utils.vis import vis_keypoints, vis_3d_keypoints, plot_hand
from utils.standard_legends import idx_InterHand
from PIL import Image, ImageDraw
import random
import json
import math
from pycocotools.coco import COCO
import scipy.io as sio
class InterHandDataset(torch.utils.data.Dataset):
    """InterHand2.6M hand-pose dataset.

    Loads COCO-style annotations plus per-capture camera and 3D world-joint
    JSON files, projects world joints into each camera's pixel space, and
    serves augmented image/2D-pose samples.  Single-hand and interacting-hand
    annotations are kept in separate lists and concatenated.
    """
    def __init__(self, cfg, transforms, mode):
        """Build the sample list for one split.

        Args:
            cfg: experiment config (reads cfg.DATASET.DATA_DIR,
                cfg.DATASET.NUM_JOINTS, cfg.MODEL.INPUT_SIZE).
            transforms: torchvision transform applied to each image.
            mode (str): split name -- 'train', 'test' or 'val'.
        """
        self.cfg = cfg
        self.name = 'InterHand'
        self.mode = mode # train, test, val
        self.img_path = osp.join(cfg.DATASET.DATA_DIR, 'images') # '../data/InterHand2.6M/images'
        self.annot_path = osp.join(cfg.DATASET.DATA_DIR, 'annotations') # '../data/InterHand2.6M/annotations'
        # if self.mode == 'val':
        #     self.rootnet_output_path = '../data/InterHand2.6M/rootnet_output/rootnet_interhand2.6m_output_val.json'
        # else:
        #     self.rootnet_output_path = '../data/InterHand2.6M/rootnet_output/rootnet_interhand2.6m_output_test.json'
        self.transform = transforms
        self.joint_num = cfg.DATASET.NUM_JOINTS # 21 # single hand
        # Joint index of each hand's root; all 42 joints are ordered
        # right-hand first (0..20) then left-hand (21..41).
        self.root_joint_idx = {'right': 0, 'left': 21} # Please modify this idx after changing the order of joints
        self.joint_type = {'right': np.arange(0,self.joint_num), 'left': np.arange(self.joint_num,self.joint_num*2)}
        self.skeleton = load_skeleton(osp.join(self.annot_path, 'skeleton.txt'), self.joint_num*2)
        self.datalist = []
        self.datalist_sh = []  # single-hand samples
        self.datalist_ih = []  # interacting-hand samples
        self.sequence_names = []
        # load annotation
        print("Load annotation from " + osp.join(self.annot_path, self.mode))
        t1 = time.time()
        prefix = 'simple_'
        db = COCO(osp.join(self.annot_path, self.mode, prefix+'InterHand2.6M_' + self.mode + '_data.json'))
        with open(osp.join(self.annot_path, self.mode, 'InterHand2.6M_' + self.mode + '_camera.json')) as f:
            cameras = json.load(f)
        with open(osp.join(self.annot_path, self.mode, 'InterHand2.6M_' + self.mode + '_joint_3d.json')) as f:
            joints = json.load(f)
        print("Annotation loading spent {}s".format(time.time()-t1))
        # if (self.mode == 'val' or self.mode == 'test') and cfg.trans_test == 'rootnet':
        #     print("Get bbox and root depth from " + self.rootnet_output_path)
        #     rootnet_result = {}
        #     with open(self.rootnet_output_path) as f:
        #         annot = json.load(f)
        #     for i in range(len(annot)):
        #         rootnet_result[str(annot[i]['annot_id'])] = annot[i]
        # else:
        #     print("Get bbox and root depth from groundtruth annotation")
        for aid in db.anns.keys():
            ann = db.anns[aid]
            image_id = ann['image_id']
            img = db.loadImgs(image_id)[0]
            capture_id = img['capture']
            seq_name = img['seq_name']
            cam = img['camera']
            frame_idx = img['frame_idx']
            img_path = osp.join(self.img_path, self.mode, img['file_name'])
            # Camera extrinsics (position/rotation) and intrinsics (focal/principal point).
            campos, camrot = np.array(cameras[str(capture_id)]['campos'][str(cam)], dtype=np.float32), np.array(cameras[str(capture_id)]['camrot'][str(cam)], dtype=np.float32)
            focal, princpt = np.array(cameras[str(capture_id)]['focal'][str(cam)], dtype=np.float32), np.array(cameras[str(capture_id)]['princpt'][str(cam)], dtype=np.float32)
            # get the groundtruth pose and reorder it
            joint_world = np.array(joints[str(capture_id)][str(frame_idx)]['world_coord'], dtype=np.float32)[idx_InterHand] # 42 x 3
            joint_cam = world2cam(joint_world.transpose(1,0), camrot, campos.reshape(3,1)).transpose(1,0)
            joint_img = cam2pixel(joint_cam, focal, princpt) # 42 x 2 [u,v]
            # 1 if a joint is annotated and inside of image. 0 otherwise
            joint_valid = np.array(ann['joint_valid'],dtype=np.float32).reshape(self.joint_num*2)
            # if root is not valid -> root-relative 3D pose is also not valid. Therefore, mark all joints as invalid
            joint_valid[self.joint_type['right']] *= joint_valid[self.root_joint_idx['right']]
            joint_valid[self.joint_type['left']] *= joint_valid[self.root_joint_idx['left']]
            hand_type = ann['hand_type']
            # 1 if hand_type in ('right', 'left') or hand_type == 'interacting' and np.sum(joint_valid) > 30, 0 otherwise
            hand_type_valid = np.array((ann['hand_type_valid']), dtype=np.float32)
            # if (self.mode == 'val' or self.mode == 'test') and cfg.trans_test == 'rootnet':
            #     bbox = np.array(rootnet_result[str(aid)]['bbox'],dtype=np.float32)
            #     abs_depth = {'right': rootnet_result[str(aid)]['abs_depth'][0], 'left': rootnet_result[str(aid)]['abs_depth'][1]}
            # else:
            img_width, img_height = img['width'], img['height'] # original image size 344(w) x 512(h)
            bbox = np.array(ann['bbox'],dtype=np.float32) # x,y,w,h
            bbox = process_bbox(bbox, (img_height, img_width))
            # Absolute camera-space depth of each hand's root joint.
            abs_depth = {'right': joint_cam[self.root_joint_idx['right'],2], 'left': joint_cam[self.root_joint_idx['left'],2]}
            cam_param = {'focal': focal, 'princpt': princpt}
            joint = {'cam_coord': joint_cam, 'img_coord': joint_img, 'valid': joint_valid}
            data = {'img_path': img_path, 'seq_name': seq_name, 'cam_param': cam_param, 'bbox': bbox, 'joint': joint, 'hand_type': hand_type, 'hand_type_valid': hand_type_valid, 'abs_depth': abs_depth, 'file_name': img['file_name'], 'capture': capture_id, 'cam': cam, 'frame': frame_idx}
            if hand_type == 'right' or hand_type == 'left':
                self.datalist_sh.append(data)
            else:
                self.datalist_ih.append(data)
            if seq_name not in self.sequence_names:
                self.sequence_names.append(seq_name)
        self.datalist = self.datalist_sh + self.datalist_ih
        print('Number of annotations in single hand sequences: ' + str(len(self.datalist_sh)))
        print('Number of annotations in interacting hand sequences: ' + str(len(self.datalist_ih)))
    def handtype_str2array(self, hand_type):
        """Encode the hand-type string as a float32 [right, left] flag vector.

        NOTE(review): the fallback `assert 0, print(...)` is stripped under
        `python -O`; an explicit raise would be safer.
        """
        if hand_type == 'right':
            return np.array([1,0], dtype=np.float32)
        elif hand_type == 'left':
            return np.array([0,1], dtype=np.float32)
        elif hand_type == 'interacting':
            return np.array([1,1], dtype=np.float32)
        else:
            assert 0, print('Not supported hand type: ' + hand_type)
    def __len__(self):
        """Number of samples (single-hand + interacting-hand)."""
        return len(self.datalist)
    def __getitem__(self, idx):
        """Load, augment and return one sample.

        Returns:
            dict: 'imgs' (transformed image tensor), 'pose2d_gt'
            (42 x 3 array of [u, v, z]), 'joint_valid' (42 flags),
            'hand_type' ([right, left] flag vector).
        """
        data = self.datalist[idx]
        img_path, bbox, joint, hand_type, hand_type_valid = data['img_path'], data['bbox'], data['joint'], data['hand_type'], data['hand_type_valid']
        joint_cam = joint['cam_coord'].copy()
        joint_img = joint['img_coord'].copy()
        joint_valid = joint['valid'].copy() # 1 if inside the image, 0 otherwise. # 42
        hand_type_vec = self.handtype_str2array(hand_type)
        joint_coord = np.concatenate((joint_img, joint_cam[:,2,None]),1) # 42 x 3 [u,v,z]
        # input(joint_valid)
        # input(joint_coord)
        # image load
        # NOTE(review): the bare `except` only prints a warning, leaving `img`
        # unbound on failure, so augmentation() would then raise NameError.
        try:
            img = cv2.cvtColor(cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION), cv2.COLOR_BGR2RGB) # 512 x 334 x 3
        except:
            print('[Warning] Invalid image path:', img_path)
        # DEBUG
        # f = plt.figure()
        # ax1 = f.add_subplot(1,1,1)
        # ax1.imshow(img)
        # for k in range(joint_coord.shape[0]):
        #     print('[{:.4f}, {:.4f}, {:.4f}],'.format(*joint_coord[k]))
        # print(hand_type_vec)
        # if hand_type_vec[0] == 1:
        #     plot_hand(ax1, joint_coord[0:21,0:2], vis=joint_valid[0:21], order = 'uv')
        # elif hand_type_vec[1] == 1:
        #     plot_hand(ax1, joint_coord[21:42,0:2], vis=joint_valid[21:42], order = 'uv')
        # ax1.set_title(hand_type)
        # plt.show()
        # augmentation
        img, joint_coord, joint_valid, hand_type_vec, inv_trans = augmentation(img, bbox, joint_coord, joint_valid, hand_type_vec, self.mode, self.joint_type, self.cfg.MODEL.INPUT_SIZE)
        # f1 = plt.figure()
        # ax1 = f1.add_subplot(1,1,1)
        # ax1.imshow(img.astype(int))
        # for k in range(joint_coord.shape[0]):
        #     print('[{:.4f}, {:.4f}, {:.4f}],'.format(*joint_coord[k]))
        # print(joint_coord)
        # if hand_type_vec[0] == 1:
        #     plot_hand(ax1, joint_coord[0:21,0:2], vis=joint_valid[0:21], order = 'uv')
        # elif hand_type_vec[1] == 1:
        #     plot_hand(ax1, joint_coord[21:42,0:2], vis=joint_valid[21:42], order = 'uv')
        # ax1.set_title(hand_type)
        # plt.show()
        #rel_root_depth = np.array([joint_coord[self.root_joint_idx['left'],2] - joint_coord[self.root_joint_idx['right'],2]],dtype=np.float32).reshape(1)
        #root_valid = np.array([joint_valid[self.root_joint_idx['right']] * joint_valid[self.root_joint_idx['left']]],dtype=np.float32).reshape(1) if hand_type_vec[0]*hand_type_vec[1] == 1 else np.zeros((1),dtype=np.float32)
        # transform to output heatmap space (this line of code is useless for anchor-based estimation)
        #joint_coord, joint_valid, rel_root_depth, root_valid = transform_input_to_output_space(self.cfg, joint_coord, joint_valid, rel_root_depth, root_valid, self.root_joint_idx, self.joint_type)
        img = self.transform(img.astype(np.float32) / 255.)
        # inputs = {'img': img}
        # targets = {'pose2d_gt': joint_coord, 'rel_root_depth': rel_root_depth, 'hand_type': hand_type_vec}
        # meta_info = {'joint_valid': joint_valid, 'root_valid': root_valid, 'hand_type_valid': hand_type_valid, 'inv_trans': inv_trans, 'capture': int(data['capture']), 'cam': int(data['cam']), 'frame': int(data['frame'])}
        return {'imgs': img, 'pose2d_gt': joint_coord, 'joint_valid': joint_valid, 'hand_type': hand_type_vec}
        #return inputs, targets, meta_info
    def evaluate(self, preds):
        """Compute and print MPJPE / MRRPE / handedness-accuracy metrics.

        Args:
            preds (dict): 'joint_coord', 'rel_root_depth', 'hand_type',
                'inv_trans' -- one entry per sample, aligned with
                self.datalist.

        NOTE(review): this method reads a module-level `cfg`
        (output_hm_shape, input_img_shape, bbox_3d_size, ...) rather than
        self.cfg; it only works when such a cfg exists at module scope --
        confirm before use.
        """
        print()
        print('Evaluation start...')
        gts = self.datalist
        preds_joint_coord, preds_rel_root_depth, preds_hand_type, inv_trans = preds['joint_coord'], preds['rel_root_depth'], preds['hand_type'], preds['inv_trans']
        assert len(gts) == len(preds_joint_coord)
        sample_num = len(gts)
        # Per-joint error accumulators: single-hand and interacting-hand.
        mpjpe_sh = [[] for _ in range(self.joint_num*2)]
        mpjpe_ih = [[] for _ in range(self.joint_num*2)]
        mrrpe = []
        acc_hand_cls = 0
        hand_cls_cnt = 0
        for n in range(sample_num):
            data = gts[n]
            bbox, cam_param, joint, gt_hand_type, hand_type_valid = data['bbox'], data['cam_param'], data['joint'], data['hand_type'], data['hand_type_valid']
            focal = cam_param['focal']
            princpt = cam_param['princpt']
            gt_joint_coord = joint['cam_coord']
            joint_valid = joint['valid']
            # restore xy coordinates to original image space
            pred_joint_coord_img = preds_joint_coord[n].copy()
            pred_joint_coord_img[:,0] = pred_joint_coord_img[:,0]/cfg.output_hm_shape[2]*cfg.input_img_shape[1]
            pred_joint_coord_img[:,1] = pred_joint_coord_img[:,1]/cfg.output_hm_shape[1]*cfg.input_img_shape[0]
            for j in range(self.joint_num*2):
                pred_joint_coord_img[j,:2] = trans_point2d(pred_joint_coord_img[j,:2],inv_trans[n])
            # restore depth to original camera space
            pred_joint_coord_img[:,2] = (pred_joint_coord_img[:,2]/cfg.output_hm_shape[0] * 2 - 1) * (cfg.bbox_3d_size/2)
            # mrrpe: error of the predicted left-root relative to right-root
            if gt_hand_type == 'interacting' and joint_valid[self.root_joint_idx['left']] and joint_valid[self.root_joint_idx['right']]:
                pred_rel_root_depth = (preds_rel_root_depth[n]/cfg.output_root_hm_shape * 2 - 1) * (cfg.bbox_3d_size_root/2)
                pred_left_root_img = pred_joint_coord_img[self.root_joint_idx['left']].copy()
                pred_left_root_img[2] += data['abs_depth']['right'] + pred_rel_root_depth
                pred_left_root_cam = pixel2cam(pred_left_root_img[None,:], focal, princpt)[0]
                pred_right_root_img = pred_joint_coord_img[self.root_joint_idx['right']].copy()
                pred_right_root_img[2] += data['abs_depth']['right']
                pred_right_root_cam = pixel2cam(pred_right_root_img[None,:], focal, princpt)[0]
                pred_rel_root = pred_left_root_cam - pred_right_root_cam
                gt_rel_root = gt_joint_coord[self.root_joint_idx['left']] - gt_joint_coord[self.root_joint_idx['right']]
                mrrpe.append(float(np.sqrt(np.sum((pred_rel_root - gt_rel_root)**2))))
            # add root joint depth
            pred_joint_coord_img[self.joint_type['right'],2] += data['abs_depth']['right']
            pred_joint_coord_img[self.joint_type['left'],2] += data['abs_depth']['left']
            # back project to camera coordinate system
            pred_joint_coord_cam = pixel2cam(pred_joint_coord_img, focal, princpt)
            # root joint alignment
            for h in ('right', 'left'):
                pred_joint_coord_cam[self.joint_type[h]] = pred_joint_coord_cam[self.joint_type[h]] - pred_joint_coord_cam[self.root_joint_idx[h],None,:]
                gt_joint_coord[self.joint_type[h]] = gt_joint_coord[self.joint_type[h]] - gt_joint_coord[self.root_joint_idx[h],None,:]
            # mpjpe: Euclidean error per valid joint, bucketed by sequence type
            for j in range(self.joint_num*2):
                if joint_valid[j]:
                    if gt_hand_type == 'right' or gt_hand_type == 'left':
                        mpjpe_sh[j].append(np.sqrt(np.sum((pred_joint_coord_cam[j] - gt_joint_coord[j])**2)))
                    else:
                        mpjpe_ih[j].append(np.sqrt(np.sum((pred_joint_coord_cam[j] - gt_joint_coord[j])**2)))
            # handedness accuracy (0.5 threshold on each hand's probability)
            if hand_type_valid:
                if gt_hand_type == 'right' and preds_hand_type[n][0] > 0.5 and preds_hand_type[n][1] < 0.5:
                    acc_hand_cls += 1
                elif gt_hand_type == 'left' and preds_hand_type[n][0] < 0.5 and preds_hand_type[n][1] > 0.5:
                    acc_hand_cls += 1
                elif gt_hand_type == 'interacting' and preds_hand_type[n][0] > 0.5 and preds_hand_type[n][1] > 0.5:
                    acc_hand_cls += 1
                hand_cls_cnt += 1
            # optional debug visualization (disabled)
            vis = False
            if vis:
                img_path = data['img_path']
                cvimg = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
                _img = cvimg[:,:,::-1].transpose(2,0,1)
                vis_kps = pred_joint_coord_img.copy()
                vis_valid = joint_valid.copy()
                capture = str(data['capture'])
                cam = str(data['cam'])
                frame = str(data['frame'])
                filename = 'out_' + str(n) + '_' + gt_hand_type + '.jpg'
                vis_keypoints(_img, vis_kps, vis_valid, self.skeleton, filename)
            vis = False
            if vis:
                filename = 'out_' + str(n) + '_3d.jpg'
                vis_3d_keypoints(pred_joint_coord_cam, joint_valid, self.skeleton, filename)
        if hand_cls_cnt > 0: print('Handedness accuracy: ' + str(acc_hand_cls / hand_cls_cnt))
        if len(mrrpe) > 0: print('MRRPE: ' + str(sum(mrrpe)/len(mrrpe)))
        print()
        # Combined (single + interacting) per-joint MPJPE summary.
        tot_err = []
        eval_summary = 'MPJPE for each joint: \n'
        for j in range(self.joint_num*2):
            tot_err_j = np.mean(np.concatenate((np.stack(mpjpe_sh[j]), np.stack(mpjpe_ih[j]))))
            joint_name = self.skeleton[j]['name']
            eval_summary += (joint_name + ': %.2f, ' % tot_err_j)
            tot_err.append(tot_err_j)
        print(eval_summary)
        print('MPJPE for all hand sequences: %.2f' % (np.mean(tot_err)))
        print()
        # Single-hand-only per-joint MPJPE summary.
        eval_summary = 'MPJPE for each joint: \n'
        for j in range(self.joint_num*2):
            mpjpe_sh[j] = np.mean(np.stack(mpjpe_sh[j]))
            joint_name = self.skeleton[j]['name']
            eval_summary += (joint_name + ': %.2f, ' % mpjpe_sh[j])
        print(eval_summary)
        print('MPJPE for single hand sequences: %.2f' % (np.mean(mpjpe_sh)))
        print()
        # Interacting-hand-only per-joint MPJPE summary.
        eval_summary = 'MPJPE for each joint: \n'
        for j in range(self.joint_num*2):
            mpjpe_ih[j] = np.mean(np.stack(mpjpe_ih[j]))
            joint_name = self.skeleton[j]['name']
            eval_summary += (joint_name + ': %.2f, ' % mpjpe_ih[j])
        print(eval_summary)
        print('MPJPE for interacting hand sequences: %.2f' % (np.mean(mpjpe_ih)))
if __name__ == "__main__":
    def add_path(path):
        # Prepend a directory to sys.path (idempotent).
        if path not in sys.path:
            sys.path.insert(0, path)
    this_dir = osp.dirname(__file__)
    lib_path = osp.join(this_dir, '..', '..','lib')
    add_path(lib_path)
    mm_path = osp.join(this_dir, '..', 'lib/poseeval/py-motmetrics')
    add_path(mm_path)
    from torchvision import transforms
    from config.default import _C as cfg
    from config.default import update_config
    from utils.vis import plot_hand
    parser = argparse.ArgumentParser(description='Train keypoints network')
    parser.add_argument('--cfg',
                        help='experiment configure file name',
                        default='../../experiments/exp_test.yaml',
                        type=str)
    parser.add_argument('opts',
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    args.cfg = "../../experiments/exp_test.yaml"
    update_config(cfg, args)
    cfg.defrost()
    dataset = InterHandDataset(cfg, transforms.ToTensor(), "train")
    batch_generator = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0, pin_memory=True)
    # __getitem__ returns a single dict, so the default collate yields one
    # batched dict per iteration.  (The previous 3-tuple unpacking
    # `(inputs, targets, meta_info)` matched an older return signature and
    # raised on the current dict.)
    for itr, batch in enumerate(batch_generator):
        img = batch['imgs'].numpy().squeeze()*255 # 1 x 3 x 256 x 256
        joint_coord = batch['pose2d_gt'].numpy().squeeze() # [42, 3] # u,v pixel, z root-relative discretized depth
        joint_valid = batch['joint_valid'].numpy().squeeze() # [42]
        filename = 'result_2d.jpg'
        for k in range(joint_coord.shape[0]):
            print('[{},{}],'.format(joint_coord[k,0],joint_coord[k,1]))
        vis_img = vis_keypoints(img, joint_coord, joint_valid, dataset.skeleton, filename, save_path='.')
        filename = 'result_3d'
        vis_3d_keypoints(joint_coord, joint_valid, dataset.skeleton, filename)
| [
"utils.vis.vis_keypoints",
"sys.path.insert",
"config.default.update_config",
"numpy.array",
"utils.transforms.cam2pixel",
"sys.path.append",
"numpy.arange",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.stack",
"numpy.concatenate",
"torchvision.transforms.ToTensor",
"utils.preprocessing.t... | [((244, 269), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (259, 269), False, 'import os\n'), ((335, 361), 'sys.path.append', 'sys.path.append', (['parentUrl'], {}), '(parentUrl)\n', (350, 361), False, 'import sys\n'), ((298, 333), 'os.path.join', 'os.path.join', (['currentUrl', 'os.pardir'], {}), '(currentUrl, os.pardir)\n', (310, 333), False, 'import os\n'), ((17793, 17814), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (17804, 17814), True, 'import os.path as osp\n'), ((17831, 17868), 'os.path.join', 'osp.join', (['this_dir', '""".."""', '""".."""', '"""lib"""'], {}), "(this_dir, '..', '..', 'lib')\n", (17839, 17868), True, 'import os.path as osp\n'), ((17906, 17960), 'os.path.join', 'osp.join', (['this_dir', '""".."""', '"""lib/poseeval/py-motmetrics"""'], {}), "(this_dir, '..', 'lib/poseeval/py-motmetrics')\n", (17914, 17960), True, 'import os.path as osp\n'), ((18167, 18229), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train keypoints network"""'}), "(description='Train keypoints network')\n", (18190, 18229), False, 'import argparse\n'), ((18709, 18733), 'config.default.update_config', 'update_config', (['cfg', 'args'], {}), '(cfg, args)\n', (18722, 18733), False, 'from config.default import update_config\n'), ((18738, 18751), 'config.default._C.defrost', 'cfg.defrost', ([], {}), '()\n', (18749, 18751), True, 'from config.default import _C as cfg\n'), ((18847, 18939), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset', 'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(0)', 'pin_memory': '(True)'}), '(dataset=dataset, batch_size=1, shuffle=False, num_workers=0,\n pin_memory=True)\n', (18857, 18939), False, 'from torch.utils.data import DataLoader\n'), ((1284, 1324), 'os.path.join', 'osp.join', (['cfg.DATASET.DATA_DIR', '"""images"""'], {}), "(cfg.DATASET.DATA_DIR, 'images')\n", (1292, 1324), True, 'import os.path as 
osp\n'), ((1384, 1429), 'os.path.join', 'osp.join', (['cfg.DATASET.DATA_DIR', '"""annotations"""'], {}), "(cfg.DATASET.DATA_DIR, 'annotations')\n", (1392, 1429), True, 'import os.path as osp\n'), ((2446, 2457), 'time.time', 'time.time', ([], {}), '()\n', (2455, 2457), False, 'import time\n'), ((7921, 7974), 'numpy.concatenate', 'np.concatenate', (['(joint_img, joint_cam[:, 2, None])', '(1)'], {}), '((joint_img, joint_cam[:, 2, None]), 1)\n', (7935, 7974), True, 'import numpy as np\n'), ((8956, 9079), 'utils.preprocessing.augmentation', 'augmentation', (['img', 'bbox', 'joint_coord', 'joint_valid', 'hand_type_vec', 'self.mode', 'self.joint_type', 'self.cfg.MODEL.INPUT_SIZE'], {}), '(img, bbox, joint_coord, joint_valid, hand_type_vec, self.mode,\n self.joint_type, self.cfg.MODEL.INPUT_SIZE)\n', (8968, 9079), False, 'from utils.preprocessing import load_img, load_skeleton, get_bbox, process_bbox, augmentation, transform_input_to_output_space, trans_point2d\n'), ((18793, 18814), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (18812, 18814), False, 'from torchvision import transforms\n'), ((19445, 19536), 'utils.vis.vis_keypoints', 'vis_keypoints', (['img', 'joint_coord', 'joint_valid', 'dataset.skeleton', 'filename'], {'save_path': '"""."""'}), "(img, joint_coord, joint_valid, dataset.skeleton, filename,\n save_path='.')\n", (19458, 19536), False, 'from utils.vis import vis_keypoints, vis_3d_keypoints, plot_hand\n'), ((19572, 19642), 'utils.vis.vis_3d_keypoints', 'vis_3d_keypoints', (['joint_coord', 'joint_valid', 'dataset.skeleton', 'filename'], {}), '(joint_coord, joint_valid, dataset.skeleton, filename)\n', (19588, 19642), False, 'from utils.vis import vis_keypoints, vis_3d_keypoints, plot_hand\n'), ((2009, 2037), 'numpy.arange', 'np.arange', (['(0)', 'self.joint_num'], {}), '(0, self.joint_num)\n', (2018, 2037), True, 'import numpy as np\n'), ((2046, 2091), 'numpy.arange', 'np.arange', (['self.joint_num', '(self.joint_num * 2)'], {}), 
'(self.joint_num, self.joint_num * 2)\n', (2055, 2091), True, 'import numpy as np\n'), ((2128, 2169), 'os.path.join', 'osp.join', (['self.annot_path', '"""skeleton.txt"""'], {}), "(self.annot_path, 'skeleton.txt')\n", (2136, 2169), True, 'import os.path as osp\n'), ((2503, 2597), 'os.path.join', 'osp.join', (['self.annot_path', 'self.mode', "(prefix + 'InterHand2.6M_' + self.mode + '_data.json')"], {}), "(self.annot_path, self.mode, prefix + 'InterHand2.6M_' + self.mode +\n '_data.json')\n", (2511, 2597), True, 'import os.path as osp\n'), ((2724, 2736), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2733, 2736), False, 'import json\n'), ((2869, 2881), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2878, 2881), False, 'import json\n'), ((3788, 3840), 'os.path.join', 'osp.join', (['self.img_path', 'self.mode', "img['file_name']"], {}), "(self.img_path, self.mode, img['file_name'])\n", (3796, 3840), True, 'import os.path as osp\n'), ((4528, 4564), 'utils.transforms.cam2pixel', 'cam2pixel', (['joint_cam', 'focal', 'princpt'], {}), '(joint_cam, focal, princpt)\n', (4537, 4564), False, 'from utils.transforms import world2cam, cam2pixel, pixel2cam\n'), ((5251, 5301), 'numpy.array', 'np.array', (["ann['hand_type_valid']"], {'dtype': 'np.float32'}), "(ann['hand_type_valid'], dtype=np.float32)\n", (5259, 5301), True, 'import numpy as np\n'), ((5770, 5809), 'numpy.array', 'np.array', (["ann['bbox']"], {'dtype': 'np.float32'}), "(ann['bbox'], dtype=np.float32)\n", (5778, 5809), True, 'import numpy as np\n'), ((5838, 5881), 'utils.preprocessing.process_bbox', 'process_bbox', (['bbox', '(img_height, img_width)'], {}), '(bbox, (img_height, img_width))\n', (5850, 5881), False, 'from utils.preprocessing import load_img, load_skeleton, get_bbox, process_bbox, augmentation, transform_input_to_output_space, trans_point2d\n'), ((7079, 7113), 'numpy.array', 'np.array', (['[1, 0]'], {'dtype': 'np.float32'}), '([1, 0], dtype=np.float32)\n', (7087, 7113), True, 'import numpy as np\n'), 
((13999, 14046), 'utils.transforms.pixel2cam', 'pixel2cam', (['pred_joint_coord_img', 'focal', 'princpt'], {}), '(pred_joint_coord_img, focal, princpt)\n', (14008, 14046), False, 'from utils.transforms import world2cam, cam2pixel, pixel2cam\n'), ((17752, 17776), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (17767, 17776), False, 'import sys\n'), ((2395, 2431), 'os.path.join', 'osp.join', (['self.annot_path', 'self.mode'], {}), '(self.annot_path, self.mode)\n', (2403, 2431), True, 'import os.path as osp\n'), ((2611, 2698), 'os.path.join', 'osp.join', (['self.annot_path', 'self.mode', "('InterHand2.6M_' + self.mode + '_camera.json')"], {}), "(self.annot_path, self.mode, 'InterHand2.6M_' + self.mode +\n '_camera.json')\n", (2619, 2698), True, 'import os.path as osp\n'), ((2755, 2844), 'os.path.join', 'osp.join', (['self.annot_path', 'self.mode', "('InterHand2.6M_' + self.mode + '_joint_3d.json')"], {}), "(self.annot_path, self.mode, 'InterHand2.6M_' + self.mode +\n '_joint_3d.json')\n", (2763, 2844), True, 'import os.path as osp\n'), ((7166, 7200), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (7174, 7200), True, 'import numpy as np\n'), ((8112, 8182), 'cv2.imread', 'cv2.imread', (['img_path', '(cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)'], {}), '(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n', (8122, 8182), False, 'import cv2\n'), ((12327, 12383), 'utils.preprocessing.trans_point2d', 'trans_point2d', (['pred_joint_coord_img[j, :2]', 'inv_trans[n]'], {}), '(pred_joint_coord_img[j, :2], inv_trans[n])\n', (12340, 12383), False, 'from utils.preprocessing import load_img, load_skeleton, get_bbox, process_bbox, augmentation, transform_input_to_output_space, trans_point2d\n'), ((15507, 15577), 'cv2.imread', 'cv2.imread', (['img_path', '(cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)'], {}), '(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n', (15517, 
15577), False, 'import cv2\n'), ((15953, 16017), 'utils.vis.vis_keypoints', 'vis_keypoints', (['_img', 'vis_kps', 'vis_valid', 'self.skeleton', 'filename'], {}), '(_img, vis_kps, vis_valid, self.skeleton, filename)\n', (15966, 16017), False, 'from utils.vis import vis_keypoints, vis_3d_keypoints, plot_hand\n'), ((16134, 16210), 'utils.vis.vis_3d_keypoints', 'vis_3d_keypoints', (['pred_joint_coord_cam', 'joint_valid', 'self.skeleton', 'filename'], {}), '(pred_joint_coord_cam, joint_valid, self.skeleton, filename)\n', (16150, 16210), False, 'from utils.vis import vis_keypoints, vis_3d_keypoints, plot_hand\n'), ((16852, 16868), 'numpy.mean', 'np.mean', (['tot_err'], {}), '(tot_err)\n', (16859, 16868), True, 'import numpy as np\n'), ((17014, 17035), 'numpy.stack', 'np.stack', (['mpjpe_sh[j]'], {}), '(mpjpe_sh[j])\n', (17022, 17035), True, 'import numpy as np\n'), ((17240, 17257), 'numpy.mean', 'np.mean', (['mpjpe_sh'], {}), '(mpjpe_sh)\n', (17247, 17257), True, 'import numpy as np\n'), ((17403, 17424), 'numpy.stack', 'np.stack', (['mpjpe_ih[j]'], {}), '(mpjpe_ih[j])\n', (17411, 17424), True, 'import numpy as np\n'), ((17634, 17651), 'numpy.mean', 'np.mean', (['mpjpe_ih'], {}), '(mpjpe_ih)\n', (17641, 17651), True, 'import numpy as np\n'), ((2934, 2945), 'time.time', 'time.time', ([], {}), '()\n', (2943, 2945), False, 'import time\n'), ((4680, 4726), 'numpy.array', 'np.array', (["ann['joint_valid']"], {'dtype': 'np.float32'}), "(ann['joint_valid'], dtype=np.float32)\n", (4688, 4726), True, 'import numpy as np\n'), ((7260, 7294), 'numpy.array', 'np.array', (['[1, 1]'], {'dtype': 'np.float32'}), '([1, 1], dtype=np.float32)\n', (7268, 7294), True, 'import numpy as np\n'), ((13063, 13117), 'utils.transforms.pixel2cam', 'pixel2cam', (['pred_left_root_img[None, :]', 'focal', 'princpt'], {}), '(pred_left_root_img[None, :], focal, princpt)\n', (13072, 13117), False, 'from utils.transforms import world2cam, cam2pixel, pixel2cam\n'), ((13324, 13379), 'utils.transforms.pixel2cam', 
'pixel2cam', (['pred_right_root_img[None, :]', 'focal', 'princpt'], {}), '(pred_right_root_img[None, :], focal, princpt)\n', (13333, 13379), False, 'from utils.transforms import world2cam, cam2pixel, pixel2cam\n'), ((16568, 16589), 'numpy.stack', 'np.stack', (['mpjpe_sh[j]'], {}), '(mpjpe_sh[j])\n', (16576, 16589), True, 'import numpy as np\n'), ((16591, 16612), 'numpy.stack', 'np.stack', (['mpjpe_ih[j]'], {}), '(mpjpe_ih[j])\n', (16599, 16612), True, 'import numpy as np\n'), ((13636, 13678), 'numpy.sum', 'np.sum', (['((pred_rel_root - gt_rel_root) ** 2)'], {}), '((pred_rel_root - gt_rel_root) ** 2)\n', (13642, 13678), True, 'import numpy as np\n'), ((14652, 14710), 'numpy.sum', 'np.sum', (['((pred_joint_coord_cam[j] - gt_joint_coord[j]) ** 2)'], {}), '((pred_joint_coord_cam[j] - gt_joint_coord[j]) ** 2)\n', (14658, 14710), True, 'import numpy as np\n'), ((14788, 14846), 'numpy.sum', 'np.sum', (['((pred_joint_coord_cam[j] - gt_joint_coord[j]) ** 2)'], {}), '((pred_joint_coord_cam[j] - gt_joint_coord[j]) ** 2)\n', (14794, 14846), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""Inspect a SAM dark frame: print basic statistics of the raw data,
apply a median filter, sanity-check the filtered image for NaN/inf
values, print its statistics and display it."""
import astropy.io.fits as pyfits
import numpy as np
import matplotlib.pyplot as plt
import cv2

from matplotlib import gridspec

filename = '/home/bquint/Data/SAM/Lateral_Glowing/DARK180s.fits'
data = pyfits.getdata(filename=filename)

print(data.mean())
print(np.median(data))

# NOTE(review): cv2.medianBlur with ksize=5 only supports uint8/uint16/
# int16/float32 input -- FITS data may come in as float64; confirm dtype.
img = cv2.medianBlur(data, 5)

# Sanity checks for invalid pixels. The original code used
# ``(img == np.nan).any()`` which is ALWAYS False: NaN never compares
# equal to anything (IEEE 754), so NaNs could never be detected.
# np.isnan / np.isneginf / np.isposinf perform the intended tests.
print(np.isnan(img).any())
print(np.isneginf(img).any())
print(np.isposinf(img).any())

print(img.__class__)
print(np.min(img))
print(np.max(img))
print(np.mean(img))
print(np.median(img))

plt.imshow(img, 'gray')
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.median",
"cv2.medianBlur",
"numpy.max",
"astropy.io.fits.getdata",
"numpy.min",
"matplotlib.pyplot.show"
] | [((247, 280), 'astropy.io.fits.getdata', 'pyfits.getdata', ([], {'filename': 'filename'}), '(filename=filename)\n', (261, 280), True, 'import astropy.io.fits as pyfits\n'), ((928, 951), 'cv2.medianBlur', 'cv2.medianBlur', (['data', '(5)'], {}), '(data, 5)\n', (942, 951), False, 'import cv2\n'), ((1148, 1171), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img', '"""gray"""'], {}), "(img, 'gray')\n", (1158, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1172, 1182), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1180, 1182), True, 'import matplotlib.pyplot as plt\n'), ((306, 321), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (315, 321), True, 'import numpy as np\n'), ((1073, 1084), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (1079, 1084), True, 'import numpy as np\n'), ((1092, 1103), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1098, 1103), True, 'import numpy as np\n'), ((1111, 1123), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (1118, 1123), True, 'import numpy as np\n'), ((1131, 1145), 'numpy.median', 'np.median', (['img'], {}), '(img)\n', (1140, 1145), True, 'import numpy as np\n')] |
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""User-API for the pyemma.coordinates package
.. currentmodule:: pyemma.coordinates.api
"""
from pyemma.util.log import getLogger as _getLogger
from pyemma.util import types as _types
from pyemma.coordinates.pipelines import Discretizer as _Discretizer
from pyemma.coordinates.pipelines import Pipeline as _Pipeline
# io
from pyemma.coordinates.data.featurizer import MDFeaturizer as _MDFeaturizer
from pyemma.coordinates.data.feature_reader import FeatureReader as _FeatureReader
from pyemma.coordinates.data.data_in_memory import DataInMemory as _DataInMemory
from pyemma.coordinates.data.util.reader_utils import create_file_reader as _create_file_reader, \
preallocate_empty_trajectory as _preallocate_empty_trajectory, enforce_top as _enforce_top, \
copy_traj_attributes as _copy_traj_attributes
from pyemma.coordinates.data.frames_from_file import frames_from_file as _frames_from_file
# transforms
from pyemma.coordinates.transform.transformer import Transformer as _Transformer
from pyemma.coordinates.transform.pca import PCA as _PCA
from pyemma.coordinates.transform.tica import TICA as _TICA
# clustering
from pyemma.coordinates.clustering.kmeans import KmeansClustering as _KmeansClustering
from pyemma.coordinates.clustering.kmeans import MiniBatchKmeansClustering as _MiniBatchKmeansClustering
from pyemma.coordinates.clustering.uniform_time import UniformTimeClustering as _UniformTimeClustering
from pyemma.coordinates.clustering.regspace import RegularSpaceClustering as _RegularSpaceClustering
from pyemma.coordinates.clustering.assign import AssignCenters as _AssignCenters
# stat
from pyemma.coordinates.util.stat import histogram
# types
from mdtraj import Topology as _Topology, Trajectory as _Trajectory
from six import string_types
from six.moves import range
from six.moves import zip
import numpy as _np
import itertools as _itertools
_logger = _getLogger('coordinates.api')
__docformat__ = "restructuredtext en"
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2015, Computational Molecular Biology Group, FU-Berlin"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "FreeBSD"
__maintainer__ = "<NAME>"
__email__ = "m.scherer AT fu-berlin DOT de"
__all__ = ['featurizer', # IO
'load',
'source',
'histogram',
'pipeline',
'discretizer',
'save_traj',
'save_trajs',
'pca', # transform
'tica',
'cluster_regspace', # cluster
'cluster_kmeans',
'cluster_uniform_time',
'assign_to_centers',
]
# ==============================================================================
#
# DATA PROCESSING
#
# ==============================================================================
def featurizer(topfile):
    r""" Featurizer to select features from MD data.

    Parameters
    ----------
    topfile : str
        path to topology file (e.g pdb file)

    Returns
    -------
    feat : :class:`Featurizer <pyemma.coordinates.data.featurizer.MDFeaturizer>`

    See also
    --------
    data.MDFeaturizer

    Examples
    --------
    Create a featurizer, add backbone torsion angles to the active features
    and use it in :func:`source`:

    >>> import pyemma.coordinates # doctest: +SKIP
    >>> feat = pyemma.coordinates.featurizer('my_protein.pdb') # doctest: +SKIP
    >>> feat.add_backbone_torsions() # doctest: +SKIP
    >>> reader = pyemma.coordinates.source(["my_traj01.xtc", "my_traj02.xtc"], features=feat) # doctest: +SKIP

    .. autoclass:: pyemma.coordinates.data.featurizer.MDFeaturizer
        :members:
        :undoc-members:

        .. rubric:: Methods

        .. autoautosummary:: pyemma.coordinates.data.featurizer.MDFeaturizer
           :methods:

        .. rubric:: Attributes

        .. autoautosummary:: pyemma.coordinates.data.featurizer.MDFeaturizer
            :attributes:
    """
    # Thin factory; all feature-selection logic lives in MDFeaturizer itself.
    return _MDFeaturizer(topfile)
# TODO: DOC - which topology file formats does mdtraj support? Find out and complete docstring
def load(trajfiles, features=None, top=None, stride=1, chunk_size=100):
    r""" Loads coordinate features into memory.

    If your memory is not big enough consider the use of :func:`pipeline`,
    or use the stride option to subsample the data.

    Parameters
    ----------
    trajfiles : str or list of str
        A filename or a list of filenames to trajectory files that can be
        processed by pyemma. Both molecular dynamics trajectory files and
        raw data files (tabulated ASCII or binary) can be loaded.

        MD trajectories are loaded through mdtraj (http://mdtraj.org/latest/)
        and may be in any mdtraj-compatible format, including .dcd, .xtc,
        .trr, .binpos, .netcdf, .pdb, .arc, .hdf5 and .lammpstrj. For such
        files either a featurizer must be given (to read specific quantities
        such as distances or dihedrals), or a topology file, in which case
        only Cartesian coordinates are read and every frame becomes a vector
        of length 3N, (x1, y1, z1, x2, y2, z2, ...), with N the number of
        atoms.

        Raw data may be tabulated ASCII (.dat, .txt) or binary python
        (.npy, .npz).
    features : MDFeaturizer, optional, default = None
        a featurizer object specifying how molecular dynamics files should
        be read (e.g. intramolecular distances, angles, dihedrals, etc).
    top : str, optional, default = None
        A molecular topology file, e.g. in PDB (.pdb) format
    stride : int, optional, default = 1
        Load only every stride'th frame. By default, every frame is loaded
    chunk_size: int, optional, default = 100
        The chunk size at which the input file is being processed.

    Returns
    -------
    data : ndarray or list of ndarray
        A single (T, d) array when a single filename was given (unless the
        format is .npz), where T is the number of time steps and d the
        number of features. If multiple filenames were given, or the file
        is a .npz holding multiple arrays, a list of appropriately shaped
        arrays is returned.

    Raises
    ------
    ValueError
        If `trajfiles` is neither a filename nor a (possibly empty)
        list/tuple of filenames.

    See also
    --------
    :func:`pyemma.coordinates.pipeline`
        if your memory is not big enough, use pipeline to process it in a
        streaming manner

    Examples
    --------

    >>> from pyemma.coordinates import load
    >>> files = ['traj01.xtc', 'traj02.xtc'] # doctest: +SKIP
    >>> output = load(files, top='my_structure.pdb') # doctest: +SKIP

    """
    # Accept a single filename or a (possibly empty) list/tuple of filenames.
    # FIX: the original used ``len(trajfiles) is 0`` -- identity comparison
    # with an int literal is implementation-dependent (SyntaxWarning on
    # CPython >= 3.8); use equality instead.
    if isinstance(trajfiles, string_types) or (
            isinstance(trajfiles, (list, tuple))
            and (any(isinstance(item, string_types) for item in trajfiles)
                 or len(trajfiles) == 0)):
        reader = _create_file_reader(trajfiles, top, features, chunk_size=chunk_size)
        trajs = reader.get_output(stride=stride)
        # unwrap a single trajectory for caller convenience
        return trajs[0] if len(trajs) == 1 else trajs
    else:
        raise ValueError('unsupported type (%s) of input' % type(trajfiles))
def source(inp, features=None, top=None, chunk_size=None):
    r""" Wraps input as data source for pipeline.

    Use this function to construct the first stage of a data processing
    :func:`pipeline`.

    Parameters
    ----------
    inp : str (file name) or ndarray or list of strings (file names) or list
        of ndarrays. The input file names or input data. Can be given as:

        1. A single trajectory file name, in any of the molecular dynamics
           or raw data formats accepted by :py:func:`load`.
        2. A list of such trajectory file names.
        3. An MD trajectory in memory as a numpy array of shape (T, N, 3).
        4. A list of MD trajectories in memory, each of shape (T_i, N, 3).
        5. A feature/order-parameter trajectory in memory as a numpy array
           of shape (T, N).
        6. A list of such feature trajectories, each of shape (T_i, N).
        7. A list of NumPy array files (.npy) of shape (T, N); these are
           memory-mapped read-only rather than loaded completely.
        8. A list of tabulated ASCII files of shape (T, N).
    features : MDFeaturizer, optional, default = None
        a featurizer object specifying how molecular dynamics files should
        be read (e.g. intramolecular distances, angles, dihedrals, etc).
        Only meaningful for molecular dynamics input.
    top : str, optional, default = None
        A topology file name; needed when MD trajectories are given without
        a featurizer. In that case only Cartesian coordinates are read.
    chunk_size: int, optional, default = 100 for file readers and 5000 for
        already loaded data. The chunk size at which the input is processed.

    Returns
    -------
    reader obj: type depends on input data

        1. :class:`FeatureReader <pyemma.coordinates.data.feature_reader.FeatureReader>` for MD-data
        2. :class:`NumPyFileReader <pyemma.coordinates.data.numpy_filereader.NumPyFileReader>` for .npy files
        3. :class:`PyCSVReader <pyemma.coordinates.data.py_csv_reader.PyCSVReader>` for csv files.
        4. :class:`DataInMemory <pyemma.coordinates.data.data_in_memory.DataInMemory>` for already loaded data (e.g NumPy arrays)

    Raises
    ------
    ValueError
        If `inp` is none of the supported input types.

    See also
    --------
    :func:`pyemma.coordinates.pipeline`
        The data input is the first stage for your pipeline. Add other
        stages to it and build a pipeline to analyze big data in streaming
        mode.

    Examples
    --------

    Create a reader for NumPy files:

    >>> import numpy as np
    >>> from pyemma.coordinates import source
    >>> reader = source(['001.npy', '002.npy'] # doctest: +SKIP

    Create a reader for trajectory files and select some distance as feature:

    >>> reader = source(['traj01.xtc', 'traj02.xtc'], top='my_structure.pdb') # doctest: +SKIP
    >>> reader.featurizer.add_distances([[0, 1], [5, 6]]) # doctest: +SKIP
    >>> calculated_features = reader.get_output() # doctest: +SKIP

    Create a reader for huge NumPy in-memory arrays to process them in
    huge chunks to avoid memory issues:

    >>> data = np.random.random(int(1e7))
    >>> reader = source(data, chunk_size=5000)
    >>> from pyemma.coordinates import cluster_regspace
    >>> regspace = cluster_regspace(reader, dmin=0.1)

    """
    def _is_seq_of(candidate, item_type):
        # A (possibly empty) list/tuple counts as a sequence of `item_type`
        # when it is empty or contains at least one item of that type.
        # FIX: the original used ``len(...) is 0`` -- identity comparison
        # with an int literal is implementation-dependent (SyntaxWarning on
        # CPython >= 3.8); use equality instead.
        return isinstance(candidate, (list, tuple)) and (
            any(isinstance(item, item_type) for item in candidate)
            or len(candidate) == 0)

    # CASE 1: input is a file name or a list of file names
    if isinstance(inp, string_types) or _is_seq_of(inp, string_types):
        reader = _create_file_reader(inp, top, features,
                                    chunk_size=chunk_size if chunk_size else 100)
    # CASE 2: input is an in-memory array or a list of in-memory arrays
    elif isinstance(inp, _np.ndarray) or _is_seq_of(inp, _np.ndarray):
        reader = _DataInMemory(inp, chunksize=chunk_size if chunk_size else 5000)
    else:
        raise ValueError('unsupported type (%s) of input' % type(inp))

    return reader
def pipeline(stages, run=True, stride=1, chunksize=100):
    r""" Data analysis pipeline.

    Constructs a data analysis
    :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` from the given
    stages and, unless prevented, parametrizes it right away.

    If this function takes too long, consider loading data in memory.
    Alternatively, if the data is too large to be loaded into memory, make
    use of the stride parameter.

    Parameters
    ----------
    stages : data input or list of pipeline stages
        Either a single data input constructed by :py:func:`source`, or a
        list of pipelining stages whose first element is such a data input.
    run : bool, optional, default = True
        If True, the pipeline is parametrized immediately with the given
        stages (and re-parametrized whenever further stages are added);
        with only an input stage this flag has no effect yet.
        *Attention*: True means this call may take a long time. If False,
        the pipeline stays passive until you call parametrize().
    stride : int, optional, default = 1
        Parametrize the stages using only every stride'th frame of the
        input. MD data is usually correlated at short timescales, so a
        longer stride is often sufficient and much faster for large data
        sets. See also the stride option in the pipeline output functions.
    chunksize : int, optiona, default = 100
        how many datapoints to process as a batch at one step

    Returns
    -------
    pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>`
        A pipeline object that is able to conduct big data analysis with
        limited memory in streaming mode.

    Examples
    --------
    >>> import numpy as np
    >>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline

    Create some random data and cluster centers:

    >>> data = np.random.random((1000, 3))
    >>> centers = data[np.random.choice(1000, 10)]
    >>> reader = source(data)

    Define a TICA transformation with lag time 10:

    >>> tica_obj = tica(lag=10)

    Assign any input to given centers:

    >>> assign = assign_to_centers(centers=centers)
    >>> pipe = pipeline([reader, tica_obj, assign])
    >>> pipe.parametrize()

    .. autoclass:: pyemma.coordinates.pipelines.Pipeline
        :members:
        :undoc-members:
    """
    # normalize: a single stage becomes a one-element pipeline
    stage_list = stages if isinstance(stages, list) else [stages]
    pipe = _Pipeline(stage_list, param_stride=stride, chunksize=chunksize)
    if run:
        pipe.parametrize()
    return pipe
def discretizer(reader,
                transform=None,
                cluster=None,
                run=True,
                stride=1,
                chunksize=100):
    r""" Specialized pipeline: From trajectories to clustering.

    Constructs a pipeline of three stages:

    1. an input stage (mandatory)
    2. a transformer stage (optional)
    3. a clustering stage (mandatory)

    Calling this is identical to calling :func:`pipeline` with the three
    stages; it exists as guidance for the (probably) most common usage of a
    pipeline.

    Parameters
    ----------
    reader : instance of :class:`pyemma.coordinates.data.reader.ChunkedReader`
        Provides access to the data. For MD data you most likely want a
        FeatureReader.
    transform : instance of :class: `pyemma.coordinates.Transformer`
        an optional transform like PCA/TICA etc.
    cluster : instance of :class: `pyemma.coordinates.AbstractClustering`
        a cluster algorithm assigning (transformed) data to discrete
        states. When omitted, kmeans with 100 clusters is used.
    stride : int, optional, default = 1
        Parametrize the stages using only every stride'th frame of the
        input. MD data is usually correlated at short timescales, so a
        longer stride is often sufficient and much faster for large data
        sets. See also the stride option in the pipeline output functions.
    chunksize : int, optiona, default = 100
        how many datapoints to process as a batch at one step

    Returns
    -------
    pipe : a :class:`Pipeline <pyemma.coordinates.pipelines.Discretizer>` object
        A pipeline object that is able to streamline data analysis of large
        amounts of input data with limited memory in streaming mode.

    Examples
    --------

    Construct a discretizer pipeline processing all data with a PCA
    transformation and regular-space clustering of the principal
    components:

    >>> import numpy as np
    >>> from pyemma.coordinates import source, pca, cluster_regspace, discretizer
    >>> from pyemma.datasets import get_bpti_test_data
    >>> reader = source(get_bpti_test_data()['trajs'], top=get_bpti_test_data()['top'])
    >>> transform = pca(dim=2)
    >>> cluster = cluster_regspace(dmin=0.1)
    >>> disc = discretizer(reader, transform, cluster)
    >>> disc.parametrize()
    >>> disc.dtrajs # doctest: +ELLIPSIS
    [array([...

    Store the discrete trajectories to files:

    >>> from pyemma.util.files import TemporaryDirectory
    >>> import os
    >>> with TemporaryDirectory('dtrajs') as tmpdir:
    ...     disc.save_dtrajs(output_dir=tmpdir)
    ...     sorted(os.listdir(tmpdir))
    ['bpti_001-033.dtraj', 'bpti_034-066.dtraj', 'bpti_067-100.dtraj']

    """
    # fall back to a default clustering when none was requested
    if cluster is None:
        _logger.warning('You did not specify a cluster algorithm.'
                        ' Defaulting to kmeans(k=100)')
        cluster = _KmeansClustering(n_clusters=100)
    disc = _Discretizer(reader, transform, cluster, param_stride=stride)
    if run:
        disc.parametrize()
    return disc
def save_traj(traj_inp, indexes, outfile, top=None, stride=1, chunksize=1000, verbose=False):
    r""" Saves a sequence of frames as a single trajectory.

    Extracts the specified sequence of time/trajectory indexes from traj_inp
    and saves it to one single molecular dynamics trajectory file. The output
    format will be determined by the outfile name.

    Parameters
    ----------

    traj_inp :
        traj_inp can be of two types.

            1. a python list of strings containing the filenames associated with
            the indices in :py:obj:`indexes`. With this type of input, a :py:obj:`topfile` is mandatory.

            2. a :py:func:`pyemma.coordinates.data.feature_reader.FeatureReader`
            object containing the filename list in :py:obj:`traj_inp.trajfiles`.
            Please use :py:func:`pyemma.coordinates.source` to construct it.
            With this type of input, the input :py:obj:`topfile` will be ignored.
            and :py:obj:`traj_inp.topfile` will be used instead

    indexes : ndarray(T, 2) or list of ndarray(T_i, 2)
        A (T x 2) array for writing a trajectory of T time steps. Each row
        contains two indexes (i, t), where i is the index of the trajectory
        from the input and t is the index of the time step within the trajectory.
        If a list of index arrays are given, these will be simply concatenated,
        i.e. they will be written subsequently in the same trajectory file.

    outfile : str.
        The name of the output file. Its extension will determine the file type
        written. Example: "out.dcd" If set to None, the trajectory object is
        returned to memory

    top : str, mdtraj.Trajectory, or mdtraj.Topology
        The topology needed to read the files in the list :py:obj:`traj_inp`.
        If :py:obj:`traj_inp` is not a list, this parameter is ignored.

    stride  : integer, default is 1
        This parameter informs :py:func:`save_traj` about the stride used in
        :py:obj:`indexes`. Typically, :py:obj:`indexes` contains frame-indexes
        that match exactly the frames of the files contained in :py:obj:`traj_inp.trajfiles`.
        However, in certain situations, that might not be the case. Examples
        are cases in which a stride value != 1 was used when
        reading/featurizing/transforming/discretizing the files contained
        in :py:obj:`traj_inp.trajfiles`.

    chunksize : int. Default 1000.
        The chunksize for reading input trajectory files. If :py:obj:`traj_inp`
        is a :py:func:`pyemma.coordinates.data.feature_reader.FeatureReader` object,
        this input variable will be ignored and :py:obj:`traj_inp.chunksize` will be used instead.

    verbose : boolean, default is False
        Verbose output while looking for :py:obj`indexes` in the :py:obj:`traj_inp.trajfiles`

    Returns
    -------
    traj : :py:obj:`mdtraj.Trajectory` object
        Will only return this object if :py:obj:`outfile` is None
    """

    # Determine the type of input and extract necessary parameters
    if isinstance(traj_inp, _FeatureReader):
        trajfiles = traj_inp.trajfiles
        top = traj_inp.topfile
        chunksize = traj_inp.chunksize
    else:
        # Do we have what we need?
        # FIX: the original message used a bare "%" with no conversion
        # character, which itself raised a ValueError when the assert fired.
        assert isinstance(traj_inp, list), \
            "traj_inp has to be of type list, not %s" % type(traj_inp)
        assert isinstance(top, (str, _Topology, _Trajectory)), \
            "traj_inp cannot be a list of files without an input " \
            "top of type str (eg filename.pdb), mdtraj.Trajectory or mdtraj.Topology. " \
            "Got type %s instead" % type(top)
        trajfiles = traj_inp

    # Enforce the input topology to actually be an md.Topology object
    top = _enforce_top(top)

    # Convert to one (T, 2) index array if given a list of arrays
    indexes = _np.vstack(indexes)

    # Check that we've been given enough filenames.
    # FIX: file indexes are 0-based, so the largest requested index must be
    # strictly smaller than the number of files (the original ">=" accepted
    # an out-of-range index equal to len(trajfiles)). Also report the actual
    # maximum file index, indexes[:, 0].max(), not indexes[0].max().
    assert len(trajfiles) > indexes[:, 0].max(), \
        "traj_inp contains %u trajfiles, " \
        "but indexes will ask for file nr. %u" % (len(trajfiles), indexes[:, 0].max())

    # Instantiate a list of iterables that will contain mdtraj trajectory objects
    trajectory_iterator_list = []

    # Cycle only over files that are actually mentioned in "indexes"
    file_idxs, file_pos = _np.unique(indexes[:, 0], return_inverse=True)
    for ii, ff in enumerate(file_idxs):
        # Slice the indexes array (frame column) where file ff was mentioned
        frames = indexes[file_pos == ii, 1]

        # Store the trajectory object that comes out of _frames_from_file
        # directly as an iterator in trajectory_iterator_list
        trajectory_iterator_list.append(_itertools.islice(
            _frames_from_file(trajfiles[ff], top, frames, chunksize=chunksize,
                              verbose=verbose, stride=stride, copy_not_join=True),
            None)
        )

    # Prepare the (preallocated) output trajectory object
    traj = _preallocate_empty_trajectory(top, indexes.shape[0])

    # Iterate directly over the index of files and pick the trajectory that
    # you need from the iterator list
    for ii, traj_idx in enumerate(file_pos):
        # Append the trajectory from the respective list of iterators
        # and advance that iterator
        traj = _copy_traj_attributes(traj, next(trajectory_iterator_list[traj_idx]), ii)

    # Return to memory as an mdtraj trajectory object
    if outfile is None:
        return traj
    # or to disk as a molecular trajectory file
    else:
        traj.save(outfile)
    _logger.info("Created file %s" % outfile)
def save_trajs(traj_inp, indexes, prefix='set_', fmt=None, outfiles=None,
               inmemory=False, stride=1, verbose=False):
    r""" Saves sequences of frames as multiple trajectories.

    Extracts the time/trajectory index sequences given in `indexes` from the
    input loader and writes each of them to its own molecular dynamics
    trajectory file. Output filenames are prefix + str(n) + .fmt unless an
    explicit list is given via `outfiles`. E.g. for dcd input and three index
    arrays, the default output is "set_1.dcd", "set_2.dcd", "set_3.dcd"; a
    relative path can be embedded in the prefix.

    Parameters
    ----------
    traj_inp : :py:class:`pyemma.coordinates.data.feature_reader.FeatureReader`
        A data source as provided by :py:func:`pyemma.coordinates.source`.
    indexes : list of ndarray(T_i, 2)
        N arrays of shape (T_n x 2); each row (i, t) addresses frame t of
        input trajectory i.
    prefix : str, optional, default = `set_`
        output filename prefix, may include a path.
    fmt : str, optional, default = None
        output format extension without the dot (e.g. "dcd", "xtc").
        By default taken from the first input file.
    outfiles : list of str, optional, default = None
        explicit output filenames; overrides prefix and fmt.
    inmemory : Boolean, default = False (untested for large files)
        If True, stream the input only once: build one (possibly huge)
        trajectory in memory and slice it into the output files. Faster for
        large index arrays/files, but memory intensive.
    stride : integer, default is 1
        the stride that was used when producing the frame indexes in
        `indexes`, in case they do not match the raw files one-to-one.
    verbose : boolean, default is False
        Verbose output while locating the indexes in traj_inp.trajfiles.

    Returns
    -------
    outfiles : list of str
        The list of paths the output files have been written to.
    """
    # -- validate "indexes": iterable of (T_i, 2) integer arrays ----------
    assert _types.is_iterable(indexes), "Indexes must be an iterable of matrices."
    # a single 2d-array is promoted to a one-element list
    if isinstance(indexes, _np.ndarray) and indexes.ndim == 2:
        indexes = [indexes]
    for idx_set in indexes:
        assert isinstance(idx_set, _np.ndarray), "The elements in the 'indexes' variable must be numpy.ndarrays"
        assert idx_set.ndim == 2, \
            "The elements in the 'indexes' variable must have ndim = 2, and not %u" % idx_set.ndim
        assert idx_set.shape[1] == 2, \
            "The elements in the 'indexes' variable must be of shape (T_i,2), and not (%u,%u)" % idx_set.shape
    # -- pick the output file extension -----------------------------------
    if fmt is None:
        import os
        # inherit the extension of the first input trajectory
        fmt = os.path.splitext(traj_inp.trajfiles[0])[1]
    else:
        fmt = '.' + fmt
    # -- build the output filename list, unless explicitly given ----------
    if outfiles is None:
        outfiles = [prefix + '%06u' % ii + fmt for ii in range(len(indexes))]
    if len(indexes) != len(outfiles):
        raise Exception('len(indexes) (%s) does not match len(outfiles) (%s)' % (len(indexes), len(outfiles)))
    if not inmemory:
        # one pass per index set; a trajfile may be streamed more than once,
        # which keeps the memory footprint low
        for idx_set, fname in zip(indexes, outfiles):
            # TODO: use **kwargs to parse to save_traj
            save_traj(traj_inp, idx_set, fname, stride=stride, verbose=verbose)
    else:
        # single streaming pass: collect everything into one mdtraj object,
        # then slice it into the individual output files
        joined = save_traj(traj_inp, indexes, outfile=None, stride=stride, verbose=verbose)
        start = 0
        for idx_set, fname in zip(indexes, outfiles):
            stop = start + len(idx_set)
            joined[start:stop].save(fname)
            _logger.info("Created file %s" % fname)
            # advance the slice window to the next set
            start = stop
    return outfiles
# =========================================================================
#
# TRANSFORMATION ALGORITHMS
#
# =========================================================================
def _get_input_stage(previous_stage):
    """Coerce `previous_stage` into a pipeline input stage.

    A Transformer is already a valid source and is returned unchanged;
    anything else is treated as an array (or list of arrays) and wrapped
    in an in-memory reader.
    """
    if isinstance(previous_stage, _Transformer):
        return previous_stage
    # raw data: normalize to a list of trajectories and wrap it
    return _DataInMemory(_types.ensure_traj_list(previous_stage))
def _param_stage(previous_stage, this_stage, stride=1):
    r""" Parametrizes the given pipelining stage if a valid source is given.

    Parameters
    ----------
    previous_stage : None, Transformer (subclass), ndarray, or list of ndarrays
        data source from which `this_stage` will be parametrized. If None,
        the stage is returned without any action.
    this_stage : the transformer object to be parametrized from the source.
    stride : int, optional, default 1
        stride to use during parametrization.
    """
    # without a source there is nothing to parametrize
    if previous_stage is None:
        return this_stage
    source = _get_input_stage(previous_stage)
    # wire the stage to its producer and run the estimation
    this_stage.data_producer = source
    this_stage.chunksize = source.chunksize
    this_stage.parametrize(stride=stride)
    return this_stage
def pca(data=None, dim=2, var_cutoff=0.95, stride=1, mean=None):
    r""" Principal Component Analysis (PCA).

    PCA is a linear transformation method that finds coordinates of maximal
    variance, so that a linear projection onto the principal components makes
    a minimal error in terms of variation in the data. Note, however, that
    this method is not optimal for Markov model construction, because there
    the main objective is to preserve the slow processes, which can sometimes
    be associated with small variance.

    When input data is given as an argument, the estimation is carried out
    right away and the resulting object can be used to obtain eigenvalues,
    eigenvectors, or project input data onto the principal components. If
    data is not given, an empty estimator is returned that can be put into a
    :func:`pipeline` in order to use PCA in streaming mode.

    Parameters
    ----------
    data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by
        the source function. T or T_i are the number of time steps in a
        trajectory. When given, the PCA is immediately parametrized by
        estimating the covariance matrix and computing its eigenvectors.
    dim : int, optional, default 2
        the number of dimensions (principal components) to project onto.
        -1 means all numerically available dimensions will be used unless
        reduced by var_cutoff. Setting dim to a positive value is exclusive
        with var_cutoff.
    var_cutoff : float in the range [0,1], optional, default 0.95
        Determines the number of output dimensions by including dimensions
        until their cumulative kinetic variance exceeds this fraction.
        var_cutoff=1.0 means all numerically available dimensions will be
        used, unless set by dim. Setting var_cutoff smaller than 1.0 is
        exclusive with dim.
    stride : int, optional, default = 1
        Only use every stride'th frame for estimation. Note that the stride
        option in the get_output() function of the returned object is
        independent, so you can parametrize at a long stride and still map
        all frames through the transformer.
    mean : ndarray, optional, default None
        Optionally pass pre-calculated means to avoid their re-computation.
        The shape has to match the input dimension.

    Returns
    -------
    pca : a :class:`PCA<pyemma.coordinates.transform.PCA>` transformation object
        Object for Principal component analysis (PCA). It contains PCA
        eigenvalues and eigenvectors, and the projection of input data to
        the dominant PCA components.

    See also
    --------
    :class:`PCA <pyemma.coordinates.transform.PCA>` : pca object
    :func:`tica <pyemma.coordinates.tica>` : for time-lagged independent component analysis

    References
    ----------
    .. [1] Hotelling, H. 1933.
        Analysis of a complex of statistical variables into principal components.
        J. Edu. Psych. 24, 417-441 and 498-520.
    """
    if mean is not None:
        data = _get_input_stage(data)
        indim = data.dimension()
        # BUGFIX: np.float was a deprecated alias for the builtin float and has
        # been removed in NumPy 1.24 -- use the builtin type directly (same
        # resulting dtype, float64).
        mean = _types.ensure_ndarray(mean, shape=(indim,), dtype=float)
    # NOTE(review): the validated `mean` is never forwarded to _PCA here,
    # although tica() does forward its mean -- the user-supplied mean appears
    # to be silently ignored; confirm whether _PCA accepts a mean argument.
    res = _PCA(dim=dim, var_cutoff=var_cutoff)
    return _param_stage(data, res, stride=stride)
def tica(data=None, lag=10, dim=-1, var_cutoff=0.95, kinetic_map=True, stride=1,
         force_eigenvalues_le_one=False, mean=None):
    r""" Time-lagged independent component analysis (TICA).

    TICA is a linear transformation method. In contrast to PCA, which finds
    coordinates of maximal variance, TICA finds coordinates of maximal
    autocorrelation at the given lag time. Therefore, TICA is useful in order
    to find the *slow* components in a dataset and thus an excellent choice to
    transform molecular dynamics data before clustering it for the
    construction of a Markov model. When the input data is the result of a
    Markov process (such as thermostatted molecular dynamics), TICA finds in
    fact an approximation to the eigenfunctions and eigenvalues of the
    underlying Markov operator [1]_.

    When input data is given as an argument, the estimation is carried out
    straight away and the resulting object can be used to obtain eigenvalues,
    eigenvectors, or project input data onto the slowest TICA components. If
    no data is given, an empty estimator is returned that can be put into a
    :func:`pipeline` in order to use TICA in streaming mode.

    Parameters
    ----------
    data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by
        the source function. When given, the TICA transformation is
        immediately computed and can be used to transform data.
    lag : int, optional, default = 10
        the lag time, in multiples of the input time step
    dim : int, optional, default -1
        the number of dimensions (independent components) to project onto.
        -1 means all numerically available dimensions will be used unless
        reduced by var_cutoff. Setting dim to a positive value is exclusive
        with var_cutoff.
    var_cutoff : float in the range [0,1], optional, default 0.95
        Determines the number of output dimensions by including dimensions
        until their cumulative kinetic variance exceeds this fraction.
        var_cutoff=1.0 means all numerically available dimensions will be
        used, unless set by dim. Setting var_cutoff smaller than 1.0 is
        exclusive with dim.
    kinetic_map : bool, optional, default True
        Eigenvectors will be scaled by eigenvalues. As a result, Euclidean
        distances in the transformed data approximate kinetic distances [4]_.
        This is a good choice when the data is further processed by clustering.
    stride : int, optional, default = 1
        Only use every stride'th frame for estimation. Note that the stride
        option in the get_output() function of the returned object is
        independent, so you can parametrize at a long stride and still map
        all frames through the transformer.
    force_eigenvalues_le_one : boolean
        Compute covariance matrix and time-lagged covariance matrix such
        that the generalized eigenvalues are always guaranteed to be <= 1.
    mean : ndarray, optional, default None
        Optionally pass pre-calculated means to avoid their re-computation.
        The shape has to match the input dimension.

    Returns
    -------
    tica : a :class:`TICA <pyemma.coordinates.transform.TICA>` transformation object
        Object for time-lagged independent component (TICA) analysis. It
        contains TICA eigenvalues and eigenvectors, and the projection of
        input data to the dominant TICA components.

    See also
    --------
    :class:`TICA <pyemma.coordinates.transform.TICA>` : tica object
    :func:`pca <pyemma.coordinates.pca>` : for principal component analysis

    References
    ----------
    .. [1] Perez-Hernandez G, Paul F, Giorgino T, De Fabritiis G and Noe F. 2013.
        Identification of slow molecular order parameters for Markov model construction
        J. Chem. Phys. 139, 015102. doi:10.1063/1.4811489
    .. [2] Molgedey L and Schuster H G. 1994.
        Separation of a mixture of independent signals using time delayed correlations
        Phys. Rev. Lett. 72, 3634.
    .. [3] Schwantes C and Pande V S. 2013.
        Improvements in Markov State Model Construction Reveal Many Non-Native Interactions in the Folding of NTL9
        J. Chem. Theory. Comput. 9, 2000-2009. doi:10.1021/ct300878a
    .. [4] Noe F and Clementi C. 2015.
        Kinetic distance and kinetic maps from molecular dynamics simulation
        (in preparation).
    """
    if mean is not None:
        data = _get_input_stage(data)
        indim = data.dimension()
        # BUGFIX: np.float was a deprecated alias for the builtin float and has
        # been removed in NumPy 1.24 -- use the builtin type directly (same
        # resulting dtype, float64).
        mean = _types.ensure_ndarray(mean, shape=(indim,), dtype=float)
    res = _TICA(lag, dim=dim, var_cutoff=var_cutoff, kinetic_map=kinetic_map,
                force_eigenvalues_le_one=force_eigenvalues_le_one, mean=mean)
    return _param_stage(data, res, stride=stride)
# =========================================================================
#
# CLUSTERING ALGORITHMS
#
# =========================================================================
def cluster_mini_batch_kmeans(data=None, k=100, max_iter=10, batch_size=0.2, metric='euclidean', init_strategy='kmeans++'):
    r"""k-means clustering in mini-batch mode.

    Like :func:`cluster_kmeans`, but each iteration updates the cluster
    centers from a random subset of the data (`batch_size` fraction) instead
    of the full data set, which scales to data that does not fit in memory.

    Parameters
    ----------
    data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by :func:`source`
        input data, if available in memory
    k : int
        the number of cluster centers
    max_iter : int
        maximum number of iterations before stopping
    batch_size : float, optional, default = 0.2
        fraction of the data used in each mini-batch iteration
        # assumes 0 < batch_size <= 1 -- TODO confirm against _MiniBatchKmeansClustering
    metric : str
        metric to use during clustering ('euclidean', 'minRMSD')
    init_strategy : str
        determines if the initial cluster centers are chosen according to the
        kmeans++ algorithm or drawn uniformly from the provided data set

    Returns
    -------
    kmeans : a :class:`MiniBatchKmeansClustering <pyemma.coordinates.clustering.MiniBatchKmeansClustering>` clustering object
    """
    res = _MiniBatchKmeansClustering(n_clusters=k, max_iter=max_iter, metric=metric, init_strategy=init_strategy, batch_size=batch_size)
    return _param_stage(data, res, stride=1)
def cluster_kmeans(data=None, k=100, max_iter=10, tolerance=1e-5, stride=1,
                   metric='euclidean', init_strategy='kmeans++', fixed_seed=False):
    r"""k-means clustering

    If data is given, performs a k-means clustering and then assigns the data
    using a Voronoi discretization; the returned
    :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>`
    object can be used to extract the discretized data sequences or to assign
    other data points to the same partition. Without data, an empty
    :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>`
    is created that still needs to be parametrized, e.g. in a :func:`pipeline`.

    .. seealso:: **Theoretical background**: `Wiki page <http://en.wikipedia.org/wiki/K-means_clustering>`_

    Parameters
    ----------
    data: ndarray (T, d) or list of ndarray (T_i, d) or a reader created by :func:`source`
        input data, if available in memory
    k: int
        the number of cluster centers
    max_iter : int
        maximum number of iterations before stopping
    tolerance : float
        stop iteration when the relative change in the cost function

        .. math:
            C(S) = \sum_{i=1}^{k} \sum_{\mathbf x \in S_i} \left\| \mathbf x - \boldsymbol\mu_i \right\|^2

        is smaller than tolerance
    stride : int, optional, default = 1
        only use every stride'th frame for estimation. The stride option in
        the get_output() function of the returned object is independent, so
        you can parametrize at a long stride and still map all frames.
    metric : str
        metric to use during clustering ('euclidean', 'minRMSD')
    init_strategy : str
        determines if the initial cluster centers are chosen according to the
        kmeans++ algorithm or drawn uniformly from the provided data set
    fixed_seed : bool
        if set to true, the random seed gets fixed resulting in deterministic
        behavior; default is false

    Returns
    -------
    kmeans : a :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>` clustering object
        Object for kmeans clustering. It holds discrete trajectories and
        cluster center information.

    Examples
    --------
    >>> import numpy as np
    >>> import pyemma.coordinates as coor
    >>> traj_data = [np.random.random((100, 3)), np.random.random((100,3))]
    >>> cluster_obj = coor.cluster_kmeans(traj_data, k=20, stride=1)
    >>> cluster_obj.get_output() # doctest: +ELLIPSIS
    [array([...
    """
    clustering = _KmeansClustering(n_clusters=k, max_iter=max_iter, metric=metric,
                                   tolerance=tolerance, init_strategy=init_strategy,
                                   fixed_seed=fixed_seed)
    return _param_stage(data, clustering, stride=stride)
def cluster_uniform_time(data=None, k=100, stride=1, metric='euclidean'):
    r"""Uniform time clustering

    If given data, performs a clustering that selects data points uniformly in
    time and then assigns the data using a Voronoi discretization. Returns a
    :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>`
    object that can be used to extract the discretized data sequences, or to
    assign other data points to the same partition. If data is not given, an
    empty :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>`
    will be created that still needs to be parametrized, e.g. in a :func:`pipeline`.

    Parameters
    ----------
    data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created
        by the source function; input data, if available in memory
    k : int
        the number of cluster centers
    stride : int, optional, default = 1
        only use every stride'th frame for estimation. Note that the stride
        option in the get_output() function of the returned object is
        independent, so you can parametrize at a long stride and still map
        all frames through the transformer.
    metric : str
        metric to use during clustering ('euclidean', 'minRMSD')

    Returns
    -------
    uniformTime : a :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` clustering object
        Object for uniform time clustering. It holds discrete trajectories
        and cluster center information.
    """
    res = _UniformTimeClustering(k, metric=metric)
    # BUGFIX: `stride` was accepted and documented but never forwarded, so
    # parametrization silently always ran at stride 1. Forward it like every
    # other clustering function in this module does.
    return _param_stage(data, res, stride=stride)
def cluster_regspace(data=None, dmin=-1, max_centers=1000, stride=1, metric='euclidean'):
    r"""Regular space clustering

    If given data, performs a regular space clustering [1]_ and returns a
    :class:`RegularSpaceClustering <pyemma.coordinates.clustering.RegularSpaceClustering>`
    object that can be used to extract the discretized data sequences, or to
    assign other data points to the same partition. Without data, an empty
    :class:`RegularSpaceClustering <pyemma.coordinates.clustering.RegularSpaceClustering>`
    is created that still needs to be parametrized, e.g. in a :func:`pipeline`.

    Regular space clustering is very similar to Hartigan's leader algorithm
    [2]_ and consists of two passes through the data: first, every data point
    farther than dmin from all current centers becomes a new center; second,
    a Voronoi discretization with those centers partitions the data.

    Parameters
    ----------
    data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by :func:`source`
        input data, if available in memory
    dmin : float
        the minimal distance between cluster centers (must be provided)
    max_centers : int (optional), default=1000
        if reached, the algorithm stops finding new centers, possibly leaving
        parts of the state space poorly discretized (a warning is generated;
        increase dmin to keep the number of centers below max_centers)
    stride : int, optional, default = 1
        only use every stride'th frame for estimation. The stride option in
        the get_output() function of the returned object is independent, so
        you can parametrize at a long stride and still map all frames.
    metric : str
        metric to use during clustering ('euclidean', 'minRMSD')

    Returns
    -------
    regSpace : a :class:`RegularSpaceClustering <pyemma.coordinates.clustering.RegularSpaceClustering>` clustering object
        Object for regular space clustering. It holds discrete trajectories
        and cluster center information.

    References
    ----------
    .. [1] Prinz J-H et al. 2011.
        Markov models of molecular kinetics: Generation and Validation.
        J. Chem. Phys. 134, 174105.
    .. [2] Hartigan J. Clustering algorithms.
        New York: Wiley; 1975.
    """
    # dmin has no sensible default; the -1 sentinel means "not provided"
    if dmin == -1:
        raise ValueError("provide a minimum distance for clustering, e.g. 2.0")
    clustering = _RegularSpaceClustering(dmin, max_centers, metric=metric)
    return _param_stage(data, clustering, stride=stride)
def assign_to_centers(data=None, centers=None, stride=1, return_dtrajs=True,
                      metric='euclidean'):
    r"""Assigns data to the nearest cluster centers

    Creates a Voronoi partition with the given cluster centers. When given
    trajectories as data, this function will by default discretize them and
    return discrete trajectories of corresponding lengths; otherwise an
    assignment object is returned that can be used to assign data later or
    can serve as a pipeline stage.

    Parameters
    ----------
    data : ndarray or list of arrays or reader created by the source function
        data to be assigned
    centers : path to file or ndarray or a reader created by the source function
        cluster centers to use in the assignment of data
    stride : int, optional, default = 1
        only use every stride'th frame for estimation. The stride option in
        the get_output() function of the returned object is independent, so
        you can parametrize at a long stride and still map all frames.
    return_dtrajs : bool, optional, default = True
        if True and data is given, return the discretized trajectories;
        otherwise the :class:'AssignCenters <_AssignCenters>' object itself
        is returned.
    metric : str
        metric to use during clustering ('euclidean', 'minRMSD')

    Returns
    -------
    assignment : list of integer arrays or an :class:`AssignCenters <pyemma.coordinates.clustering.AssignCenters>` object
        assigned data

    Examples
    --------
    Generate some random data and choose 10 random centers:

    >>> import numpy as np
    >>> data = np.random.random((100, 3))
    >>> cluster_centers = data[np.random.randint(0, 99, size=10)]
    >>> dtrajs = assign_to_centers(data, cluster_centers)
    >>> print(dtrajs) # doctest: +ELLIPSIS
    [array([...
    """
    # centers are mandatory -- there is no meaningful default partition
    if centers is None:
        raise ValueError('You have to provide centers in form of a filename'
                         ' or NumPy array or a reader created by source function')
    assignment = _AssignCenters(centers, metric=metric)
    stage = _param_stage(data, assignment, stride=stride)
    # with input data, hand back the discrete trajectories directly
    if return_dtrajs and data is not None:
        return stage.dtrajs
    return stage
| [
"pyemma.coordinates.transform.tica.TICA",
"pyemma.coordinates.pipelines.Discretizer",
"pyemma.coordinates.clustering.kmeans.KmeansClustering",
"pyemma.util.log.getLogger",
"pyemma.coordinates.data.frames_from_file.frames_from_file",
"pyemma.coordinates.clustering.assign.AssignCenters",
"pyemma.coordinat... | [((3302, 3331), 'pyemma.util.log.getLogger', '_getLogger', (['"""coordinates.api"""'], {}), "('coordinates.api')\n", (3312, 3331), True, 'from pyemma.util.log import getLogger as _getLogger\n'), ((5360, 5382), 'pyemma.coordinates.data.featurizer.MDFeaturizer', '_MDFeaturizer', (['topfile'], {}), '(topfile)\n', (5373, 5382), True, 'from pyemma.coordinates.data.featurizer import MDFeaturizer as _MDFeaturizer\n'), ((17679, 17738), 'pyemma.coordinates.pipelines.Pipeline', '_Pipeline', (['stages'], {'param_stride': 'stride', 'chunksize': 'chunksize'}), '(stages, param_stride=stride, chunksize=chunksize)\n', (17688, 17738), True, 'from pyemma.coordinates.pipelines import Pipeline as _Pipeline\n'), ((21121, 21182), 'pyemma.coordinates.pipelines.Discretizer', '_Discretizer', (['reader', 'transform', 'cluster'], {'param_stride': 'stride'}), '(reader, transform, cluster, param_stride=stride)\n', (21133, 21182), True, 'from pyemma.coordinates.pipelines import Discretizer as _Discretizer\n'), ((25018, 25035), 'pyemma.coordinates.data.util.reader_utils.enforce_top', '_enforce_top', (['top'], {}), '(top)\n', (25030, 25035), True, 'from pyemma.coordinates.data.util.reader_utils import create_file_reader as _create_file_reader, preallocate_empty_trajectory as _preallocate_empty_trajectory, enforce_top as _enforce_top, copy_traj_attributes as _copy_traj_attributes\n'), ((25123, 25142), 'numpy.vstack', '_np.vstack', (['indexes'], {}), '(indexes)\n', (25133, 25142), True, 'import numpy as _np\n'), ((25622, 25668), 'numpy.unique', '_np.unique', (['indexes[:, 0]'], {'return_inverse': '(True)'}), '(indexes[:, 0], return_inverse=True)\n', (25632, 25668), True, 'import numpy as _np\n'), ((26603, 26655), 'pyemma.coordinates.data.util.reader_utils.preallocate_empty_trajectory', '_preallocate_empty_trajectory', (['top', 'indexes.shape[0]'], {}), '(top, indexes.shape[0])\n', (26632, 26655), True, 'from pyemma.coordinates.data.util.reader_utils import create_file_reader 
as _create_file_reader, preallocate_empty_trajectory as _preallocate_empty_trajectory, enforce_top as _enforce_top, copy_traj_attributes as _copy_traj_attributes\n'), ((30684, 30711), 'pyemma.util.types.is_iterable', '_types.is_iterable', (['indexes'], {}), '(indexes)\n', (30702, 30711), True, 'from pyemma.util import types as _types\n'), ((39686, 39722), 'pyemma.coordinates.transform.pca.PCA', '_PCA', ([], {'dim': 'dim', 'var_cutoff': 'var_cutoff'}), '(dim=dim, var_cutoff=var_cutoff)\n', (39690, 39722), True, 'from pyemma.coordinates.transform.pca import PCA as _PCA\n'), ((46949, 47082), 'pyemma.coordinates.transform.tica.TICA', '_TICA', (['lag'], {'dim': 'dim', 'var_cutoff': 'var_cutoff', 'kinetic_map': 'kinetic_map', 'force_eigenvalues_le_one': 'force_eigenvalues_le_one', 'mean': 'mean'}), '(lag, dim=dim, var_cutoff=var_cutoff, kinetic_map=kinetic_map,\n force_eigenvalues_le_one=force_eigenvalues_le_one, mean=mean)\n', (46954, 47082), True, 'from pyemma.coordinates.transform.tica import TICA as _TICA\n'), ((47462, 47592), 'pyemma.coordinates.clustering.kmeans.MiniBatchKmeansClustering', '_MiniBatchKmeansClustering', ([], {'n_clusters': 'k', 'max_iter': 'max_iter', 'metric': 'metric', 'init_strategy': 'init_strategy', 'batch_size': 'batch_size'}), '(n_clusters=k, max_iter=max_iter, metric=metric,\n init_strategy=init_strategy, batch_size=batch_size)\n', (47488, 47592), True, 'from pyemma.coordinates.clustering.kmeans import MiniBatchKmeansClustering as _MiniBatchKmeansClustering\n'), ((50933, 51076), 'pyemma.coordinates.clustering.kmeans.KmeansClustering', '_KmeansClustering', ([], {'n_clusters': 'k', 'max_iter': 'max_iter', 'metric': 'metric', 'tolerance': 'tolerance', 'init_strategy': 'init_strategy', 'fixed_seed': 'fixed_seed'}), '(n_clusters=k, max_iter=max_iter, metric=metric, tolerance\n =tolerance, init_strategy=init_strategy, fixed_seed=fixed_seed)\n', (50950, 51076), True, 'from pyemma.coordinates.clustering.kmeans import KmeansClustering as 
_KmeansClustering\n'), ((53337, 53377), 'pyemma.coordinates.clustering.uniform_time.UniformTimeClustering', '_UniformTimeClustering', (['k'], {'metric': 'metric'}), '(k, metric=metric)\n', (53359, 53377), True, 'from pyemma.coordinates.clustering.uniform_time import UniformTimeClustering as _UniformTimeClustering\n'), ((56842, 56899), 'pyemma.coordinates.clustering.regspace.RegularSpaceClustering', '_RegularSpaceClustering', (['dmin', 'max_centers'], {'metric': 'metric'}), '(dmin, max_centers, metric=metric)\n', (56865, 56899), True, 'from pyemma.coordinates.clustering.regspace import RegularSpaceClustering as _RegularSpaceClustering\n'), ((59577, 59615), 'pyemma.coordinates.clustering.assign.AssignCenters', '_AssignCenters', (['centers'], {'metric': 'metric'}), '(centers, metric=metric)\n', (59591, 59615), True, 'from pyemma.coordinates.clustering.assign import AssignCenters as _AssignCenters\n'), ((8962, 9030), 'pyemma.coordinates.data.util.reader_utils.create_file_reader', '_create_file_reader', (['trajfiles', 'top', 'features'], {'chunk_size': 'chunk_size'}), '(trajfiles, top, features, chunk_size=chunk_size)\n', (8981, 9030), True, 'from pyemma.coordinates.data.util.reader_utils import create_file_reader as _create_file_reader, preallocate_empty_trajectory as _preallocate_empty_trajectory, enforce_top as _enforce_top, copy_traj_attributes as _copy_traj_attributes\n'), ((13785, 13875), 'pyemma.coordinates.data.util.reader_utils.create_file_reader', '_create_file_reader', (['inp', 'top', 'features'], {'chunk_size': '(chunk_size if chunk_size else 100)'}), '(inp, top, features, chunk_size=chunk_size if chunk_size\n else 100)\n', (13804, 13875), True, 'from pyemma.coordinates.data.util.reader_utils import create_file_reader as _create_file_reader, preallocate_empty_trajectory as _preallocate_empty_trajectory, enforce_top as _enforce_top, copy_traj_attributes as _copy_traj_attributes\n'), ((21076, 21109), 'pyemma.coordinates.clustering.kmeans.KmeansClustering', 
'_KmeansClustering', ([], {'n_clusters': '(100)'}), '(n_clusters=100)\n', (21093, 21109), True, 'from pyemma.coordinates.clustering.kmeans import KmeansClustering as _KmeansClustering\n'), ((31552, 31591), 'os.path.splitext', 'os.path.splitext', (['traj_inp.trajfiles[0]'], {}), '(traj_inp.trajfiles[0])\n', (31568, 31591), False, 'import os\n'), ((32257, 32279), 'six.moves.zip', 'zip', (['indexes', 'outfiles'], {}), '(indexes, outfiles)\n', (32260, 32279), False, 'from six.moves import zip\n'), ((32676, 32698), 'six.moves.zip', 'zip', (['indexes', 'outfiles'], {}), '(indexes, outfiles)\n', (32679, 32698), False, 'from six.moves import zip\n'), ((33487, 33526), 'pyemma.util.types.ensure_traj_list', '_types.ensure_traj_list', (['previous_stage'], {}), '(previous_stage)\n', (33510, 33526), True, 'from pyemma.util import types as _types\n'), ((33548, 33567), 'pyemma.coordinates.data.data_in_memory.DataInMemory', '_DataInMemory', (['data'], {}), '(data)\n', (33561, 33567), True, 'from pyemma.coordinates.data.data_in_memory import DataInMemory as _DataInMemory\n'), ((39614, 39674), 'pyemma.util.types.ensure_ndarray', '_types.ensure_ndarray', (['mean'], {'shape': '(indim,)', 'dtype': '_np.float'}), '(mean, shape=(indim,), dtype=_np.float)\n', (39635, 39674), True, 'from pyemma.util import types as _types\n'), ((46878, 46938), 'pyemma.util.types.ensure_ndarray', '_types.ensure_ndarray', (['mean'], {'shape': '(indim,)', 'dtype': '_np.float'}), '(mean, shape=(indim,), dtype=_np.float)\n', (46899, 46938), True, 'from pyemma.util import types as _types\n'), ((14488, 14552), 'pyemma.coordinates.data.data_in_memory.DataInMemory', '_DataInMemory', (['inp'], {'chunksize': '(chunk_size if chunk_size else 5000)'}), '(inp, chunksize=chunk_size if chunk_size else 5000)\n', (14501, 14552), True, 'from pyemma.coordinates.data.data_in_memory import DataInMemory as _DataInMemory\n'), ((26024, 26147), 'pyemma.coordinates.data.frames_from_file.frames_from_file', '_frames_from_file', 
(['trajfiles[ff]', 'top', 'frames'], {'chunksize': 'chunksize', 'verbose': 'verbose', 'stride': 'stride', 'copy_not_join': '(True)'}), '(trajfiles[ff], top, frames, chunksize=chunksize, verbose=\n verbose, stride=stride, copy_not_join=True)\n', (26041, 26147), True, 'from pyemma.coordinates.data.frames_from_file import frames_from_file as _frames_from_file\n')] |
#!/usr/bin/env python
"@package ReadForceField Read force field from a file and print information out."
from forcebalance.parser import parse_inputs
from forcebalance.forcefield import FF
from forcebalance.nifty import printcool
from sys import argv
import os
import numpy as np
def main():
    """Read a ForceBalance input file and its force field, then write the
    force field back out under the name 'NewFF' at the requested numerical
    precision.

    Usage: ReadForceField.py <input_file> <precision>
    """
    # Fail early with a usage message instead of a cryptic IndexError
    # when the two required command-line arguments are missing.
    if len(argv) < 3:
        raise SystemExit("usage: %s <input_file> <precision>" % os.path.basename(argv[0]))
    ## Set some basic options. Note that 'forcefield' requires 'ffdir'
    ## which indicates the relative path of the force field.
    options, tgt_opts = parse_inputs(argv[1])
    MyFF = FF(options)
    prec = int(argv[2])
    if 'read_mvals' in options:
        # Start from the mathematical parameter values given in the input file.
        mvals = np.array(options['read_mvals'])
    else:
        # Otherwise use the unperturbed (all-zero) parameter vector.
        mvals = np.zeros(len(MyFF.pvals0))
    MyFF.make(mvals, False, 'NewFF', precision=prec)
if __name__ == "__main__":
    main()
| [
"numpy.array",
"forcebalance.forcefield.FF",
"forcebalance.parser.parse_inputs"
] | [((451, 472), 'forcebalance.parser.parse_inputs', 'parse_inputs', (['argv[1]'], {}), '(argv[1])\n', (463, 472), False, 'from forcebalance.parser import parse_inputs\n'), ((484, 495), 'forcebalance.forcefield.FF', 'FF', (['options'], {}), '(options)\n', (486, 495), False, 'from forcebalance.forcefield import FF\n'), ((566, 597), 'numpy.array', 'np.array', (["options['read_mvals']"], {}), "(options['read_mvals'])\n", (574, 597), True, 'import numpy as np\n')] |
#encoding=utf-8
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import xgboost as xgb
#import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
from sklearn.externals import joblib
import time
print("Starting job at time:",time.time())
# Toggle: when True, work on small samples so the whole pipeline runs quickly.
debug = True
print("loading data ...")
# Only these columns are needed from the (very large) "active ads" files.
used_cols = ["item_id", "user_id"]
if debug == False:
    train_df = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"])
    y = train_df["deal_probability"]
    test_df = pd.read_csv("../input/test.csv", parse_dates = ["activation_date"])
    train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
    test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
    train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
    test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
    # Debug mode: shuffle train with a fixed seed and keep 10k rows,
    # and read only the first 1000 rows of every auxiliary file.
    train_df = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"])
    train_df = shuffle(train_df, random_state=1234); train_df = train_df.iloc[:10000]
    y = train_df["deal_probability"]
    test_df = pd.read_csv("../input/test.csv", nrows=1000, parse_dates = ["activation_date"])
    train_active = pd.read_csv("../input/train_active.csv", nrows=1000, usecols=used_cols)
    test_active = pd.read_csv("../input/test_active.csv", nrows=1000, usecols=used_cols)
    train_periods = pd.read_csv("../input/periods_train.csv", nrows=1000, parse_dates=["date_from", "date_to"])
    test_periods = pd.read_csv("../input/periods_test.csv", nrows=1000, parse_dates=["date_from", "date_to"])
print("loading data done!")
# =============================================================================
# Add per-image features (inception quality score by steeve + low-level image
# statistics).  Each pickle holds a dict {<feature_key>: values, 'ids': image
# ids}; the values are left-joined onto train/test on the `image` column.
# This replaces ten near-identical copy-paste sections with one helper + a
# spec table.
# =============================================================================
import pickle

def _join_image_feature(train_df, test_df, train_path, test_path, key, col):
    """Load feature `key` from the train/test pickles and left-join it onto
    train_df/test_df as a new column `col`, matching on the `image` id.

    Returns the augmented (train_df, test_df) pair.
    """
    with open(train_path, 'rb') as f:
        x = pickle.load(f)
    feat_df = pd.DataFrame(x[key], columns=[col])
    feat_df['image'] = x['ids']
    train_df = train_df.join(feat_df.set_index('image'), on='image')
    with open(test_path, 'rb') as f:
        x = pickle.load(f)
    feat_df = pd.DataFrame(x[key], columns=[col])
    feat_df['image'] = x['ids']
    test_df = test_df.join(feat_df.set_index('image'), on='image')
    del x, feat_df
    gc.collect()
    return train_df, test_df

# (train pickle, test pickle, key inside the pickle, output column name)
_IMAGE_FEATURE_SPECS = [
    ('../input/inception_v3_include_head_max_train.p',
     '../input/inception_v3_include_head_max_test.p', 'features', 'image_quality'),
    ('../input/train_image_features.p', '../input/test_image_features.p',
     'blurinesses', 'blurinesses'),
    ('../input/train_image_features.p', '../input/test_image_features.p',
     'whitenesses', 'whitenesses'),
    ('../input/train_image_features.p', '../input/test_image_features.p',
     'dullnesses', 'dullnesses'),
    ('../input/train_image_features_1.p', '../input/test_image_features_1.p',
     'average_pixel_width', 'average_pixel_width'),
    ('../input/train_image_features_1.p', '../input/test_image_features_1.p',
     'average_reds', 'average_reds'),
    ('../input/train_image_features_1.p', '../input/test_image_features_1.p',
     'average_blues', 'average_blues'),
    ('../input/train_image_features_1.p', '../input/test_image_features_1.p',
     'average_greens', 'average_greens'),
    ('../input/train_image_features_1.p', '../input/test_image_features_1.p',
     'widths', 'widths'),
    ('../input/train_image_features_1.p', '../input/test_image_features_1.p',
     'heights', 'heights'),
]
for _train_path, _test_path, _key, _col in _IMAGE_FEATURE_SPECS:
    print('adding %s ...' % _col)
    train_df, test_df = _join_image_feature(
        train_df, test_df, _train_path, _test_path, _key, _col)
gc.collect()
#==============================================================================
# image features by Qifeng: per-image colour-space statistics, joined on the
# image id ('ids' in the pickle, 'image' in the main frames).
#==============================================================================
print('adding image features @ qifeng ...')
_CSPACE_COLS = ['average_HSV_Ss',
                'average_HSV_Vs',
                'average_LUV_Ls',
                'average_LUV_Us',
                'average_LUV_Vs',
                'average_HLS_Hs',
                'average_HLS_Ls',
                'average_HLS_Ss',
                'average_YUV_Ys',
                'average_YUV_Us',
                'average_YUV_Vs',
                'ids'
                ]

def _load_cspace_features(path):
    """Read one colour-space statistics pickle into a DataFrame."""
    with open(path, 'rb') as f:
        return pd.DataFrame(pickle.load(f), columns=_CSPACE_COLS)

x_train = _load_cspace_features('../input/train_image_features_cspace.p')
x_test = _load_cspace_features('../input/test_image_features_cspace.p')
train_df = train_df.join(x_train.set_index('ids'), on='image')
test_df = test_df.join(x_test.set_index('ids'), on='image')
del x_train, x_test; gc.collect()
# =============================================================================
# add geo info: https://www.kaggle.com/frankherfert/avito-russian-region-cities/data
# (disabled)
# =============================================================================
#tmp = pd.read_csv("../input/avito_region_city_features.csv", usecols=["region", "city", "latitude","longitude"])
#train_df = train_df.merge(tmp, on=["city","region"], how="left")
#train_df["lat_long"] = train_df["latitude"]+train_df["longitude"]
#test_df = test_df.merge(tmp, on=["city","region"], how="left")
#test_df["lat_long"] = test_df["latitude"]+test_df["longitude"]
#del tmp; gc.collect()
# =============================================================================
# Add region-income (semicolon-separated, no header)
# =============================================================================
tmp = pd.read_csv("../input/region_income.csv", sep=";", names=["region", "income"])
train_df = train_df.merge(tmp, on="region", how="left")
test_df = test_df.merge(tmp, on="region", how="left")
del tmp; gc.collect()
# =============================================================================
# Add city population (from Wikipedia)
# =============================================================================
tmp = pd.read_csv("../input/city_population_wiki_v3.csv")
train_df = train_df.merge(tmp, on="city", how="left")
test_df = test_df.merge(tmp, on="city", how="left")
del tmp; gc.collect()
# =============================================================================
# Here Based on https://www.kaggle.com/bminixhofer/aggregated-features-lightgbm/code
# Per-user statistics computed over ALL ads (train + test + active):
# avg_days_up_user, avg_times_up_user, n_user_items.
# =============================================================================
all_samples = pd.concat([train_df,train_active,test_df,test_active]).reset_index(drop=True)
all_samples.drop_duplicates(["item_id"], inplace=True)
del train_active, test_active; gc.collect()
all_periods = pd.concat([train_periods,test_periods])
del train_periods, test_periods; gc.collect()
# How long each ad was up, per listing period.
all_periods["days_up"] = (all_periods["date_to"] - all_periods["date_from"]).dt.days
gp = all_periods.groupby(["item_id"])[["days_up"]]
gp_df = pd.DataFrame()
gp_df["days_up_sum"] = gp.sum()["days_up"]
gp_df["times_put_up"] = gp.count()["days_up"]
gp_df.reset_index(inplace=True)
# NOTE(review): rename() without inplace=True returns a new frame that is
# discarded here -- this line has no effect (reset_index already produced an
# "item_id" column, so the result happens to be correct anyway). Verify.
gp_df.rename(index=str, columns={"index": "item_id"})
all_periods.drop_duplicates(["item_id"], inplace=True)
all_periods = all_periods.merge(gp_df, on="item_id", how="left")
all_periods = all_periods.merge(all_samples, on="item_id", how="left")
# Average the per-item stats up to user level.
gp = all_periods.groupby(["user_id"])[["days_up_sum", "times_put_up"]].mean().reset_index()\
    .rename(index=str, columns={"days_up_sum": "avg_days_up_user",
                                "times_put_up": "avg_times_up_user"})
n_user_items = all_samples.groupby(["user_id"])[["item_id"]].count().reset_index() \
    .rename(index=str, columns={"item_id": "n_user_items"})
gp = gp.merge(n_user_items, on="user_id", how="outer") #left
del all_samples, all_periods, n_user_items
gc.collect()
train_df = train_df.merge(gp, on="user_id", how="left")
test_df = test_df.merge(gp, on="user_id", how="left")
agg_cols = list(gp.columns)[1:]
del gp; gc.collect()
# Users absent from the aggregate table get the sentinel value -1.
for col in agg_cols:
    train_df[col].fillna(-1, inplace=True)
    test_df[col].fillna(-1, inplace=True)
print("merging supplimentary data done!")
# =============================================================================
# done! go to the normal steps
# =============================================================================
def rmse(predictions, targets):
    """Root-mean-squared error between two aligned numeric arrays."""
    print("calculating RMSE ...")
    squared_error = (predictions - targets) ** 2
    return np.sqrt(squared_error.mean())
def text_preprocessing(text):
    """Normalize free text for vectorization.

    Lowercases, strips literal "\\uXXXX" escape artifacts, separates digit
    runs with spaces, removes punctuation and collapses whitespace.
    Returns the cleaned string.
    """
    text = str(text)
    text = text.lower()
    # Remove literal backslash-u escape sequences left over from scraping.
    text = re.sub(r"(\\u[0-9A-Fa-f]+)",r"", text)
    text = re.sub(r"===",r" ", text)
    # https://www.kaggle.com/demery/lightgbm-with-ridge-feature/code
    text = " ".join(map(str.strip, re.split('(\d+)',text)))
    # BUG FIX: the original pattern u'[^[:alpha:]]' used a POSIX character
    # class, which Python's re module does not support -- it parsed as "any
    # character not in {[,:,a,l,p,h} followed by a literal ']'" and was
    # effectively a no-op.  Replace every non-word character with a space.
    regex = re.compile(r"[^\w]")
    text = regex.sub(" ", text)
    text = " ".join(text.split())
    return text
def feature_engineering(df):
    """Apply all in-place feature engineering to `df` and return it.

    Steps: fill NAs, build concatenated/cleaned text columns, label-encode
    categoricals, add character/keyword count features, extract the weekday
    from activation_date, add per-column text statistics, and drop the raw
    activation_date column.

    BUG FIX: the original was decorated with @contextmanager although the
    function never yields.  The body still ran (contextlib calls the wrapped
    function eagerly), but callers received an unusable context-manager
    object instead of the DataFrame.  The decorator is removed so the
    function simply returns `df`.
    """
    # All the feature engineering here
    def Do_Text_Hash(df):
        print("feature engineering -> hash text ...")
        df["text_feature"] = df.apply(lambda row: " ".join([str(row["param_1"]),
          str(row["param_2"]), str(row["param_3"])]),axis=1)
        df["text_feature_2"] = df.apply(lambda row: " ".join([str(row["param_2"]), str(row["param_3"])]),axis=1)
        df["title_description"] = df.apply(lambda row: " ".join([str(row["title"]), str(row["description"])]),axis=1)
        print("feature engineering -> preprocess text ...")
        df["text_feature"] = df["text_feature"].apply(lambda x: text_preprocessing(x))
        df["text_feature_2"] = df["text_feature_2"].apply(lambda x: text_preprocessing(x))
        df["description"] = df["description"].apply(lambda x: text_preprocessing(x))
        df["title"] = df["title"].apply(lambda x: text_preprocessing(x))
        df["title_description"] = df["title_description"].apply(lambda x: text_preprocessing(x))
    def Do_Datetime(df):
        print("feature engineering -> date time ...")
        df["wday"] = df["activation_date"].dt.weekday
        df["wday"] =df["wday"].astype(np.uint8)
    def Do_Label_Enc(df):
        print("feature engineering -> label encoding ...")
        lbl = LabelEncoder()
        cat_col = ["user_id", "region", "city", "parent_category_name",
                   "category_name", "user_type", "image_top_1",
                   "param_1", "param_2", "param_3","image",
                   ]
        for col in cat_col:
            df[col] = lbl.fit_transform(df[col].astype(str))
            gc.collect()
    import string
    # count(l1, l2): how many characters of l1 are members of l2.
    count = lambda l1,l2: sum([1 for x in l1 if x in l2])
    def Do_NA(df):
        print("feature engineering -> fill na ...")
        df["image_top_1"].fillna(-1,inplace=True)
        df["image"].fillna("noinformation",inplace=True)
        df["param_1"].fillna("nicapotato",inplace=True)
        df["param_2"].fillna("nicapotato",inplace=True)
        df["param_3"].fillna("nicapotato",inplace=True)
        df["title"].fillna("nicapotato",inplace=True)
        df["description"].fillna("nicapotato",inplace=True)
        # price vs income
#        df["price_vs_city_income"] = df["price"] / df["income"]
#        df["price_vs_city_income"].fillna(-1, inplace=True)
    def Do_Count(df):
        print("feature engineering -> do count ...")
        # some count
        df["num_desc_punct"] = df["description"].apply(lambda x: count(x, set(string.punctuation))).astype(np.int16)
        # NOTE(review): count(x, "[A-Z]") tests substring membership, so it
        # only counts the characters '[', 'A', '-', 'Z', ']' -- NOT all
        # capitals as the name suggests.  Left as-is to preserve the trained
        # feature; confirm intent before "fixing".
        df["num_desc_capE"] = df["description"].apply(lambda x: count(x, "[A-Z]")).astype(np.int16)
        df["num_desc_capP"] = df["description"].apply(lambda x: count(x, "[А-Я]")).astype(np.int16)
        df["num_title_punct"] = df["title"].apply(lambda x: count(x, set(string.punctuation))).astype(np.int16)
        df["num_title_capE"] = df["title"].apply(lambda x: count(x, "[A-Z]")).astype(np.int16)
        df["num_title_capP"] = df["title"].apply(lambda x: count(x, "[А-Я]")) .astype(np.int16)
        # good, used, bad ... count
        df["is_in_desc_хорошо"] = df["description"].str.contains("хорошо").map({True:1, False:0}).astype(np.uint8)
        df["is_in_desc_Плохо"] = df["description"].str.contains("Плохо").map({True:1, False:0}).astype(np.uint8)
        df["is_in_desc_новый"] = df["description"].str.contains("новый").map({True:1, False:0}).astype(np.uint8)
        df["is_in_desc_старый"] = df["description"].str.contains("старый").map({True:1, False:0}).astype(np.uint8)
        df["is_in_desc_используемый"] = df["description"].str.contains("используемый").map({True:1, False:0}).astype(np.uint8)
        df["is_in_desc_есплатная_доставка"] = df["description"].str.contains("есплатная доставка").map({True:1, False:0}).astype(np.uint8)
        df["is_in_desc_есплатный_возврат"] = df["description"].str.contains("есплатный возврат").map({True:1, False:0}).astype(np.uint8)
        df["is_in_desc_идеально"] = df["description"].str.contains("идеально").map({True:1, False:0}).astype(np.uint8)
        df["is_in_desc_подержанный"] = df["description"].str.contains("подержанный").map({True:1, False:0}).astype(np.uint8)
        df["is_in_desc_пСниженные_цены"] = df["description"].str.contains("Сниженные цены").map({True:1, False:0}).astype(np.uint8)
        # new count 0604
        df["num_title_Exclamation"] = df["title"].apply(lambda x: count(x, "!")).astype(np.int16)
        df["num_title_Question"] = df["title"].apply(lambda x: count(x, "?")).astype(np.int16)
        df["num_desc_Exclamation"] = df["description"].apply(lambda x: count(x, "!")).astype(np.int16)
        df["num_desc_Question"] = df["description"].apply(lambda x: count(x, "?")).astype(np.int16)
    def Do_Drop(df):
        df.drop(["activation_date"], axis=1, inplace=True)
    def Do_Stat_Text(df):
        print("feature engineering -> statistics in text ...")
        textfeats = ["text_feature","text_feature_2","description", "title"]
        for col in textfeats:
            df[col + "_num_chars"] = df[col].apply(len).astype(np.int16)
            df[col + "_num_words"] = df[col].apply(lambda comment: len(comment.split())).astype(np.int16)
            df[col + "_num_unique_words"] = df[col].apply(lambda comment: len(set(w for w in comment.split()))).astype(np.int16)
            df[col + "_words_vs_unique"] = (df[col+"_num_unique_words"] / df[col+"_num_words"] * 100).astype(np.float32)
            gc.collect()
    # choose which functions to run (order matters: NA fill before text hash,
    # text hash before the text statistics, drop last)
    Do_NA(df)
    Do_Text_Hash(df)
    Do_Label_Enc(df)
    Do_Count(df)
    Do_Datetime(df)
    Do_Stat_Text(df)
    Do_Drop(df)
    gc.collect()
    return df
def data_vectorize(df):
    """Fit text vectorizers on `df`, return (df, sparse text matrix, vocab).

    Builds a FeatureUnion of TF-IDF / count vectorizers over the text
    columns, transforms the frame, then drops the raw text columns.
    Returns (df without text columns, sparse feature matrix, feature names).
    """
    russian_stop = set(stopwords.words("russian"))
    # Word-level TF-IDF settings shared by several vectorizers below.
    tfidf_para = {
        "stop_words": russian_stop,
        "analyzer": "word",
        "token_pattern": r"\w{1,}",
        "sublinear_tf": True,
        "dtype": np.float32,
        "norm": "l2",
        #"min_df":5,
        #"max_df":.9,
        "smooth_idf":False
    }
    # Character-level variant (used for title_char).
    tfidf_para2 = {
        "stop_words": russian_stop,
        "analyzer": "char",
        "token_pattern": r"\w{1,}",
        "sublinear_tf": True,
        "dtype": np.float32,
        "norm": "l2",
        # "min_df":5,
        # "max_df":.9,
        "smooth_idf": False
    }
    # mean rmse is: 0.23865288181138436
    # Each vectorizer pulls its own column out of the record dicts.
    def get_col(col_name): return lambda x: x[col_name]
    vectorizer = FeatureUnion([
        ("description", TfidfVectorizer(
            ngram_range=(1, 2),
            max_features=40000,#40000,18000
            **tfidf_para,
            preprocessor=get_col("description"))
         ),
#        ("title_description", TfidfVectorizer(
#            ngram_range=(1, 2),#(1,2)
#            max_features=1800,#40000,18000
#            **tfidf_para,
#            preprocessor=get_col("title_description"))
#         ),
        ("text_feature", CountVectorizer(
            ngram_range=(1, 2),
            preprocessor=get_col("text_feature"))
         ),
        ("title", TfidfVectorizer(
            ngram_range=(1, 2),
            **tfidf_para,
            preprocessor=get_col("title"))
         ),
        # two extra text vectorizers: title2 (unigrams) and title_char
        # (character n-grams)
        ("title2", TfidfVectorizer(
            ngram_range=(1, 1),
            **tfidf_para,
            preprocessor=get_col("title"))
         ),
        ("title_char", TfidfVectorizer(
            ngram_range=(1, 4),#(1, 4),(1,6)
            max_features=16000,#16000
            **tfidf_para2,
            preprocessor=get_col("title"))
         ),
    ])
    vectorizer.fit(df.to_dict("records"))
    ready_full_df = vectorizer.transform(df.to_dict("records"))
    tfvocab = vectorizer.get_feature_names()
    # Raw text columns are no longer needed once vectorized.
    df.drop(["text_feature", "text_feature_2", "description","title", "title_description"], axis=1, inplace=True)
    df.fillna(-1, inplace=True)
    return df, ready_full_df, tfvocab
# =============================================================================
# Ridge feature https://www.kaggle.com/demery/lightgbm-with-ridge-feature/code
# =============================================================================
class SklearnWrapper(object):
    """Thin adapter giving an sklearn-style estimator a train/predict facade.

    `clf` is the estimator class; it is instantiated with `params`, into
    which the seed is injected as random_state unless seed_bool is False.
    """
    def __init__(self, clf, seed=0, params=None, seed_bool = True):
        if(seed_bool == True):
            # Make the estimator deterministic for reproducible OOF folds.
            params['random_state'] = seed
        self.clf = clf(**params)

    def train(self, x_train, y_train):
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
    """Out-of-fold predictions for stacking.

    Fits `clf` on each CV split of (x_train, y), predicts the held-out rows,
    and averages the per-fold predictions on x_test.  Relies on the module
    globals `kf`, `NFOLDS`, `len_train` and `len_test`.
    Returns (oof_train, oof_test) as column vectors.
    """
    y = np.array(y)
    oof_train = np.zeros((len_train,))
    oof_test = np.zeros((len_test,))
    fold_test_preds = np.empty((NFOLDS, len_test))
    for fold_idx, (fit_idx, hold_idx) in enumerate(kf):
        # print('Ridege oof Fold {}'.format(fold_idx))
        clf.train(x_train[fit_idx], y[fit_idx])
        oof_train[hold_idx] = clf.predict(x_train[hold_idx])
        fold_test_preds[fold_idx, :] = clf.predict(x_test)
    oof_test[:] = fold_test_preds.mean(axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
# Stack train on top of test so feature engineering is applied once;
# the first len_train rows of full_df correspond to the training set.
full_df = pd.concat([train_df, test_df])
sub_item_id = test_df["item_id"]  # kept aside for building the submission file
len_train = len(train_df)
len_test = len(test_df)
# =============================================================================
# handle price
# =============================================================================
def feature_Eng_On_Price_Make_More_Cat(df):
    """Log-scale the price and derive coarse integer buckets, in place.

    Adds/overwrites: price (log-transformed float32), price+,
    item_seq_number+, des_len_log, des_nwords_log.  Returns the same frame.
    """
    print('feature engineering -> on price and SEQ ...')
    log_price = np.log(df["price"] + 0.001).astype("float32")
    df["price"] = log_price
    df["price"].fillna(-1, inplace=True)  # log of negative prices yields NaN
    # Coarse buckets, later used for per-bucket median deal_probability.
    df["price+"] = np.round(df["price"] * 2.8).astype(np.int16)  # factor was 4.8 in an earlier run
    df["item_seq_number+"] = np.round(df["item_seq_number"] / 100).astype(np.int16)
    # by steeve: compressed log-lengths of the description text
    df['des_len_log'] = (np.log(df['description_num_chars']) * 4).astype(np.int8)
    df['des_nwords_log'] = (np.log1p(df['description_num_words']) * 20).astype(np.int8)
    return df
def feature_Eng_On_Deal_Prob(df, df_train):
    """Attach per-bucket median deal_probability columns to `df` in place.

    For each bucket key the median deal_probability is computed on df_train
    only (so test rows never leak target information) and merged onto `df`
    as float32.  Returns the mutated `df`.
    """
    print('feature engineering -> on price deal prob +...')
    out = df
    # [465] train's rmse: 0.161946	valid's rmse: 0.22738
    for key in ("price+", "param_2", "item_seq_number+"):
        stat_col = 'median_deal_probability_' + key
        medians = (df_train.groupby([key], as_index=False)['deal_probability']
                   .median()
                   .rename(columns={'deal_probability': stat_col}))
        merged = pd.merge(df, medians, how='left', on=[key])
        out[stat_col] = merged[stat_col]
        out[stat_col] = out[stat_col].astype(np.float32)
        del medians, merged
        gc.collect()
    return out
del train_df, test_df; gc.collect()
# =============================================================================
# use additional image data
# =============================================================================
feature_engineering(full_df)
# Memory optimisation: downcast columns in bulk instead of 38 copy-paste
# assignments.  NOTE: the original also cast parent_category_name to float32
# before the int32 cast below -- that intermediate cast was redundant (the
# label-encoded values are tiny) and has been dropped.
_F32_COLS = [
    "average_blues", "average_greens", "average_pixel_width", "average_reds",
    "avg_days_up_user", "avg_times_up_user", "blurinesses", "dullnesses",
    "heights", "whitenesses", "widths",
    "average_HLS_Hs", "average_HLS_Ls", "average_HLS_Ss",
    "average_HSV_Ss", "average_HSV_Vs",
    "average_LUV_Ls", "average_LUV_Us", "average_LUV_Vs",
    "average_YUV_Us", "average_YUV_Vs", "average_YUV_Ys",
]
for _col in _F32_COLS:
    full_df[_col] = full_df[_col].astype(np.float32)
_I32_COLS = [
    "category_name", "city", "image", "image_top_1", "income",
    "item_seq_number", "n_user_items", "param_1", "param_2", "param_3",
    "parent_category_name", "region", "user_id", "user_type",
]
for _col in _I32_COLS:
    full_df[_col] = full_df[_col].astype(np.int32)
# population has NaNs for cities missing from the Wikipedia table.
full_df["population"] = full_df["population"].fillna(-1).astype(np.int32)
gc.collect()
from sklearn.model_selection import KFold
kf2 = KFold(n_splits=5, random_state=42, shuffle=True)
numIter = 0
rmse_sume = 0.
numLimit = 5
tmp = pd.DataFrame(full_df)
full_df_COPY = pd.DataFrame(tmp)
del tmp
pred_vals=np.zeros(y.shape)
# ---- 5-fold stacking loop: per fold, rebuild the feature frame, add NN /
# ---- SGD / Ridge out-of-fold meta-features, train XGBoost, and collect
# ---- out-of-fold predictions plus a per-fold test submission.
for train_index, valid_index in kf2.split(y):
    numIter += 1
    print("training in fold " + str(numIter))
    if numIter > numLimit:  # equivalent to the original `numIter >= numLimit + 1`
        continue
    # Restart from the pristine copy — the loop body mutates full_df in place.
    full_df = pd.DataFrame(full_df_COPY)
    tmp = full_df[:len_train]
    train_df = tmp.iloc[train_index]
    del tmp; gc.collect()
    # Drop the median target-encoding columns (deliberately not used here);
    # best-effort: they may already be absent on later folds.
    try:
        full_df.drop('median_deal_probability_price+', axis=1, inplace=True); gc.collect()
        train_df.drop('median_deal_probability_price+', axis=1, inplace=True); gc.collect()
        full_df.drop('median_deal_probability_param_2', axis=1, inplace=True); gc.collect()
        train_df.drop('median_deal_probability_param_2', axis=1, inplace=True); gc.collect()
        full_df.drop('median_deal_probability_item_seq_number+', axis=1, inplace=True); gc.collect()
        train_df.drop('median_deal_probability_item_seq_number+', axis=1, inplace=True); gc.collect()
    except:
        pass
    feature_Eng_On_Price_Make_More_Cat(full_df)
    feature_Eng_On_Price_Make_More_Cat(train_df)
    feature_Eng_On_Deal_Prob(full_df, train_df)
    # The target must not leak into the feature matrix.
    try:
        full_df.drop('deal_probability', axis=1, inplace=True); gc.collect()
    except:
        pass
    full_df, ready_full_df, tfvocab = data_vectorize(full_df)
    ready_df = ready_full_df
    # NN OOF meta-feature #1 ("Steeve" model).
    print("load nn oof 1 ...")
    nn_oof_train = pd.read_csv('../input/emb_all_80_itseqcat_price_p4_pr1_catn_rg_ut_p_ict_pc_dll_dnl_rgb_apw_5fold_train.csv')
    nn_oof_train.drop("user_id", axis=1, inplace=True)
    nn_oof_test = pd.read_csv('../input/emb_all_80_itseqcat_price_p4_pr1_catn_rg_ut_p_ict_pc_dll_dnl_rgb_apw_5fold_test.csv')
    nn_oof_full = pd.concat([nn_oof_train, nn_oof_test])
    nn_oof_full["nn_oof_1"] = nn_oof_full["deal_probability"]
    del nn_oof_full["deal_probability"]
    full_df = pd.merge(full_df, nn_oof_full, on='item_id', how='left')
    del nn_oof_train, nn_oof_test
    gc.collect()
    # NN OOF meta-feature #2 ("Zhuang" model).
    print("load nn oof 2 ...")
    nn_oof_train = pd.read_csv('../input/res12_oof.csv')
    nn_oof_train.drop("user_id", axis=1, inplace=True)
    nn_oof_test = pd.read_csv('../input/res12.csv')
    nn_oof_full = pd.concat([nn_oof_train, nn_oof_test])
    nn_oof_full["nn_oof_2"] = nn_oof_full["deal_probability"]
    del nn_oof_full["deal_probability"]
    full_df = pd.merge(full_df, nn_oof_full, on='item_id', how='left')
    del nn_oof_train, nn_oof_test
    gc.collect()
    # NOTE(review): sklearn.cross_validation is deprecated (removed in newer
    # sklearn); get_oof expects this old KFold(n, n_folds=...) API, so the
    # import is kept — shadows the model_selection KFold only inside the loop.
    from sklearn.cross_validation import KFold
    NFOLDS = 5
    SEED = 42
    kf = KFold(len_train, n_folds=NFOLDS, shuffle=True, random_state=SEED)
    # SGD OOF meta-features: once on the dense frame, once on the sparse text.
    from sklearn.linear_model import SGDRegressor
    sgdregressor_params = {'alpha': 0.0001, 'random_state': SEED, 'tol': 1e-3}
    sgd = SklearnWrapper(clf=SGDRegressor, seed=SEED, params=sgdregressor_params)
    FULL_DF = pd.DataFrame(full_df)
    FULL_DF.drop(["item_id"], axis=1, inplace=True)
    tmp1 = pd.DataFrame(full_df)
    tmp1.drop(["item_id"], axis=1, inplace=True)
    print('sgd 1 oof ...')
    sgd_oof_train, sgd_oof_test = get_oof(sgd, np.array(FULL_DF)[:len_train], y, np.array(FULL_DF)[len_train:])
    sgd_preds = np.concatenate([sgd_oof_train, sgd_oof_test])
    tmp1['sgd_preds_1'] = sgd_preds.astype(np.float32)
    tmp1['sgd_preds_1'].clip(0.0, 1.0, inplace=True)
    print('sgd 2 oof ...')
    sgd_oof_train, sgd_oof_test = get_oof(sgd, ready_df[:len_train], y, ready_df[len_train:])
    sgd_preds = np.concatenate([sgd_oof_train, sgd_oof_test])
    tmp1['sgd_preds_2'] = sgd_preds.astype(np.float32)
    tmp1['sgd_preds_2'].clip(0.0, 1.0, inplace=True)
    # Ridge OOF meta-features, variant 1: alpha=20, no normalization.
    ridge_params = {'alpha': 20.0, 'fit_intercept': True, 'normalize': False, 'copy_X': True,
                    'max_iter': None, 'tol': 1e-3, 'solver': 'auto', 'random_state': SEED}
    ridge = SklearnWrapper(clf=Ridge, seed=SEED, params=ridge_params)
    FULL_DF = pd.DataFrame(full_df)
    FULL_DF.drop(["item_id"], axis=1, inplace=True)
    tmp2 = pd.DataFrame(full_df)
    tmp2.drop(["item_id"], axis=1, inplace=True)
    print('ridge 1 oof ...')
    ridge_oof_train, ridge_oof_test = get_oof(ridge, np.array(FULL_DF)[:len_train], y, np.array(FULL_DF)[len_train:])
    ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
    tmp2['ridge_preds_1'] = ridge_preds.astype(np.float32)
    tmp2['ridge_preds_1'].clip(0.0, 1.0, inplace=True)
    print('ridge 2 oof ...')
    ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:len_train], y, ready_df[len_train:])
    ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
    tmp2['ridge_preds_2'] = ridge_preds.astype(np.float32)
    tmp2['ridge_preds_2'].clip(0.0, 1.0, inplace=True)
    # Ridge OOF meta-features, variant 2: alpha=10, normalized, shifted seed.
    ridge_params = {'alpha': 10.0, 'fit_intercept': True, 'normalize': True, 'copy_X': True,
                    'max_iter': None, 'tol': 1e-3, 'solver': 'auto', 'random_state': SEED + 2011}
    ridge = SklearnWrapper(clf=Ridge, seed=SEED, params=ridge_params)
    FULL_DF = pd.DataFrame(full_df)
    FULL_DF.drop(["item_id"], axis=1, inplace=True)
    tmp3 = pd.DataFrame(full_df)
    tmp3.drop(["item_id"], axis=1, inplace=True)
    print('ridge 1a oof ...')
    ridge_oof_train, ridge_oof_test = get_oof(ridge, np.array(FULL_DF)[:len_train], y, np.array(FULL_DF)[len_train:])
    ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
    tmp3['ridge_preds_1a'] = ridge_preds.astype(np.float32)
    tmp3['ridge_preds_1a'].clip(0.0, 1.0, inplace=True)
    print('ridge 2a oof ...')
    ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:len_train], y, ready_df[len_train:])
    ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
    tmp3['ridge_preds_2a'] = ridge_preds.astype(np.float32)
    tmp3['ridge_preds_2a'].clip(0.0, 1.0, inplace=True)
    # Merge the stacked OOF predictions back into the feature frame.
    full_df['sgd_preds_1'] = tmp1['sgd_preds_1'].astype(np.float32)
    full_df['sgd_preds_2'] = tmp1['sgd_preds_2'].astype(np.float32)
    full_df['ridge_preds_1'] = tmp2['ridge_preds_1'].astype(np.float32)
    full_df['ridge_preds_2'] = tmp2['ridge_preds_2'].astype(np.float32)
    full_df['ridge_preds_1a'] = tmp3['ridge_preds_1a'].astype(np.float32)
    full_df['ridge_preds_2a'] = tmp3['ridge_preds_2a'].astype(np.float32)
    del tmp1, tmp2, tmp3
    del ridge_oof_train, ridge_oof_test, ridge_preds, ridge, sgd_oof_test, sgd_oof_train, sgd_preds, ready_df
    gc.collect()
    full_df.drop("item_id", axis=1, inplace=True)
    print("Modeling Stage ...")
    # Combine dense features with the sparse text bag-of-words features.
    X = hstack([csr_matrix(full_df.iloc[:len_train]), ready_full_df[:len_train]])  # Sparse Matrix
    tfvocab = full_df.columns.tolist() + tfvocab
    X_test_full = full_df.iloc[len_train:]
    X_test_ready = ready_full_df[len_train:]
    del ready_full_df, full_df
    gc.collect()
    print("Feature Names Length: ", len(tfvocab))
    cat_col = [
        "user_id",
        "region",
        "city",
        "parent_category_name",
        "category_name",
        "user_type",
        "image_top_1",
        "param_1",
        "param_2",
        "param_3",
        "price+",
        "item_seq_number+",
    ]
    print("Modeling Stage ...")
    # Hoisted: the original called X.tocsr() twice on the same large matrix.
    X_csr = X.tocsr()
    X_train, X_valid = X_csr[train_index], X_csr[valid_index]
    y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
    gc.collect()
    params = {'eta': 0.02,
              "booster": "gbtree",
              'max_depth': 20,
              "nthread": 20,
              'subsample': 0.9,
              'colsample_bytree': 0.8,
              'colsample_bylevel': 0.8,
              'min_child_weight': 2,
              'alpha': 1,
              'objective': 'reg:logistic',  # target is a probability in [0, 1]
              'eval_metric': 'rmse',
              'random_state': 42,
              'silent': True,
              }
    tr_data = xgb.DMatrix(X_train, y_train)
    va_data = xgb.DMatrix(X_valid, y_valid)
    gc.collect()
    watchlist = [(tr_data, 'train'), (va_data, 'valid')]
    model = xgb.train(params, tr_data, 30000, watchlist, maximize=False, early_stopping_rounds=200,
                      verbose_eval=100)
    print("save model ...")
    joblib.dump(model, "lgb_{}.pkl".format(numIter))
    print("Model Evaluation Stage")
    # BUG FIX: the original overwrote pred_vals (the out-of-fold buffer from
    # np.zeros(y.shape)) with the *scalar* fold RMSE, and then accumulated
    # rmse(y_valid, <scalar>).  Store the validation predictions in their
    # out-of-fold slots and accumulate the fold RMSE itself.
    fold_pred = model.predict(va_data, ntree_limit=model.best_ntree_limit)
    pred_vals[valid_index] = fold_pred
    fold_rmse = rmse(y_valid, fold_pred)
    print("RMSE:", fold_rmse)
    test = hstack([csr_matrix(X_test_full), X_test_ready])  # Sparse Matrix
    X_te = xgb.DMatrix(test)
    lgpred = model.predict(X_te, ntree_limit=model.best_ntree_limit)
    lgsub = pd.DataFrame(lgpred, columns=["deal_probability"], index=sub_item_id)
    lgsub["deal_probability"].clip(0.0, 1.0, inplace=True)  # Between 0 and 1
    lgsub.to_csv("ml_xgb_5fold_sub_{}.csv".format(numIter), index=True, header=True)
    rmse_sume += fold_rmse
    del X_train, X_valid, y_train, y_valid, tr_data, va_data, train_df
    gc.collect()
# Report the mean validation RMSE over the folds that were trained.
print("mean rmse is:", rmse_sume/numLimit)
print("Features importance...")
# (Feature-importance plotting intentionally left disabled in the original.)
# Build the out-of-fold prediction file: one deal_probability per training
# row, keyed by the user_id / item_id columns from train.csv.
train_data = pd.read_csv('../input/train.csv')
label = ['deal_probability']
train_user_ids = train_data.user_id.values
train_item_ids = train_data.item_id.values
train_item_ids = train_item_ids.reshape(len(train_item_ids), 1)
# BUG FIX: the original reshaped train_item_ids a second time and assigned the
# result to train_user_ids, so the user_id column silently contained item ids.
train_user_ids = train_user_ids.reshape(len(train_user_ids), 1)
val_predicts = pd.DataFrame(data=pred_vals, columns=label)
val_predicts['user_id'] = train_user_ids
val_predicts['item_id'] = train_item_ids
val_predicts.to_csv('ml_xgb_5fold_train_oof.csv', index=False)
print("All Done.")
| [
"sklearn.cross_validation.KFold",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"re.compile",
"numpy.log",
"numpy.array",
"xgboost.DMatrix",
"re.split",
"nltk.corpus.stopwords.words",
"xgboost.train",
"numpy.empty",
"numpy.concatenate",
"pandas.DataFrame",
"scipy.sparse.csr_matr... | [((2487, 2499), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2497, 2499), False, 'import gc, re\n'), ((2525, 2580), 'pandas.DataFrame', 'pd.DataFrame', (['train_features'], {'columns': "['image_quality']"}), "(train_features, columns=['image_quality'])\n", (2537, 2580), True, 'import pandas as pd\n'), ((2605, 2659), 'pandas.DataFrame', 'pd.DataFrame', (['test_features'], {'columns': "['image_quality']"}), "(test_features, columns=['image_quality'])\n", (2617, 2659), True, 'import pandas as pd\n'), ((2952, 2964), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2962, 2964), False, 'import gc, re\n'), ((3257, 3269), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3267, 3269), False, 'import gc, re\n'), ((3295, 3351), 'pandas.DataFrame', 'pd.DataFrame', (['train_blurinesses'], {'columns': "['blurinesses']"}), "(train_blurinesses, columns=['blurinesses'])\n", (3307, 3351), True, 'import pandas as pd\n'), ((3376, 3431), 'pandas.DataFrame', 'pd.DataFrame', (['test_blurinesses'], {'columns': "['blurinesses']"}), "(test_blurinesses, columns=['blurinesses'])\n", (3388, 3431), True, 'import pandas as pd\n'), ((3997, 4009), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4007, 4009), False, 'import gc, re\n'), ((4035, 4091), 'pandas.DataFrame', 'pd.DataFrame', (['train_whitenesses'], {'columns': "['whitenesses']"}), "(train_whitenesses, columns=['whitenesses'])\n", (4047, 4091), True, 'import pandas as pd\n'), ((4116, 4171), 'pandas.DataFrame', 'pd.DataFrame', (['test_whitenesses'], {'columns': "['whitenesses']"}), "(test_whitenesses, columns=['whitenesses'])\n", (4128, 4171), True, 'import pandas as pd\n'), ((4731, 4743), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4741, 4743), False, 'import gc, re\n'), ((4769, 4823), 'pandas.DataFrame', 'pd.DataFrame', (['train_dullnesses'], {'columns': "['dullnesses']"}), "(train_dullnesses, columns=['dullnesses'])\n", (4781, 4823), True, 'import pandas as pd\n'), ((4848, 4901), 'pandas.DataFrame', 
'pd.DataFrame', (['test_dullnesses'], {'columns': "['dullnesses']"}), "(test_dullnesses, columns=['dullnesses'])\n", (4860, 4901), True, 'import pandas as pd\n'), ((5688, 5700), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5698, 5700), False, 'import gc, re\n'), ((5726, 5798), 'pandas.DataFrame', 'pd.DataFrame', (['train_average_pixel_width'], {'columns': "['average_pixel_width']"}), "(train_average_pixel_width, columns=['average_pixel_width'])\n", (5738, 5798), True, 'import pandas as pd\n'), ((5823, 5894), 'pandas.DataFrame', 'pd.DataFrame', (['test_average_pixel_width'], {'columns': "['average_pixel_width']"}), "(test_average_pixel_width, columns=['average_pixel_width'])\n", (5835, 5894), True, 'import pandas as pd\n'), ((6468, 6480), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6478, 6480), False, 'import gc, re\n'), ((6506, 6564), 'pandas.DataFrame', 'pd.DataFrame', (['train_average_reds'], {'columns': "['average_reds']"}), "(train_average_reds, columns=['average_reds'])\n", (6518, 6564), True, 'import pandas as pd\n'), ((6589, 6646), 'pandas.DataFrame', 'pd.DataFrame', (['test_average_reds'], {'columns': "['average_reds']"}), "(test_average_reds, columns=['average_reds'])\n", (6601, 6646), True, 'import pandas as pd\n'), ((7224, 7236), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7234, 7236), False, 'import gc, re\n'), ((7262, 7322), 'pandas.DataFrame', 'pd.DataFrame', (['train_average_blues'], {'columns': "['average_blues']"}), "(train_average_blues, columns=['average_blues'])\n", (7274, 7322), True, 'import pandas as pd\n'), ((7347, 7406), 'pandas.DataFrame', 'pd.DataFrame', (['test_average_blues'], {'columns': "['average_blues']"}), "(test_average_blues, columns=['average_blues'])\n", (7359, 7406), True, 'import pandas as pd\n'), ((7991, 8003), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8001, 8003), False, 'import gc, re\n'), ((8029, 8091), 'pandas.DataFrame', 'pd.DataFrame', (['train_average_greens'], {'columns': "['average_greens']"}), 
"(train_average_greens, columns=['average_greens'])\n", (8041, 8091), True, 'import pandas as pd\n'), ((8116, 8177), 'pandas.DataFrame', 'pd.DataFrame', (['test_average_greens'], {'columns': "['average_greens']"}), "(test_average_greens, columns=['average_greens'])\n", (8128, 8177), True, 'import pandas as pd\n'), ((8721, 8733), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8731, 8733), False, 'import gc, re\n'), ((8759, 8805), 'pandas.DataFrame', 'pd.DataFrame', (['train_widths'], {'columns': "['widths']"}), "(train_widths, columns=['widths'])\n", (8771, 8805), True, 'import pandas as pd\n'), ((8830, 8875), 'pandas.DataFrame', 'pd.DataFrame', (['test_widths'], {'columns': "['widths']"}), "(test_widths, columns=['widths'])\n", (8842, 8875), True, 'import pandas as pd\n'), ((9424, 9436), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9434, 9436), False, 'import gc, re\n'), ((9461, 9509), 'pandas.DataFrame', 'pd.DataFrame', (['train_heights'], {'columns': "['heights']"}), "(train_heights, columns=['heights'])\n", (9473, 9509), True, 'import pandas as pd\n'), ((9534, 9581), 'pandas.DataFrame', 'pd.DataFrame', (['test_heights'], {'columns': "['heights']"}), "(test_heights, columns=['heights'])\n", (9546, 9581), True, 'import pandas as pd\n'), ((9997, 10009), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10007, 10009), False, 'import gc, re\n'), ((10340, 10582), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': "['average_HSV_Ss', 'average_HSV_Vs', 'average_LUV_Ls', 'average_LUV_Us',\n 'average_LUV_Vs', 'average_HLS_Hs', 'average_HLS_Ls', 'average_HLS_Ss',\n 'average_YUV_Ys', 'average_YUV_Us', 'average_YUV_Vs', 'ids']"}), "(x, columns=['average_HSV_Ss', 'average_HSV_Vs',\n 'average_LUV_Ls', 'average_LUV_Us', 'average_LUV_Vs', 'average_HLS_Hs',\n 'average_HLS_Ls', 'average_HLS_Ss', 'average_YUV_Ys', 'average_YUV_Us',\n 'average_YUV_Vs', 'ids'])\n", (10352, 10582), True, 'import pandas as pd\n'), ((11185, 11427), 'pandas.DataFrame', 'pd.DataFrame', (['x'], 
{'columns': "['average_HSV_Ss', 'average_HSV_Vs', 'average_LUV_Ls', 'average_LUV_Us',\n 'average_LUV_Vs', 'average_HLS_Hs', 'average_HLS_Ls', 'average_HLS_Ss',\n 'average_YUV_Ys', 'average_YUV_Us', 'average_YUV_Vs', 'ids']"}), "(x, columns=['average_HSV_Ss', 'average_HSV_Vs',\n 'average_LUV_Ls', 'average_LUV_Us', 'average_LUV_Vs', 'average_HLS_Hs',\n 'average_HLS_Ls', 'average_HLS_Ss', 'average_YUV_Ys', 'average_YUV_Us',\n 'average_YUV_Vs', 'ids'])\n", (11197, 11427), True, 'import pandas as pd\n'), ((12080, 12092), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12090, 12092), False, 'import gc, re\n'), ((12924, 13002), 'pandas.read_csv', 'pd.read_csv', (['"""../input/region_income.csv"""'], {'sep': '""";"""', 'names': "['region', 'income']"}), "('../input/region_income.csv', sep=';', names=['region', 'income'])\n", (12935, 13002), True, 'import pandas as pd\n'), ((13122, 13134), 'gc.collect', 'gc.collect', ([], {}), '()\n', (13132, 13134), False, 'import gc, re\n'), ((13321, 13372), 'pandas.read_csv', 'pd.read_csv', (['"""../input/city_population_wiki_v3.csv"""'], {}), "('../input/city_population_wiki_v3.csv')\n", (13332, 13372), True, 'import pandas as pd\n'), ((13488, 13500), 'gc.collect', 'gc.collect', ([], {}), '()\n', (13498, 13500), False, 'import gc, re\n'), ((13925, 13937), 'gc.collect', 'gc.collect', ([], {}), '()\n', (13935, 13937), False, 'import gc, re\n'), ((13953, 13993), 'pandas.concat', 'pd.concat', (['[train_periods, test_periods]'], {}), '([train_periods, test_periods])\n', (13962, 13993), True, 'import pandas as pd\n'), ((14026, 14038), 'gc.collect', 'gc.collect', ([], {}), '()\n', (14036, 14038), False, 'import gc, re\n'), ((14185, 14199), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14197, 14199), True, 'import pandas as pd\n'), ((15037, 15049), 'gc.collect', 'gc.collect', ([], {}), '()\n', (15047, 15049), False, 'import gc, re\n'), ((15203, 15215), 'gc.collect', 'gc.collect', ([], {}), '()\n', (15213, 15215), False, 'import gc, 
re\n'), ((25481, 25511), 'pandas.concat', 'pd.concat', (['[train_df, test_df]'], {}), '([train_df, test_df])\n', (25490, 25511), True, 'import pandas as pd\n'), ((27912, 27924), 'gc.collect', 'gc.collect', ([], {}), '()\n', (27922, 27924), False, 'import gc, re\n'), ((30741, 30753), 'gc.collect', 'gc.collect', ([], {}), '()\n', (30751, 30753), False, 'import gc, re\n'), ((30804, 30852), 'sklearn.cross_validation.KFold', 'KFold', ([], {'n_splits': '(5)', 'random_state': '(42)', 'shuffle': '(True)'}), '(n_splits=5, random_state=42, shuffle=True)\n', (30809, 30852), False, 'from sklearn.cross_validation import KFold\n'), ((30900, 30921), 'pandas.DataFrame', 'pd.DataFrame', (['full_df'], {}), '(full_df)\n', (30912, 30921), True, 'import pandas as pd\n'), ((30937, 30954), 'pandas.DataFrame', 'pd.DataFrame', (['tmp'], {}), '(tmp)\n', (30949, 30954), True, 'import pandas as pd\n'), ((30973, 30990), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (30981, 30990), True, 'import numpy as np\n'), ((42688, 42721), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train.csv"""'], {}), "('../input/train.csv')\n", (42699, 42721), True, 'import pandas as pd\n'), ((42982, 43025), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'pred_vals', 'columns': 'label'}), '(data=pred_vals, columns=label)\n', (42994, 43025), True, 'import pandas as pd\n'), ((559, 570), 'time.time', 'time.time', ([], {}), '()\n', (568, 570), False, 'import time\n'), ((680, 746), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train.csv"""'], {'parse_dates': "['activation_date']"}), "('../input/train.csv', parse_dates=['activation_date'])\n", (691, 746), True, 'import pandas as pd\n'), ((801, 866), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test.csv"""'], {'parse_dates': "['activation_date']"}), "('../input/test.csv', parse_dates=['activation_date'])\n", (812, 866), True, 'import pandas as pd\n'), ((890, 949), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train_active.csv"""'], {'usecols': 
'used_cols'}), "('../input/train_active.csv', usecols=used_cols)\n", (901, 949), True, 'import pandas as pd\n'), ((968, 1026), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test_active.csv"""'], {'usecols': 'used_cols'}), "('../input/test_active.csv', usecols=used_cols)\n", (979, 1026), True, 'import pandas as pd\n'), ((1047, 1126), 'pandas.read_csv', 'pd.read_csv', (['"""../input/periods_train.csv"""'], {'parse_dates': "['date_from', 'date_to']"}), "('../input/periods_train.csv', parse_dates=['date_from', 'date_to'])\n", (1058, 1126), True, 'import pandas as pd\n'), ((1146, 1224), 'pandas.read_csv', 'pd.read_csv', (['"""../input/periods_test.csv"""'], {'parse_dates': "['date_from', 'date_to']"}), "('../input/periods_test.csv', parse_dates=['date_from', 'date_to'])\n", (1157, 1224), True, 'import pandas as pd\n'), ((1246, 1312), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train.csv"""'], {'parse_dates': "['activation_date']"}), "('../input/train.csv', parse_dates=['activation_date'])\n", (1257, 1312), True, 'import pandas as pd\n'), ((1330, 1366), 'sklearn.utils.shuffle', 'shuffle', (['train_df'], {'random_state': '(1234)'}), '(train_df, random_state=1234)\n', (1337, 1366), False, 'from sklearn.utils import shuffle\n'), ((1452, 1529), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test.csv"""'], {'nrows': '(1000)', 'parse_dates': "['activation_date']"}), "('../input/test.csv', nrows=1000, parse_dates=['activation_date'])\n", (1463, 1529), True, 'import pandas as pd\n'), ((1557, 1628), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train_active.csv"""'], {'nrows': '(1000)', 'usecols': 'used_cols'}), "('../input/train_active.csv', nrows=1000, usecols=used_cols)\n", (1568, 1628), True, 'import pandas as pd\n'), ((1648, 1718), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test_active.csv"""'], {'nrows': '(1000)', 'usecols': 'used_cols'}), "('../input/test_active.csv', nrows=1000, usecols=used_cols)\n", (1659, 1718), True, 'import pandas as pd\n'), 
((1740, 1836), 'pandas.read_csv', 'pd.read_csv', (['"""../input/periods_train.csv"""'], {'nrows': '(1000)', 'parse_dates': "['date_from', 'date_to']"}), "('../input/periods_train.csv', nrows=1000, parse_dates=[\n 'date_from', 'date_to'])\n", (1751, 1836), True, 'import pandas as pd\n'), ((1852, 1947), 'pandas.read_csv', 'pd.read_csv', (['"""../input/periods_test.csv"""'], {'nrows': '(1000)', 'parse_dates': "['date_from', 'date_to']"}), "('../input/periods_test.csv', nrows=1000, parse_dates=[\n 'date_from', 'date_to'])\n", (1863, 1947), True, 'import pandas as pd\n'), ((2259, 2273), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2270, 2273), False, 'import pickle\n'), ((2410, 2424), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2421, 2424), False, 'import pickle\n'), ((3032, 3046), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3043, 3046), False, 'import pickle\n'), ((3174, 3188), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3185, 3188), False, 'import pickle\n'), ((3771, 3785), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3782, 3785), False, 'import pickle\n'), ((3914, 3928), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3925, 3928), False, 'import pickle\n'), ((4510, 4524), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4521, 4524), False, 'import pickle\n'), ((4650, 4664), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4661, 4664), False, 'import pickle\n'), ((5429, 5443), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5440, 5443), False, 'import pickle\n'), ((5589, 5603), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5600, 5603), False, 'import pickle\n'), ((6237, 6251), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6248, 6251), False, 'import pickle\n'), ((6383, 6397), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6394, 6397), False, 'import pickle\n'), ((6990, 7004), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7001, 7004), False, 'import pickle\n'), ((7137, 7151), 
'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7148, 7151), False, 'import pickle\n'), ((7752, 7766), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7763, 7766), False, 'import pickle\n'), ((7902, 7916), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7913, 7916), False, 'import pickle\n'), ((8514, 8528), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8525, 8528), False, 'import pickle\n'), ((8648, 8662), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8659, 8662), False, 'import pickle\n'), ((9213, 9227), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9224, 9227), False, 'import pickle\n'), ((9349, 9363), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9360, 9363), False, 'import pickle\n'), ((10314, 10328), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10325, 10328), False, 'import pickle\n'), ((11160, 11174), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11171, 11174), False, 'import pickle\n'), ((15778, 15817), 're.sub', 're.sub', (['"""(\\\\\\\\u[0-9A-Fa-f]+)"""', '""""""', 'text'], {}), "('(\\\\\\\\u[0-9A-Fa-f]+)', '', text)\n", (15784, 15817), False, 'import gc, re\n'), ((15832, 15856), 're.sub', 're.sub', (['"""==="""', '""" """', 'text'], {}), "('===', ' ', text)\n", (15838, 15856), False, 'import gc, re\n'), ((16002, 16029), 're.compile', 're.compile', (['u"""[^[:alpha:]]"""'], {}), "(u'[^[:alpha:]]')\n", (16012, 16029), False, 'import gc, re\n'), ((21980, 21992), 'gc.collect', 'gc.collect', ([], {}), '()\n', (21990, 21992), False, 'import gc, re\n'), ((24872, 24894), 'numpy.zeros', 'np.zeros', (['(len_train,)'], {}), '((len_train,))\n', (24880, 24894), True, 'import numpy as np\n'), ((24910, 24931), 'numpy.zeros', 'np.zeros', (['(len_test,)'], {}), '((len_test,))\n', (24918, 24931), True, 'import numpy as np\n'), ((24951, 24979), 'numpy.empty', 'np.empty', (['(NFOLDS, len_test)'], {}), '((NFOLDS, len_test))\n', (24959, 24979), True, 'import numpy as np\n'), ((26688, 26732), 'pandas.merge', 'pd.merge', 
(['df', 'tmp'], {'how': '"""left"""', 'on': "['price+']"}), "(df, tmp, how='left', on=['price+'])\n", (26696, 26732), True, 'import pandas as pd\n'), ((26927, 26939), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26937, 26939), False, 'import gc, re\n'), ((27115, 27160), 'pandas.merge', 'pd.merge', (['df', 'tmp'], {'how': '"""left"""', 'on': "['param_2']"}), "(df, tmp, how='left', on=['param_2'])\n", (27123, 27160), True, 'import pandas as pd\n'), ((27359, 27371), 'gc.collect', 'gc.collect', ([], {}), '()\n', (27369, 27371), False, 'import gc, re\n'), ((27565, 27619), 'pandas.merge', 'pd.merge', (['df', 'tmp'], {'how': '"""left"""', 'on': "['item_seq_number+']"}), "(df, tmp, how='left', on=['item_seq_number+'])\n", (27573, 27619), True, 'import pandas as pd\n'), ((27854, 27866), 'gc.collect', 'gc.collect', ([], {}), '()\n', (27864, 27866), False, 'import gc, re\n'), ((13761, 13818), 'pandas.concat', 'pd.concat', (['[train_df, train_active, test_df, test_active]'], {}), '([train_df, train_active, test_df, test_active])\n', (13770, 13818), True, 'import pandas as pd\n'), ((17487, 17501), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (17499, 17501), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((22055, 22081), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""russian"""'], {}), "('russian')\n", (22070, 22081), False, 'from nltk.corpus import stopwords\n'), ((25138, 25149), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (25146, 25149), True, 'import numpy as np\n'), ((31198, 31224), 'pandas.DataFrame', 'pd.DataFrame', (['full_df_COPY'], {}), '(full_df_COPY)\n', (31210, 31224), True, 'import pandas as pd\n'), ((31328, 31340), 'gc.collect', 'gc.collect', ([], {}), '()\n', (31338, 31340), False, 'import gc, re\n'), ((32686, 32804), 'pandas.read_csv', 'pd.read_csv', (['"""../input/emb_all_80_itseqcat_price_p4_pr1_catn_rg_ut_p_ict_pc_dll_dnl_rgb_apw_5fold_train.csv"""'], {}), "(\n 
'../input/emb_all_80_itseqcat_price_p4_pr1_catn_rg_ut_p_ict_pc_dll_dnl_rgb_apw_5fold_train.csv'\n )\n", (32697, 32804), True, 'import pandas as pd\n'), ((32884, 33001), 'pandas.read_csv', 'pd.read_csv', (['"""../input/emb_all_80_itseqcat_price_p4_pr1_catn_rg_ut_p_ict_pc_dll_dnl_rgb_apw_5fold_test.csv"""'], {}), "(\n '../input/emb_all_80_itseqcat_price_p4_pr1_catn_rg_ut_p_ict_pc_dll_dnl_rgb_apw_5fold_test.csv'\n )\n", (32895, 33001), True, 'import pandas as pd\n'), ((33031, 33069), 'pandas.concat', 'pd.concat', (['[nn_oof_train, nn_oof_test]'], {}), '([nn_oof_train, nn_oof_test])\n', (33040, 33069), True, 'import pandas as pd\n'), ((33210, 33266), 'pandas.merge', 'pd.merge', (['full_df', 'nn_oof_full'], {'on': '"""item_id"""', 'how': '"""left"""'}), "(full_df, nn_oof_full, on='item_id', how='left')\n", (33218, 33266), True, 'import pandas as pd\n'), ((33321, 33333), 'gc.collect', 'gc.collect', ([], {}), '()\n', (33331, 33333), False, 'import gc, re\n'), ((33448, 33485), 'pandas.read_csv', 'pd.read_csv', (['"""../input/res12_oof.csv"""'], {}), "('../input/res12_oof.csv')\n", (33459, 33485), True, 'import pandas as pd\n'), ((33575, 33608), 'pandas.read_csv', 'pd.read_csv', (['"""../input/res12.csv"""'], {}), "('../input/res12.csv')\n", (33586, 33608), True, 'import pandas as pd\n'), ((33648, 33686), 'pandas.concat', 'pd.concat', (['[nn_oof_train, nn_oof_test]'], {}), '([nn_oof_train, nn_oof_test])\n', (33657, 33686), True, 'import pandas as pd\n'), ((33827, 33883), 'pandas.merge', 'pd.merge', (['full_df', 'nn_oof_full'], {'on': '"""item_id"""', 'how': '"""left"""'}), "(full_df, nn_oof_full, on='item_id', how='left')\n", (33835, 33883), True, 'import pandas as pd\n'), ((33938, 33950), 'gc.collect', 'gc.collect', ([], {}), '()\n', (33948, 33950), False, 'import gc, re\n'), ((34106, 34171), 'sklearn.cross_validation.KFold', 'KFold', (['len_train'], {'n_folds': 'NFOLDS', 'shuffle': '(True)', 'random_state': 'SEED'}), '(len_train, n_folds=NFOLDS, shuffle=True, 
random_state=SEED)\n', (34111, 34171), False, 'from sklearn.cross_validation import KFold\n'), ((34461, 34482), 'pandas.DataFrame', 'pd.DataFrame', (['full_df'], {}), '(full_df)\n', (34473, 34482), True, 'import pandas as pd\n'), ((34562, 34583), 'pandas.DataFrame', 'pd.DataFrame', (['full_df'], {}), '(full_df)\n', (34574, 34583), True, 'import pandas as pd\n'), ((34820, 34865), 'numpy.concatenate', 'np.concatenate', (['[sgd_oof_train, sgd_oof_test]'], {}), '([sgd_oof_train, sgd_oof_test])\n', (34834, 34865), True, 'import numpy as np\n'), ((35151, 35196), 'numpy.concatenate', 'np.concatenate', (['[sgd_oof_train, sgd_oof_test]'], {}), '([sgd_oof_train, sgd_oof_test])\n', (35165, 35196), True, 'import numpy as np\n'), ((35665, 35686), 'pandas.DataFrame', 'pd.DataFrame', (['full_df'], {}), '(full_df)\n', (35677, 35686), True, 'import pandas as pd\n'), ((35766, 35787), 'pandas.DataFrame', 'pd.DataFrame', (['full_df'], {}), '(full_df)\n', (35778, 35787), True, 'import pandas as pd\n'), ((36034, 36083), 'numpy.concatenate', 'np.concatenate', (['[ridge_oof_train, ridge_oof_test]'], {}), '([ridge_oof_train, ridge_oof_test])\n', (36048, 36083), True, 'import numpy as np\n'), ((36385, 36434), 'numpy.concatenate', 'np.concatenate', (['[ridge_oof_train, ridge_oof_test]'], {}), '([ridge_oof_train, ridge_oof_test])\n', (36399, 36434), True, 'import numpy as np\n'), ((36915, 36936), 'pandas.DataFrame', 'pd.DataFrame', (['full_df'], {}), '(full_df)\n', (36927, 36936), True, 'import pandas as pd\n'), ((37016, 37037), 'pandas.DataFrame', 'pd.DataFrame', (['full_df'], {}), '(full_df)\n', (37028, 37037), True, 'import pandas as pd\n'), ((37285, 37334), 'numpy.concatenate', 'np.concatenate', (['[ridge_oof_train, ridge_oof_test]'], {}), '([ridge_oof_train, ridge_oof_test])\n', (37299, 37334), True, 'import numpy as np\n'), ((37639, 37688), 'numpy.concatenate', 'np.concatenate', (['[ridge_oof_train, ridge_oof_test]'], {}), '([ridge_oof_train, ridge_oof_test])\n', (37653, 37688), True, 
'import numpy as np\n'), ((38546, 38558), 'gc.collect', 'gc.collect', ([], {}), '()\n', (38556, 38558), False, 'import gc, re\n'), ((39111, 39123), 'gc.collect', 'gc.collect', ([], {}), '()\n', (39121, 39123), False, 'import gc, re\n'), ((39966, 39978), 'gc.collect', 'gc.collect', ([], {}), '()\n', (39976, 39978), False, 'import gc, re\n'), ((40801, 40830), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (40812, 40830), True, 'import xgboost as xgb\n'), ((40853, 40882), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_valid', 'y_valid'], {}), '(X_valid, y_valid)\n', (40864, 40882), True, 'import xgboost as xgb\n'), ((40896, 40908), 'gc.collect', 'gc.collect', ([], {}), '()\n', (40906, 40908), False, 'import gc, re\n'), ((40995, 41104), 'xgboost.train', 'xgb.train', (['params', 'tr_data', '(30000)', 'watchlist'], {'maximize': '(False)', 'early_stopping_rounds': '(200)', 'verbose_eval': '(100)'}), '(params, tr_data, 30000, watchlist, maximize=False,\n early_stopping_rounds=200, verbose_eval=100)\n', (41004, 41104), True, 'import xgboost as xgb\n'), ((41615, 41632), 'xgboost.DMatrix', 'xgb.DMatrix', (['test'], {}), '(test)\n', (41626, 41632), True, 'import xgboost as xgb\n'), ((41742, 41811), 'pandas.DataFrame', 'pd.DataFrame', (['lgpred'], {'columns': "['deal_probability']", 'index': 'sub_item_id'}), "(lgpred, columns=['deal_probability'], index=sub_item_id)\n", (41754, 41811), True, 'import pandas as pd\n'), ((42153, 42165), 'gc.collect', 'gc.collect', ([], {}), '()\n', (42163, 42165), False, 'import gc, re\n'), ((15965, 15989), 're.split', 're.split', (['"""(\\\\d+)"""', 'text'], {}), "('(\\\\d+)', text)\n", (15973, 15989), False, 'import gc, re\n'), ((17808, 17820), 'gc.collect', 'gc.collect', ([], {}), '()\n', (17818, 17820), False, 'import gc, re\n'), ((21760, 21772), 'gc.collect', 'gc.collect', ([], {}), '()\n', (21770, 21772), False, 'import gc, re\n'), ((25894, 25921), 'numpy.log', 'np.log', (["(df['price'] + 0.001)"], {}), 
"(df['price'] + 0.001)\n", (25900, 25921), True, 'import numpy as np\n'), ((25999, 26026), 'numpy.round', 'np.round', (["(df['price'] * 2.8)"], {}), "(df['price'] * 2.8)\n", (26007, 26026), True, 'import numpy as np\n'), ((26077, 26114), 'numpy.round', 'np.round', (["(df['item_seq_number'] / 100)"], {}), "(df['item_seq_number'] / 100)\n", (26085, 26114), True, 'import numpy as np\n'), ((31493, 31505), 'gc.collect', 'gc.collect', ([], {}), '()\n', (31503, 31505), False, 'import gc, re\n'), ((31595, 31607), 'gc.collect', 'gc.collect', ([], {}), '()\n', (31605, 31607), False, 'import gc, re\n'), ((31697, 31709), 'gc.collect', 'gc.collect', ([], {}), '()\n', (31707, 31709), False, 'import gc, re\n'), ((31800, 31812), 'gc.collect', 'gc.collect', ([], {}), '()\n', (31810, 31812), False, 'import gc, re\n'), ((31911, 31923), 'gc.collect', 'gc.collect', ([], {}), '()\n', (31921, 31923), False, 'import gc, re\n'), ((32023, 32035), 'gc.collect', 'gc.collect', ([], {}), '()\n', (32033, 32035), False, 'import gc, re\n'), ((32395, 32407), 'gc.collect', 'gc.collect', ([], {}), '()\n', (32405, 32407), False, 'import gc, re\n'), ((26176, 26211), 'numpy.log', 'np.log', (["df['description_num_chars']"], {}), "(df['description_num_chars'])\n", (26182, 26211), True, 'import numpy as np\n'), ((26261, 26298), 'numpy.log1p', 'np.log1p', (["df['description_num_words']"], {}), "(df['description_num_words'])\n", (26269, 26298), True, 'import numpy as np\n'), ((34731, 34748), 'numpy.array', 'np.array', (['FULL_DF'], {}), '(FULL_DF)\n', (34739, 34748), True, 'import numpy as np\n'), ((34765, 34782), 'numpy.array', 'np.array', (['FULL_DF'], {}), '(FULL_DF)\n', (34773, 34782), True, 'import numpy as np\n'), ((35943, 35960), 'numpy.array', 'np.array', (['FULL_DF'], {}), '(FULL_DF)\n', (35951, 35960), True, 'import numpy as np\n'), ((35977, 35994), 'numpy.array', 'np.array', (['FULL_DF'], {}), '(FULL_DF)\n', (35985, 35994), True, 'import numpy as np\n'), ((37194, 37211), 'numpy.array', 'np.array', 
(['FULL_DF'], {}), '(FULL_DF)\n', (37202, 37211), True, 'import numpy as np\n'), ((37228, 37245), 'numpy.array', 'np.array', (['FULL_DF'], {}), '(FULL_DF)\n', (37236, 37245), True, 'import numpy as np\n'), ((38821, 38857), 'scipy.sparse.csr_matrix', 'csr_matrix', (['full_df.iloc[:len_train]'], {}), '(full_df.iloc[:len_train])\n', (38831, 38857), False, 'from scipy.sparse import hstack, csr_matrix\n'), ((41539, 41562), 'scipy.sparse.csr_matrix', 'csr_matrix', (['X_test_full'], {}), '(X_test_full)\n', (41549, 41562), False, 'from scipy.sparse import hstack, csr_matrix\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 5 10:02:44 2018
@author: wuxiaochuna
"""
import os
import numpy as np
import time
import PIL
import argparse
parser = argparse.ArgumentParser()
# NOTE: raw strings (r'...') are required for these Windows paths — in a plain
# string '\r' in '\raw_segmentation_results' would be read as a carriage
# return. Also fixed: three calls used the misspelled 'add_agrument', which
# raises AttributeError at import time.
parser.add_argument('--path_val', type=str, default=r'..\2011_trainaug\raw_segmentation_results',
                    help='The directory containing the first model inference.')
parser.add_argument('--path_aug', type=str, default=r'..\2011_trainaug\raw_segmentation_results',
                    help='The directory containing the second model inference.')
parser.add_argument('--path_ori', type=str, default=r'..\..\voc-test\results\VOC2011\Segmentation\comp6_test_cls',
                    help='The directory containing the third model inference.')
parser.add_argument('--path_stacking', type=str, default=r'..\2011_stacking',
                    help='Path to the directory to generate the pixel_stacking result.')
FLAGS = parser.parse_args()
# All three input directories are expected to contain identically named files;
# fixed 'FLAGS.FLAGS.path_val', which raised AttributeError.
names = os.listdir(FLAGS.path_val)
# 对result进行名称转换============================================================
# count = 0
# start_time = time.time()
# for name_old in names:
# name = name_old.split('\'')
# name_new = name[1]
# os.rename(path+"\\"+name_old,path+"\\"+name_new+'.png')
# count += 1
#
# end_time = time.time()
# print("{} images have been renamed, the total time is {}s.".format(count,(end_time-start_time)))
#==============================================================================
def pixelSelect(pixel_0, pixel_1, pixel_2):
    """Return the majority class label among three pixel votes.

    A three-way tie (all labels distinct) resolves to the smallest label,
    which is the argmax tie-breaking behaviour of np.argmax on bincounts.
    """
    tally = np.bincount([pixel_0, pixel_1, pixel_2])
    return np.argmax(tally)
start_time = time.time()
count = 0
for name in names:
    # Load the three segmentation results produced for the same image.
    img_trainval = PIL.Image.open(FLAGS.path_val+'\\'+name)
    img_trainaug = PIL.Image.open(FLAGS.path_aug+'\\'+name)
    img_original = PIL.Image.open(FLAGS.path_ori+'\\'+name)
    img_val = np.array(img_trainval)
    img_aug = np.array(img_trainaug)
    img_ori = np.array(img_original)
    # NOTE: the names are swapped relative to numpy convention (shape[0] is
    # rows), but they are used consistently below, so the result is correct.
    height = img_val.shape[1]
    width = img_val.shape[0]
    # Allocate the output as uint8: np.zeros defaults to float64, which PIL
    # maps to mode 'F' and cannot convert to a palette ('P') image. Class
    # labels are small non-negative integers, so uint8 is sufficient.
    img_stacking = np.zeros((width,height), dtype=np.uint8)
    # Per-pixel majority vote across the three inference results.
    for i in range(width):
        for j in range(height):
            img_stacking[i][j] = pixelSelect(img_val[i][j],img_aug[i][j],img_ori[i][j])
    img = PIL.Image.fromarray(img_stacking).convert('P')
    img.save(FLAGS.path_stacking+'\\'+name)
    count += 1
end_time = time.time()
print('stacking done!\n{} images done, {}s cost.'.format(count,(end_time-start_time)))
#image = PIL.Image.open(path+'\\2008_000006.png')
#print(pixelSelect(0,2,1))
| [
"PIL.Image.fromarray",
"os.listdir",
"PIL.Image.open",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"time.time",
"numpy.bincount"
] | [((169, 194), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (192, 194), False, 'import argparse\n'), ((990, 1022), 'os.listdir', 'os.listdir', (['FLAGS.FLAGS.path_val'], {}), '(FLAGS.FLAGS.path_val)\n', (1000, 1022), False, 'import os\n'), ((1662, 1673), 'time.time', 'time.time', ([], {}), '()\n', (1671, 1673), False, 'import time\n'), ((2354, 2365), 'time.time', 'time.time', ([], {}), '()\n', (2363, 2365), False, 'import time\n'), ((1600, 1619), 'numpy.bincount', 'np.bincount', (['pixels'], {}), '(pixels)\n', (1611, 1619), True, 'import numpy as np\n'), ((1630, 1647), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (1639, 1647), True, 'import numpy as np\n'), ((1721, 1765), 'PIL.Image.open', 'PIL.Image.open', (["(FLAGS.path_val + '\\\\' + name)"], {}), "(FLAGS.path_val + '\\\\' + name)\n", (1735, 1765), False, 'import PIL\n'), ((1780, 1824), 'PIL.Image.open', 'PIL.Image.open', (["(FLAGS.path_aug + '\\\\' + name)"], {}), "(FLAGS.path_aug + '\\\\' + name)\n", (1794, 1824), False, 'import PIL\n'), ((1839, 1883), 'PIL.Image.open', 'PIL.Image.open', (["(FLAGS.path_ori + '\\\\' + name)"], {}), "(FLAGS.path_ori + '\\\\' + name)\n", (1853, 1883), False, 'import PIL\n'), ((1893, 1915), 'numpy.array', 'np.array', (['img_trainval'], {}), '(img_trainval)\n', (1901, 1915), True, 'import numpy as np\n'), ((1929, 1951), 'numpy.array', 'np.array', (['img_trainaug'], {}), '(img_trainaug)\n', (1937, 1951), True, 'import numpy as np\n'), ((1965, 1987), 'numpy.array', 'np.array', (['img_original'], {}), '(img_original)\n', (1973, 1987), True, 'import numpy as np\n'), ((2063, 2088), 'numpy.zeros', 'np.zeros', (['(width, height)'], {}), '((width, height))\n', (2071, 2088), True, 'import numpy as np\n'), ((2238, 2271), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['img_stacking'], {}), '(img_stacking)\n', (2257, 2271), False, 'import PIL\n')] |
# build_matrix.py
#
# <NAME>
# <EMAIL>
#
# APPM 4380: Project 3. Least Squares Inversion
# Code to build equation matrix.
#
# The Algorithm works by counting the number of points along the trajectory
# of the X-Ray in each grid-box. The number of points tallied correspond
# to the coefficient in the equation matrix.
#
# For rays up to pi/4, the equation generates a list of points in x
# spanning -sqrt(2),sqrt(2), computes y = tan(theta) + C where C determines
# the initial vertical position of the ray. If the angle is greater than
# pi/4., the array generates a vector of y and uses x = y*cot(theta) + C_x
#
__author__ = "<NAME>"
import numpy as np
import math
def update(A, X, Y, N, th, b, S):
    """Tally ray sample points into one equation row A and accumulate b.

    Each in-bounds sample increments the coefficient of the grid cell it
    falls in (flattened row-major).  For shallow rays (|th| < pi/4) the
    cell index is N*Y+X and the phantom S is sampled at (2Y, 2X); for
    steep rays the roles of X and Y are swapped.  Returns (A, b).
    """
    shallow = abs(th) < np.pi / 4  # loop-invariant: hoisted out of the loop
    for k in range(len(X)):
        xi, yi = X[k], Y[k]
        # Skip samples that fall outside the N x N grid.
        if not (0 <= xi < N and 0 <= yi < N):
            continue
        if shallow:
            A[N * yi + xi] += 1
            b += S[2 * yi, 2 * xi]
        else:
            A[N * xi + yi] += 1
            b += S[2 * xi, 2 * yi]
    return A, b
# Grid dimensions (reconstruction is an N x N image, N**2 unknowns)
N = 31
# Number of rays per angle
m = 2*N
# Number of points sampled along each ray
n = 2*N
# Generate Theta array: projection angles spanning a half circle
theta_i = -np.pi/2.
theta_f = np.pi/2.
Ntheta = 180
th = np.linspace(theta_i,theta_f,Ntheta)
# Define shape of A matrix: one equation row per (angle, ray) pair over
# the N**2 flattened grid unknowns; b holds the matching line integrals.
A = np.zeros([m*Ntheta,N**2])
b = np.zeros([m*Ntheta])
#xx = np.linspace(-1,1,N)
#yy = np.linspace(-1,1,N)
#Xx,Yy = np.meshgrid(xx,yy)
#S = RHO0*np.heaviside(1.-Xx**2-Yy**2,0.5)*(0.7-Xx**2-Yy**2)
# Ground-truth phantom sampled from file; indexed at doubled coordinates in
# update(), so it is presumably a 2N x 2N grid -- verify against the CSV.
S = np.genfromtxt("CU_logo.csv",delimiter=",")
# Do the cot theta division and function call outside the loop.
# multiplication is ~ 40 times faster chip level than sin/cos/tan
for t in range(Ntheta):
    # Sample points along [-sqrt(2), sqrt(2)] (the diagonal of the unit
    # square) and ray offsets C covering the same span.
    x = np.linspace(-np.sqrt(2),np.sqrt(2),n)
    C = np.linspace(-np.sqrt(2),np.sqrt(2),m)
    y = np.zeros([m,n])
    for i in range(m):
        # Define straight line corresponding to the ray
        # Because y is a 2D array and x is 1D, to accomodate the floating
        # constant, I'm not changing names when switching from equation
        # in x for y (y=tan(theta)*x+C) and (x=cot(theta)*y+C). The reason
        # why this works is that when 0<th<pi/4, every point in x is
        # sampled while for pi/4<th<pi/2, every point in y is sampled,
        # thus it is sufficient to define one spanning vector and define
        # the other in terms of it
        # NOTE(review): -abs(th[t])/th[t] is a sign factor (+/-1) and divides
        # by zero when th[t] == 0. The symmetric 180-point linspace happens to
        # exclude 0; confirm before changing Ntheta to an odd count.
        if(abs(th[t]) > np.pi/4.):
            y[i] = -abs(th[t])/th[t]*(x*np.tan(np.pi/2 - abs(th[t])) -C[i])
        else:
            y[i] = -abs(th[t])/th[t]*(x*np.tan(th[t]) - C[i])
        # Map physical coordinates [-1, 1] onto grid indices [0, N).
        X = (x+1)/2.*(N)
        Y = (y[i]+1)/2.*(N)
        # Ignore points exactly on the boundary of the grid
        # ii = np.unique(np.append(ii,jj))
        # X = np.delete(X,ii)
        # Y = np.delete(Y,ii)
        # X[ii] = N-1
        # Y[jj] = N-1
        X = X.astype(int)
        Y = Y.astype(int)
        # ... Otherwise, tally up that point in A. Vectorize for
        # maximum performance
        A[t*m+i], b[t*m+i] = update(A[t*m+i],X,Y,N,th[t],b[t*m+i],S)
# Drop equation rows that collected no grid hits (rays missing the grid
# entirely) together with their entries in b; compute the mask once.
zero_rows = np.all(A == 0, axis=1)
ii = np.where(zero_rows)
A = A[~zero_rows]
b = np.delete(b,ii)
#for i in range(len(A)):
#    A[i] /= np.linalg.linalg.norm(A[i])
#    b[i] /= np.linalg.linalg.norm(A[i])
| [
"numpy.sqrt",
"numpy.tan",
"numpy.delete",
"numpy.linspace",
"numpy.zeros",
"numpy.all",
"numpy.genfromtxt"
] | [((1177, 1214), 'numpy.linspace', 'np.linspace', (['theta_i', 'theta_f', 'Ntheta'], {}), '(theta_i, theta_f, Ntheta)\n', (1188, 1214), True, 'import numpy as np\n'), ((1245, 1275), 'numpy.zeros', 'np.zeros', (['[m * Ntheta, N ** 2]'], {}), '([m * Ntheta, N ** 2])\n', (1253, 1275), True, 'import numpy as np\n'), ((1275, 1297), 'numpy.zeros', 'np.zeros', (['[m * Ntheta]'], {}), '([m * Ntheta])\n', (1283, 1297), True, 'import numpy as np\n'), ((1445, 1488), 'numpy.genfromtxt', 'np.genfromtxt', (['"""CU_logo.csv"""'], {'delimiter': '""","""'}), "('CU_logo.csv', delimiter=',')\n", (1458, 1488), True, 'import numpy as np\n'), ((3188, 3204), 'numpy.delete', 'np.delete', (['b', 'ii'], {}), '(b, ii)\n', (3197, 3204), True, 'import numpy as np\n'), ((1764, 1780), 'numpy.zeros', 'np.zeros', (['[m, n]'], {}), '([m, n])\n', (1772, 1780), True, 'import numpy as np\n'), ((3129, 3151), 'numpy.all', 'np.all', (['(A == 0)'], {'axis': '(1)'}), '(A == 0, axis=1)\n', (3135, 3151), True, 'import numpy as np\n'), ((1688, 1698), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1695, 1698), True, 'import numpy as np\n'), ((1738, 1748), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1745, 1748), True, 'import numpy as np\n'), ((3160, 3182), 'numpy.all', 'np.all', (['(A == 0)'], {'axis': '(1)'}), '(A == 0, axis=1)\n', (3166, 3182), True, 'import numpy as np\n'), ((1677, 1687), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1684, 1687), True, 'import numpy as np\n'), ((1727, 1737), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1734, 1737), True, 'import numpy as np\n'), ((2546, 2559), 'numpy.tan', 'np.tan', (['th[t]'], {}), '(th[t])\n', (2552, 2559), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# pylint: disable=too-many-instance-attributes
"""
Handling of logs and plots for learning
"""
import os
import sys
# Make the repository root (one level above this script) importable and
# remember it for building log-folder paths below.
script_dir = os.path.dirname(__file__)
parent_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
sys.path.insert(1, parent_dir)
import shutil
import pickle
from dataclasses import dataclass
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
def open_file(path, mode):
    """
    Open *path* in *mode*, retrying on transient PermissionError.

    Windows occasionally raises intermittent PermissionErrors; retry up to
    100 times.  Returns the file object, or None if every attempt failed.
    """
    attempts_left = 100
    handle = None
    while attempts_left:  # pragma: no branch
        attempts_left -= 1
        try:
            handle = open(path, mode)
        except PermissionError:  # pragma: no cover
            continue
        break
    return handle
def make_directory(path):
    """
    Create the directory at *path*, retrying on transient PermissionError.

    Windows occasionally raises intermittent PermissionErrors; retry up to
    100 times.  Any other OSError (e.g. FileExistsError) propagates.
    """
    attempts_left = 100
    while attempts_left:  # pragma: no branch
        attempts_left -= 1
        try:
            os.mkdir(path)
        except PermissionError:  # pragma: no cover
            continue
        break
def get_log_folder(log_name):
    """ Returns the path of the log folder for *log_name* as a string """
    return f"{parent_dir}/logs/log_{log_name}"
def clear_logs(log_name):
    """ Clears any previous log folder of the same name and creates fresh, empty log files """
    log_folder = get_log_folder(log_name)
    try:
        shutil.rmtree(log_folder)
    except FileNotFoundError: # pragma: no cover
        pass
    make_directory(log_folder)
    fitness_log_path = log_folder + '/fitness_log.txt'
    population_log_path = log_folder + '/population_log.txt'
    # "x" mode fails if the file already exists, guarding against clearing
    # into a non-empty folder.  Close the handles immediately instead of
    # leaking them until garbage collection.
    open(fitness_log_path, "x").close()
    open(population_log_path, "x").close()
def log_best_individual(log_name, best_individual):
    """ Pickles the best individual into the log folder """
    path = get_log_folder(log_name) + '/best_individual.pickle'
    with open_file(path, 'wb') as f:
        pickle.dump(best_individual, f)
def log_fitness(log_name, fitness):
    """ Appends the fitness of all individuals to the text log """
    path = get_log_folder(log_name) + '/fitness_log.txt'
    with open_file(path, 'a') as f:
        f.write(str(fitness) + "\n")
def log_best_fitness(log_name, best_fitness):
    """ Pickles the per-generation best fitness list """
    path = get_log_folder(log_name) + '/best_fitness_log.pickle'
    with open_file(path, 'wb') as f:
        pickle.dump(best_fitness, f)
def log_n_episodes(log_name, n_episodes):
    """ Pickles the number of episodes """
    path = get_log_folder(log_name) + '/n_episodes_log.pickle'
    with open_file(path, 'wb') as f:
        pickle.dump(n_episodes, f)
def log_population(log_name, population):
    """ Appends the full population of the generation to the text log """
    path = get_log_folder(log_name) + '/population_log.txt'
    with open_file(path, 'a') as f:
        f.write(str(population) + "\n")
def log_settings(log_name, settings):
    """ Appends the settings used for the run, one 'key value' pair per line """
    with open_file(get_log_folder(log_name) + '/settings.txt', 'a') as f:
        for key, value in vars(settings).items():
            f.write('{} {}\n'.format(key, value))
def get_best_fitness(log_name):
    """ Gets the pickled best fitness list from the given log """
    path = get_log_folder(log_name) + '/best_fitness_log.pickle'
    with open_file(path, 'rb') as f:
        return pickle.load(f)
def get_n_episodes(log_name):
    """ Gets the pickled list of n_episodes from the given log """
    path = get_log_folder(log_name) + '/n_episodes_log.pickle'
    with open_file(path, 'rb') as f:
        return pickle.load(f)
def get_last_line(file_name):
    """ Returns the last line of the given file """
    with open_file(file_name, 'rb') as f:
        # Seek to just before the final byte and walk backwards one byte at a
        # time until the newline ending the second-to-last line is read.
        f.seek(-2, os.SEEK_END)
        while f.read(1) != b'\n':
            f.seek(-2, os.SEEK_CUR)
        return f.readline().decode()
def get_last_population(log_name):
    """ Gets the most recently logged population from the given log """
    return get_last_line(get_log_folder(log_name) + '/population_log.txt')
def get_last_fitness(log_name):
    """ Gets the most recently logged fitness list from the given log """
    return get_last_line(get_log_folder(log_name) + '/fitness_log.txt')
def get_best_individual(log_name):
    """ Returns the pickled best individual from the given log """
    path = get_log_folder(log_name) + '/best_individual.pickle'
    with open_file(path, 'rb') as f:
        return pickle.load(f)
def plot_fitness(log_name, fitness, n_episodes=None):
    """
    Plots fitness against episodes (when n_episodes is given) or against
    generation index, and saves the figure into the log folder.
    """
    if n_episodes is None:
        plt.plot(fitness)
        plt.xlabel("Generation")
    else:
        plt.plot(n_episodes, fitness)
        plt.xlabel("Episodes")
    plt.ylabel("Fitness")
    plt.savefig(get_log_folder(log_name) + '/Fitness.svg')
    plt.close()
@dataclass
class PlotParameters:
    """ Data class holding all options consumed by plot_learning_curves """
    plot_mean: bool = True        #Plot the mean of the logs
    mean_color: str = 'b'         #Color for mean curve
    plot_std: bool = False        #Plot the standard deviation band
    std_color: str = 'b'          #Fill color of the std band
    plot_ind: bool = False        #Plot each individual log as a dashed curve
    ind_color: str = 'aquamarine' #Color for individual log curves
    legend_name: str = ''         #Legend label for the mean curve
    legend_fsize: float = 16.0    #Font size of legend
    title: str = ''               #Plot title
    title_fsize: float = 18.0     #Font size of title
    xlabel: str = ''              #Label of x axis
    x_max: int = 0                #Upper limit of x axis
    ylabel: str = ''              #Label of y axis
    label_fsize: float = 16.0     #Font size of axis labels
    extrapolate_y: bool = True    #Extrapolate y as constant to x_max
    plot_optimal: bool = True     #Plot optimum as a thin dashed horizontal line
    optimum: float = 0            #Optimal value to plot
    save_fig: bool = True         #Save figure. If false, more plots is possible.
    path: str = 'logs/plot.png'   #Path to save the figure
def plot_learning_curves(logs, parameters):
    """
    Plots mean and standard deviation of a number of logs in the same figure

    Each log contributes a (n_episodes, best_fitness) curve; the curves are
    interpolated onto a common episode axis before averaging.

    Args:
        logs: iterable of log names readable by get_best_fitness/get_n_episodes
        parameters: PlotParameters instance controlling appearance and output
    """
    fitness = []
    n_episodes = []
    for log_name in logs:
        fitness.append(get_best_fitness(log_name))
        n_episodes.append(get_n_episodes(log_name))
    fitness = np.array(fitness)
    n_episodes = np.array(n_episodes)
    n_logs = len(logs)
    # Common x axis: from the latest first-measurement to (depending on
    # extrapolation) either x_max or the earliest last-measurement.
    startx = np.max(n_episodes[:, 0])
    endx = np.min(n_episodes[:, -1])
    if parameters.extrapolate_y:
        x = np.arange(startx, parameters.x_max + 1)
    else:
        x = np.arange(startx, endx + 1)
    y = np.zeros((len(x), n_logs))
    for i in range(0, n_logs):
        # Resample log i onto the common axis; outside its measured range
        # interp1d yields NaN (bounds_error=False).
        f = interpolate.interp1d(n_episodes[i, :], fitness[i, :], bounds_error=False)
        y[:, i] = f(x)
        if parameters.extrapolate_y:
            # Extend the log's final fitness value as a constant up to x_max.
            n_extrapolated = int(parameters.x_max - n_episodes[i, -1])
            if n_extrapolated > 0:
                left = y[:n_episodes[i, -1] - n_episodes[i, 0] + 1, i]
                y[:, i] = np.concatenate((left, np.full(n_extrapolated, left[-1])))
        if parameters.plot_ind:
            plt.plot(x, y[:, i], color=parameters.ind_color, linestyle='dashed', linewidth=1)
    y_mean = np.mean(y, axis=1)
    if parameters.plot_mean:
        plt.plot(x, y_mean, color=parameters.mean_color, label=parameters.legend_name)
    y_std = np.std(y, axis=1)
    if parameters.plot_std:
        plt.fill_between(x, y_mean - y_std, y_mean + y_std, alpha=.1, color=parameters.std_color)
    plt.legend(loc="lower right", prop={'size': parameters.legend_fsize})
    plt.xlabel(parameters.xlabel, fontsize=parameters.label_fsize)
    if parameters.x_max > 0:
        plt.xlim(0, parameters.x_max)
    plt.ylabel(parameters.ylabel, fontsize=parameters.label_fsize)
    plt.title(parameters.title, fontsize=parameters.title_fsize, wrap=True)
    if parameters.save_fig:
        if parameters.plot_optimal:
            # Thin dashed horizontal line marking the optimal value.
            plt.plot([0, parameters.x_max], \
                     [parameters.optimum, parameters.optimum], \
                     color='k', linestyle='dashed', linewidth=1)
        plt.savefig(parameters.path, format='svg', dpi=300)
        plt.close()
| [
"sys.path.insert",
"matplotlib.pyplot.ylabel",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.fill_between",
"numpy.array",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"os.mkdir",
"numpy.min",
"matplotlib.pyp... | [((153, 178), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (168, 178), False, 'import os\n'), ((245, 275), 'sys.path.insert', 'sys.path.insert', (['(1)', 'parent_dir'], {}), '(1, parent_dir)\n', (260, 275), False, 'import sys\n'), ((208, 243), 'os.path.join', 'os.path.join', (['script_dir', 'os.pardir'], {}), '(script_dir, os.pardir)\n', (220, 243), False, 'import os\n'), ((4683, 4704), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fitness"""'], {}), "('Fitness')\n", (4693, 4704), True, 'import matplotlib.pyplot as plt\n'), ((4768, 4779), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4777, 4779), True, 'import matplotlib.pyplot as plt\n'), ((6336, 6353), 'numpy.array', 'np.array', (['fitness'], {}), '(fitness)\n', (6344, 6353), True, 'import numpy as np\n'), ((6371, 6391), 'numpy.array', 'np.array', (['n_episodes'], {}), '(n_episodes)\n', (6379, 6391), True, 'import numpy as np\n'), ((6429, 6453), 'numpy.max', 'np.max', (['n_episodes[:, 0]'], {}), '(n_episodes[:, 0])\n', (6435, 6453), True, 'import numpy as np\n'), ((6465, 6490), 'numpy.min', 'np.min', (['n_episodes[:, -1]'], {}), '(n_episodes[:, -1])\n', (6471, 6490), True, 'import numpy as np\n'), ((7239, 7257), 'numpy.mean', 'np.mean', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (7246, 7257), True, 'import numpy as np\n'), ((7386, 7403), 'numpy.std', 'np.std', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (7392, 7403), True, 'import numpy as np\n'), ((7535, 7604), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'prop': "{'size': parameters.legend_fsize}"}), "(loc='lower right', prop={'size': parameters.legend_fsize})\n", (7545, 7604), True, 'import matplotlib.pyplot as plt\n'), ((7609, 7671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['parameters.xlabel'], {'fontsize': 'parameters.label_fsize'}), '(parameters.xlabel, fontsize=parameters.label_fsize)\n', (7619, 7671), True, 'import matplotlib.pyplot as plt\n'), ((7743, 
7805), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['parameters.ylabel'], {'fontsize': 'parameters.label_fsize'}), '(parameters.ylabel, fontsize=parameters.label_fsize)\n', (7753, 7805), True, 'import matplotlib.pyplot as plt\n'), ((7810, 7881), 'matplotlib.pyplot.title', 'plt.title', (['parameters.title'], {'fontsize': 'parameters.title_fsize', 'wrap': '(True)'}), '(parameters.title, fontsize=parameters.title_fsize, wrap=True)\n', (7819, 7881), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1483), 'shutil.rmtree', 'shutil.rmtree', (['log_folder'], {}), '(log_folder)\n', (1471, 1483), False, 'import shutil\n'), ((1945, 1976), 'pickle.dump', 'pickle.dump', (['best_individual', 'f'], {}), '(best_individual, f)\n', (1956, 1976), False, 'import pickle\n'), ((2359, 2387), 'pickle.dump', 'pickle.dump', (['best_fitness', 'f'], {}), '(best_fitness, f)\n', (2370, 2387), False, 'import pickle\n'), ((2559, 2585), 'pickle.dump', 'pickle.dump', (['n_episodes', 'f'], {}), '(n_episodes, f)\n', (2570, 2585), False, 'import pickle\n'), ((3253, 3267), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3264, 3267), False, 'import pickle\n'), ((3487, 3501), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3498, 3501), False, 'import pickle\n'), ((4348, 4362), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4359, 4362), False, 'import pickle\n'), ((4549, 4578), 'matplotlib.pyplot.plot', 'plt.plot', (['n_episodes', 'fitness'], {}), '(n_episodes, fitness)\n', (4557, 4578), True, 'import matplotlib.pyplot as plt\n'), ((4587, 4609), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episodes"""'], {}), "('Episodes')\n", (4597, 4609), True, 'import matplotlib.pyplot as plt\n'), ((4628, 4645), 'matplotlib.pyplot.plot', 'plt.plot', (['fitness'], {}), '(fitness)\n', (4636, 4645), True, 'import matplotlib.pyplot as plt\n'), ((4654, 4678), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generation"""'], {}), "('Generation')\n", (4664, 4678), True, 'import matplotlib.pyplot as 
plt\n'), ((6536, 6575), 'numpy.arange', 'np.arange', (['startx', '(parameters.x_max + 1)'], {}), '(startx, parameters.x_max + 1)\n', (6545, 6575), True, 'import numpy as np\n'), ((6598, 6625), 'numpy.arange', 'np.arange', (['startx', '(endx + 1)'], {}), '(startx, endx + 1)\n', (6607, 6625), True, 'import numpy as np\n'), ((6704, 6777), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['n_episodes[i, :]', 'fitness[i, :]'], {'bounds_error': '(False)'}), '(n_episodes[i, :], fitness[i, :], bounds_error=False)\n', (6724, 6777), False, 'from scipy import interpolate\n'), ((7295, 7373), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_mean'], {'color': 'parameters.mean_color', 'label': 'parameters.legend_name'}), '(x, y_mean, color=parameters.mean_color, label=parameters.legend_name)\n', (7303, 7373), True, 'import matplotlib.pyplot as plt\n'), ((7440, 7535), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(y_mean - y_std)', '(y_mean + y_std)'], {'alpha': '(0.1)', 'color': 'parameters.std_color'}), '(x, y_mean - y_std, y_mean + y_std, alpha=0.1, color=\n parameters.std_color)\n', (7456, 7535), True, 'import matplotlib.pyplot as plt\n'), ((7709, 7738), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'parameters.x_max'], {}), '(0, parameters.x_max)\n', (7717, 7738), True, 'import matplotlib.pyplot as plt\n'), ((8130, 8181), 'matplotlib.pyplot.savefig', 'plt.savefig', (['parameters.path'], {'format': '"""svg"""', 'dpi': '(300)'}), "(parameters.path, format='svg', dpi=300)\n", (8141, 8181), True, 'import matplotlib.pyplot as plt\n'), ((8190, 8201), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8199, 8201), True, 'import matplotlib.pyplot as plt\n'), ((1097, 1111), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1105, 1111), False, 'import os\n'), ((7143, 7228), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y[:, i]'], {'color': 'parameters.ind_color', 'linestyle': '"""dashed"""', 'linewidth': '(1)'}), "(x, y[:, i], 
color=parameters.ind_color, linestyle='dashed',\n linewidth=1)\n", (7151, 7228), True, 'import matplotlib.pyplot as plt\n'), ((7958, 8079), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, parameters.x_max]', '[parameters.optimum, parameters.optimum]'], {'color': '"""k"""', 'linestyle': '"""dashed"""', 'linewidth': '(1)'}), "([0, parameters.x_max], [parameters.optimum, parameters.optimum],\n color='k', linestyle='dashed', linewidth=1)\n", (7966, 8079), True, 'import matplotlib.pyplot as plt\n'), ((7063, 7096), 'numpy.full', 'np.full', (['n_extrapolated', 'left[-1]'], {}), '(n_extrapolated, left[-1])\n', (7070, 7096), True, 'import numpy as np\n')] |
import numpy as np
from scipy import linalg
from pressio4py import logger, solvers, ode
class MySys1:
    """Toy least-squares system with constant residual and jacobian.

    Used to exercise the Gauss-Newton solver wiring: the residual is
    [0, 1, 2, 3, 4] and the 5x2 jacobian is filled row-major 0..9,
    independent of the state.
    """

    def createResidual(self):
        return np.zeros(5)

    def createJacobian(self):
        return np.zeros((5, 2))

    def residual(self, stateIn, R):
        # R[i] = i, as floats
        R[:] = np.arange(5, dtype=float)

    def jacobian(self, stateIn, J):
        # Row-major fill: J[i, j] = i * ncols + j
        J[:] = np.arange(J.size, dtype=float).reshape(J.shape)
class MyLinSolver:
    """Stub linear solver for the Gauss-Newton test.

    Verifies that the normal-equations matrix and right-hand side passed in
    match the analytically expected values for MySys1, then fills the
    solution vector with ones.
    """

    def solve(self, A, b, x):
        print(x)
        print("Python Lin solver")
        expected_A = np.array([[120., 140.], [140., 165]])
        expected_b = np.array([-60., -70.])
        assert np.allclose(expected_A, A)
        assert np.allclose(expected_b, b)
        x[:] = 1
def test_gn_neq_1():
    """Gauss-Newton on the constant toy system converges to [3, 3] in 2 iterations."""
    logger.initialize(logger.logto.terminal)
    logger.setVerbosity([logger.loglevel.debug])

    state = np.ones(2)
    problem = MySys1()
    lin_solver = MyLinSolver()
    print("lsO address = ", hex(id(lin_solver)))

    gn_solver = solvers.create_gauss_newton(problem, state, lin_solver)
    gn_solver.setUpdatingCriterion(solvers.update.Standard)
    gn_solver.setMaxIterations(2)
    gn_solver.solve(problem, state)
    print(state)
    assert np.allclose(state, np.array([3., 3.]))
    logger.finalize()
class RosenbrockSys:
    """Extended Rosenbrock least-squares problem: 4 unknowns, 6 residuals.

    The minimum is at x = (1, 1, 1, 1) where every residual vanishes.
    """

    def createResidual(self):
        return np.zeros(6)

    def createJacobian(self):
        return np.zeros((6, 4))

    def residual(self, x, R):
        x1, x2, x3, x4 = x
        R[:] = [10. * (x4 - x3 ** 2),
                10. * (x3 - x2 ** 2),
                10. * (x2 - x1 ** 2),
                1. - x1,
                1. - x2,
                1. - x3]

    def jacobian(self, x, J):
        x1, x2, x3 = x[0], x[1], x[2]
        # Only the nonzero entries are written; J is assumed pre-zeroed.
        J[0, 2], J[0, 3] = -20. * x3, 10.
        J[1, 1], J[1, 2] = -20. * x2, 10.
        J[2, 0], J[2, 1] = -20. * x1, 10.
        J[3, 0] = -1.
        J[4, 1] = -1.
        J[5, 2] = -1.
class MyLinSolver2:
    """Dense direct linear solver backed by LAPACK (LU factorize + solve)."""

    def solve(self, A, b, x):
        # dgetrf: LU factorization with partial pivoting; keep A intact.
        lu, piv, info = linalg.lapack.dgetrf(A, overwrite_a=False)
        # dgetrs: back-substitution (trans=0, overwrite_b=0).
        x[:], info = linalg.lapack.dgetrs(lu, piv, b, 0, 0)
def test_gn_neq_rosenbrock():
    """Gauss-Newton with a LAPACK linear solver converges on Rosenbrock."""
    print("\n")
    logger.initialize(logger.logto.terminal)
    logger.setVerbosity([logger.loglevel.debug])

    state = np.array([-0.05, 1.1, 1.2, 1.5])
    problem = RosenbrockSys()
    lin_solver = MyLinSolver2()

    gn_solver = solvers.create_gauss_newton(problem, state, lin_solver)
    gn_solver.setTolerance(1e-5)
    gn_solver.solve(problem, state)
    print(state)

    # Reference solution: all components approach 1 to ~1e-8.
    expected = np.array([1.00000001567414e+00,
                         9.99999999124769e-01,
                         9.99999996519930e-01,
                         9.99999988898883e-01])
    assert np.allclose(expected, state)
    logger.finalize()
| [
"pressio4py.logger.initialize",
"numpy.allclose",
"numpy.ones",
"pressio4py.logger.finalize",
"numpy.array",
"numpy.zeros",
"scipy.linalg.lapack.dgetrf",
"scipy.linalg.lapack.dgetrs",
"pressio4py.solvers.create_gauss_newton",
"pressio4py.logger.setVerbosity"
] | [((741, 781), 'pressio4py.logger.initialize', 'logger.initialize', (['logger.logto.terminal'], {}), '(logger.logto.terminal)\n', (758, 781), False, 'from pressio4py import logger, solvers, ode\n'), ((784, 828), 'pressio4py.logger.setVerbosity', 'logger.setVerbosity', (['[logger.loglevel.debug]'], {}), '([logger.loglevel.debug])\n', (803, 828), False, 'from pressio4py import logger, solvers, ode\n'), ((840, 850), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (847, 850), True, 'import numpy as np\n'), ((939, 983), 'pressio4py.solvers.create_gauss_newton', 'solvers.create_gauss_newton', (['sys', 'state', 'lsO'], {}), '(sys, state, lsO)\n', (966, 983), False, 'from pressio4py import logger, solvers, ode\n'), ((1156, 1173), 'pressio4py.logger.finalize', 'logger.finalize', ([], {}), '()\n', (1171, 1173), False, 'from pressio4py import logger, solvers, ode\n'), ((1955, 1995), 'pressio4py.logger.initialize', 'logger.initialize', (['logger.logto.terminal'], {}), '(logger.logto.terminal)\n', (1972, 1995), False, 'from pressio4py import logger, solvers, ode\n'), ((1998, 2042), 'pressio4py.logger.setVerbosity', 'logger.setVerbosity', (['[logger.loglevel.debug]'], {}), '([logger.loglevel.debug])\n', (2017, 2042), False, 'from pressio4py import logger, solvers, ode\n'), ((2054, 2086), 'numpy.array', 'np.array', (['[-0.05, 1.1, 1.2, 1.5]'], {}), '([-0.05, 1.1, 1.2, 1.5])\n', (2062, 2086), True, 'import numpy as np\n'), ((2143, 2187), 'pressio4py.solvers.create_gauss_newton', 'solvers.create_gauss_newton', (['sys', 'state', 'lsO'], {}), '(sys, state, lsO)\n', (2170, 2187), False, 'from pressio4py import logger, solvers, ode\n'), ((2264, 2353), 'numpy.array', 'np.array', (['[1.00000001567414, 0.999999999124769, 0.99999999651993, 0.999999988898883]'], {}), '([1.00000001567414, 0.999999999124769, 0.99999999651993, \n 0.999999988898883])\n', (2272, 2353), True, 'import numpy as np\n'), ((2417, 2441), 'numpy.allclose', 'np.allclose', (['gold', 'state'], {}), '(gold, state)\n', 
(2428, 2441), True, 'import numpy as np\n'), ((2445, 2462), 'pressio4py.logger.finalize', 'logger.finalize', ([], {}), '()\n', (2460, 2462), False, 'from pressio4py import logger, solvers, ode\n'), ((143, 154), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (151, 154), True, 'import numpy as np\n'), ((195, 211), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (203, 211), True, 'import numpy as np\n'), ((560, 600), 'numpy.array', 'np.array', (['[[120.0, 140.0], [140.0, 165]]'], {}), '([[120.0, 140.0], [140.0, 165]])\n', (568, 600), True, 'import numpy as np\n'), ((609, 631), 'numpy.allclose', 'np.allclose', (['gold_A', 'A'], {}), '(gold_A, A)\n', (620, 631), True, 'import numpy as np\n'), ((646, 670), 'numpy.array', 'np.array', (['[-60.0, -70.0]'], {}), '([-60.0, -70.0])\n', (654, 670), True, 'import numpy as np\n'), ((680, 702), 'numpy.allclose', 'np.allclose', (['gold_b', 'b'], {}), '(gold_b, b)\n', (691, 702), True, 'import numpy as np\n'), ((1132, 1152), 'numpy.array', 'np.array', (['[3.0, 3.0]'], {}), '([3.0, 3.0])\n', (1140, 1152), True, 'import numpy as np\n'), ((1236, 1247), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (1244, 1247), True, 'import numpy as np\n'), ((1288, 1304), 'numpy.zeros', 'np.zeros', (['(6, 4)'], {}), '((6, 4))\n', (1296, 1304), True, 'import numpy as np\n'), ((1806, 1848), 'scipy.linalg.lapack.dgetrf', 'linalg.lapack.dgetrf', (['A'], {'overwrite_a': '(False)'}), '(A, overwrite_a=False)\n', (1826, 1848), False, 'from scipy import linalg\n'), ((1866, 1907), 'scipy.linalg.lapack.dgetrs', 'linalg.lapack.dgetrs', (['lumat', 'piv', 'b', '(0)', '(0)'], {}), '(lumat, piv, b, 0, 0)\n', (1886, 1907), False, 'from scipy import linalg\n')] |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import os
import pandas
import numpy as np
import pyarrow
import pytest
import re
from modin.config import IsExperimental, Engine, StorageFormat
from modin.pandas.test.utils import io_ops_bad_exc
from .utils import eval_io, ForceOmnisciImport, set_execution_mode, run_and_compare
from pandas.core.dtypes.common import is_list_like
IsExperimental.put(True)
Engine.put("native")
StorageFormat.put("omnisci")
import modin.pandas as pd
from modin.pandas.test.utils import (
df_equals,
bool_arg_values,
to_pandas,
test_data_values,
test_data_keys,
generate_multiindex,
eval_general,
df_equals_with_non_stable_indices,
)
from modin.utils import try_cast_to_pandas
from modin.experimental.core.execution.native.implementations.omnisci_on_native.partitioning.partition_manager import (
OmnisciOnNativeDataframePartitionManager,
)
from modin.experimental.core.execution.native.implementations.omnisci_on_native.df_algebra import (
FrameNode,
)
@pytest.mark.usefixtures("TestReadCSVFixture")
class TestCSV:
    """Tests for ``read_csv`` on the OmniSci storage format.

    Each test compares Modin's result (optionally forced through the
    Arrow-based CSV engine) against plain pandas via ``eval_io`` or
    direct ``df_equals`` checks.
    """

    # Resolve the repository root relative to the installed modin package
    # so that test data files can be located regardless of cwd.
    from modin import __file__ as modin_root
    root = os.path.dirname(
        os.path.dirname(os.path.abspath(modin_root)) + ".."
    )  # root of modin repo
    # Column names / dtypes for examples/data/boston_housing.csv.
    boston_housing_names = [
        "index",
        "CRIM",
        "ZN",
        "INDUS",
        "CHAS",
        "NOX",
        "RM",
        "AGE",
        "DIS",
        "RAD",
        "TAX",
        "PTRATIO",
        "B",
        "LSTAT",
        "PRICE",
    ]
    boston_housing_dtypes = {
        "index": "int64",
        "CRIM": "float64",
        "ZN": "float64",
        "INDUS": "float64",
        "CHAS": "float64",
        "NOX": "float64",
        "RM": "float64",
        "AGE": "float64",
        "DIS": "float64",
        "RAD": "float64",
        "TAX": "float64",
        "PTRATIO": "float64",
        "B": "float64",
        "LSTAT": "float64",
        "PRICE": "float64",
    }
    def test_usecols_csv(self):
        """check with the following arguments: names, dtype, skiprows, delimiter"""
        csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
        for kwargs in (
            {"delimiter": ","},
            {"sep": None},
            {"skiprows": 1, "names": ["A", "B", "C", "D", "E"]},
            {"dtype": {"a": "int32", "e": "string"}},
            {"dtype": {"a": np.dtype("int32"), "b": np.dtype("int64"), "e": "string"}},
        ):
            eval_io(
                fn_name="read_csv",
                md_extra_kwargs={"engine": "arrow"},
                # read_csv kwargs
                filepath_or_buffer=csv_file,
                **kwargs,
            )
    def test_housing_csv(self):
        """Read a real-world CSV with explicit names/dtypes via the Arrow engine."""
        csv_file = os.path.join(self.root, "examples/data/boston_housing.csv")
        for kwargs in (
            {
                "skiprows": 1,
                "names": self.boston_housing_names,
                "dtype": self.boston_housing_dtypes,
            },
        ):
            eval_io(
                fn_name="read_csv",
                md_extra_kwargs={"engine": "arrow"},
                # read_csv kwargs
                filepath_or_buffer=csv_file,
                **kwargs,
            )
    def test_time_parsing(self):
        """Check ``parse_dates`` handling: datetime components must match pandas."""
        csv_file = os.path.join(
            self.root, "modin/pandas/test/data", "test_time_parsing.csv"
        )
        for kwargs in (
            {
                "skiprows": 1,
                "names": [
                    "timestamp",
                    "symbol",
                    "high",
                    "low",
                    "open",
                    "close",
                    "spread",
                    "volume",
                ],
                "parse_dates": ["timestamp"],
                "dtype": {"symbol": "string"},
            },
        ):
            rp = pandas.read_csv(csv_file, **kwargs)
            rm = pd.read_csv(csv_file, engine="arrow", **kwargs)
            # Force import into OmniSci before materializing to pandas so the
            # comparison covers the actual OmniSci-backed frame.
            with ForceOmnisciImport(rm):
                rm = to_pandas(rm)
                df_equals(rm["timestamp"].dt.year, rp["timestamp"].dt.year)
                df_equals(rm["timestamp"].dt.month, rp["timestamp"].dt.month)
                df_equals(rm["timestamp"].dt.day, rp["timestamp"].dt.day)
    def test_csv_fillna(self):
        """``fillna`` applied on a column read from CSV must match pandas."""
        csv_file = os.path.join(self.root, "examples/data/boston_housing.csv")
        for kwargs in (
            {
                "skiprows": 1,
                "names": self.boston_housing_names,
                "dtype": self.boston_housing_dtypes,
            },
        ):
            eval_io(
                fn_name="read_csv",
                md_extra_kwargs={"engine": "arrow"},
                comparator=lambda df1, df2: df_equals(
                    df1["CRIM"].fillna(1000), df2["CRIM"].fillna(1000)
                ),
                # read_csv kwargs
                filepath_or_buffer=csv_file,
                **kwargs,
            )
    @pytest.mark.parametrize("null_dtype", ["category", "float64"])
    def test_null_col(self, null_dtype):
        """Read a CSV with an all-null column of the given dtype."""
        csv_file = os.path.join(
            self.root, "modin/pandas/test/data", "test_null_col.csv"
        )
        ref = pandas.read_csv(
            csv_file,
            names=["a", "b", "c"],
            dtype={"a": "int64", "b": "int64", "c": null_dtype},
            skiprows=1,
        )
        ref["a"] = ref["a"] + ref["b"]
        exp = pd.read_csv(
            csv_file,
            names=["a", "b", "c"],
            dtype={"a": "int64", "b": "int64", "c": null_dtype},
            skiprows=1,
        )
        exp["a"] = exp["a"] + exp["b"]
        # df_equals cannot compare empty categories
        if null_dtype == "category":
            ref["c"] = ref["c"].astype("string")
            with ForceOmnisciImport(exp):
                exp = to_pandas(exp)
                exp["c"] = exp["c"].astype("string")
        df_equals(ref, exp)
    def test_read_and_concat(self):
        """Concatenating two frames read from the same CSV must match pandas."""
        csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
        ref1 = pandas.read_csv(csv_file)
        ref2 = pandas.read_csv(csv_file)
        ref = pandas.concat([ref1, ref2])
        exp1 = pandas.read_csv(csv_file)
        exp2 = pandas.read_csv(csv_file)
        exp = pd.concat([exp1, exp2])
        with ForceOmnisciImport(exp):
            df_equals(ref, exp)
    @pytest.mark.parametrize("names", [None, ["a", "b", "c", "d", "e"]])
    @pytest.mark.parametrize("header", [None, 0])
    def test_from_csv(self, header, names):
        """Cross-check the ``header``/``names`` argument combinations."""
        csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
        eval_io(
            fn_name="read_csv",
            # read_csv kwargs
            filepath_or_buffer=csv_file,
            header=header,
            names=names,
        )
    @pytest.mark.parametrize("kwargs", [{"sep": "|"}, {"delimiter": "|"}])
    def test_sep_delimiter(self, kwargs):
        """``sep`` and ``delimiter`` are aliases; both must work."""
        csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_delim.csv")
        eval_io(
            fn_name="read_csv",
            # read_csv kwargs
            filepath_or_buffer=csv_file,
            **kwargs,
        )
    @pytest.mark.skip(reason="https://github.com/modin-project/modin/issues/2174")
    def test_float32(self):
        """float32 dtype round-trip through the Arrow engine (known issue)."""
        csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
        kwargs = {
            "dtype": {"a": "float32", "b": "float32"},
        }
        pandas_df = pandas.read_csv(csv_file, **kwargs)
        pandas_df["a"] = pandas_df["a"] + pandas_df["b"]
        modin_df = pd.read_csv(csv_file, **kwargs, engine="arrow")
        modin_df["a"] = modin_df["a"] + modin_df["b"]
        with ForceOmnisciImport(modin_df):
            df_equals(modin_df, pandas_df)
    # Datetime Handling tests
    @pytest.mark.parametrize("engine", [None, "arrow"])
    @pytest.mark.parametrize(
        "parse_dates",
        [
            True,
            False,
            ["col2"],
            ["c2"],
            [["col2", "col3"]],
            {"col23": ["col2", "col3"]},
        ],
    )
    @pytest.mark.parametrize("names", [None, [f"c{x}" for x in range(1, 7)]])
    def test_read_csv_datetime(
        self,
        engine,
        parse_dates,
        names,
    ):
        # Nested-list and dict forms of parse_dates are unsupported by Modin.
        parse_dates_unsupported = isinstance(parse_dates, dict) or (
            isinstance(parse_dates, list) and isinstance(parse_dates[0], list)
        )
        if parse_dates_unsupported and engine == "arrow" and not names:
            pytest.skip(
                "In these cases Modin raises `ArrowEngineException` while pandas "
                "doesn't raise any exceptions that causes tests fails"
            )
        # In these cases Modin raises `ArrowEngineException` while pandas
        # raises `ValueError`, so skipping exception type checking
        skip_exc_type_check = parse_dates_unsupported and engine == "arrow"
        eval_io(
            fn_name="read_csv",
            md_extra_kwargs={"engine": engine},
            check_exception_type=not skip_exc_type_check,
            raising_exceptions=None if skip_exc_type_check else io_ops_bad_exc,
            # read_csv kwargs
            filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
            parse_dates=parse_dates,
            names=names,
        )
    @pytest.mark.parametrize("engine", [None, "arrow"])
    @pytest.mark.parametrize(
        "usecols",
        [
            None,
            ["col1"],
            ["col1", "col1"],
            ["col1", "col2", "col6"],
            ["col6", "col2", "col1"],
            [0],
            [0, 0],
            [0, 1, 5],
            [5, 1, 0],
            lambda x: x in ["col1", "col2"],
        ],
    )
    def test_read_csv_col_handling(
        self,
        engine,
        usecols,
    ):
        """Check ``usecols`` in its name/index/duplicate/callable forms."""
        eval_io(
            fn_name="read_csv",
            check_kwargs_callable=not callable(usecols),
            md_extra_kwargs={"engine": engine},
            # read_csv kwargs
            filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
            usecols=usecols,
        )
class TestMasks:
    """Tests for row/column selection: projection, drop, iloc and filtering."""

    data = {
        "a": [1, 1, 2, 2, 3],
        "b": [None, None, 2, 1, 3],
        "c": [3, None, None, 2, 1],
    }
    cols_values = ["a", ["a", "b"], ["a", "b", "c"]]
    @pytest.mark.parametrize("cols", cols_values)
    def test_projection(self, cols):
        # Column selection via __getitem__ with a label or list of labels.
        def select_columns(df, cols, **kwargs):
            return df[cols]
        run_and_compare(select_columns, data=self.data, cols=cols)
    def test_drop(self):
        def drop_column_a(df, **kwargs):
            return df.drop(columns="a")
        run_and_compare(drop_column_a, data=self.data)
    def test_iloc(self):
        # Positional row selection; may require a subquery on OmniSci.
        def take_first_rows(df, **kwargs):
            return df.iloc[[0, 1]]
        run_and_compare(take_first_rows, data=self.data, allow_subqueries=True)
    def test_empty(self):
        def passthrough(df, **kwargs):
            return df
        run_and_compare(passthrough, data=None)
    def test_filter(self):
        def rows_where_a_eq_1(df, **kwargs):
            return df[df["a"] == 1]
        run_and_compare(rows_where_a_eq_1, data=self.data)
    def test_filter_with_index(self):
        # Filtering applied after an aggregation that produces a new index.
        def filter_aggregated(df, **kwargs):
            df = df.groupby("a").sum()
            return df[df["b"] > 1]
        run_and_compare(filter_aggregated, data=self.data)
    def test_filter_proj(self):
        # Filter condition mixing the original frame with a derived projection.
        def filter_mixed_expr(df, **kwargs):
            shifted = df + 2
            return shifted[(df["a"] + shifted["b"]) > 1]
        run_and_compare(filter_mixed_expr, data=self.data)
    def test_filter_drop(self):
        # Projection, filter and in-place column mutation chained together.
        def filter_then_mutate(df, **kwargs):
            df = df[["a", "b"]]
            df = df[df["a"] != 1]
            df["a"] = df["a"] * df["b"]
            return df
        run_and_compare(filter_then_mutate, data=self.data)
class TestMultiIndex:
    """Tests for MultiIndex construction, naming and ``reset_index``."""

    data = {"a": np.arange(24), "b": np.arange(24)}
    @pytest.mark.parametrize("names", [None, ["", ""], ["name", "name"]])
    def test_dup_names(self, names):
        """Duplicate (or empty) level names must round-trip like pandas."""
        index = pandas.MultiIndex.from_tuples(
            [(i, j) for i in range(3) for j in range(8)], names=names
        )
        pandas_df = pandas.DataFrame(self.data, index=index) + 1
        modin_df = pd.DataFrame(self.data, index=index) + 1
        df_equals(pandas_df, modin_df)
    @pytest.mark.parametrize(
        "names",
        [
            None,
            [None, "s", None],
            ["i1", "i2", "i3"],
            ["i1", "i1", "i3"],
            ["i1", "i2", "a"],
        ],
    )
    def test_reset_index(self, names):
        """``reset_index`` on a 3-level MultiIndex with tricky level names
        (missing, duplicated, colliding with a data column)."""
        index = pandas.MultiIndex.from_tuples(
            [(i, j, k) for i in range(2) for j in range(3) for k in range(4)],
            names=names,
        )
        def applier(lib):
            df = lib.DataFrame(self.data, index=index) + 1
            return df.reset_index()
        eval_general(pd, pandas, applier)
    @pytest.mark.parametrize("is_multiindex", [True, False])
    @pytest.mark.parametrize(
        "column_names", [None, ["level1", None], ["level1", "level2"]]
    )
    def test_reset_index_multicolumns(self, is_multiindex, column_names):
        """``reset_index`` when the frame also has MultiIndex columns."""
        index = (
            pandas.MultiIndex.from_tuples(
                [(i, j, k) for i in range(2) for j in range(3) for k in range(4)],
                names=["l1", "l2", "l3"],
            )
            if is_multiindex
            else pandas.Index(np.arange(len(self.data["a"])), name="index")
        )
        columns = pandas.MultiIndex.from_tuples(
            [("a", "b"), ("b", "c")], names=column_names
        )
        data = np.array(list(self.data.values())).T
        def applier(df, **kwargs):
            df = df + 1
            return df.reset_index(drop=False)
        run_and_compare(
            fn=applier,
            data=data,
            constructor_kwargs={"index": index, "columns": columns},
        )
    def test_set_index_name(self):
        """Setting a single index name through the private query-compiler API."""
        index = pandas.Index.__new__(pandas.Index, data=[i for i in range(24)])
        pandas_df = pandas.DataFrame(self.data, index=index)
        pandas_df.index.name = "new_name"
        modin_df = pd.DataFrame(self.data, index=index)
        modin_df._query_compiler.set_index_name("new_name")
        df_equals(pandas_df, modin_df)
    def test_set_index_names(self):
        """Setting all MultiIndex level names through the query-compiler API."""
        index = pandas.MultiIndex.from_tuples(
            [(i, j, k) for i in range(2) for j in range(3) for k in range(4)]
        )
        pandas_df = pandas.DataFrame(self.data, index=index)
        pandas_df.index.names = ["new_name1", "new_name2", "new_name3"]
        modin_df = pd.DataFrame(self.data, index=index)
        modin_df._query_compiler.set_index_names(
            ["new_name1", "new_name2", "new_name3"]
        )
        df_equals(pandas_df, modin_df)
class TestFillna:
    """Tests for NaN replacement via ``fillna``."""

    data = {"a": [1, 1, None], "b": [None, None, 2], "c": [3, None, None]}
    values = [1, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}]
    @pytest.mark.parametrize("value", values)
    def test_fillna_all(self, value):
        # Frame-wide fillna with a scalar or a per-column mapping.
        def fill_whole_frame(df, value, **kwargs):
            return df.fillna(value)
        run_and_compare(fill_whole_frame, data=self.data, value=value)
    def test_fillna_bool(self):
        # fillna on a boolean column derived from a comparison.
        def fill_bool_column(df, **kwargs):
            df["a"] = df["a"] == 1
            df["a"] = df["a"].fillna(False)
            return df
        run_and_compare(fill_bool_column, data=self.data)
class TestConcat:
    """Tests for ``concat``, lazy column insertion and index-name handling."""

    data = {
        "a": [1, 2, 3],
        "b": [10, 20, 30],
        "d": [1000, 2000, 3000],
        "e": [11, 22, 33],
    }
    data2 = {
        "a": [4, 5, 6],
        "c": [400, 500, 600],
        "b": [40, 50, 60],
        "f": [444, 555, 666],
    }
    data3 = {
        "f": [2, 3, 4],
        "g": [400, 500, 600],
        "h": [20, 30, 40],
    }
    @pytest.mark.parametrize("join", ["inner", "outer"])
    @pytest.mark.parametrize("sort", bool_arg_values)
    @pytest.mark.parametrize("ignore_index", bool_arg_values)
    def test_concat(self, join, sort, ignore_index):
        def concat(lib, df1, df2, join, sort, ignore_index):
            return lib.concat(
                [df1, df2], join=join, sort=sort, ignore_index=ignore_index
            )
        run_and_compare(
            concat,
            data=self.data,
            data2=self.data2,
            join=join,
            sort=sort,
            ignore_index=ignore_index,
        )
    def test_concat_with_same_df(self):
        """Appending a column sourced from the same frame."""
        def concat(df, **kwargs):
            df["f"] = df["a"]
            return df
        run_and_compare(concat, data=self.data)
    def test_setitem_lazy(self):
        """Column assignment of expressions and scalars stays lazy on OmniSci."""
        def applier(df, **kwargs):
            df = df + 1
            df["a"] = df["a"] + 1
            df["e"] = df["a"] + 1
            df["new_int8"] = np.int8(10)
            df["new_int16"] = np.int16(10)
            df["new_int32"] = np.int32(10)
            df["new_int64"] = np.int64(10)
            df["new_int"] = 10
            df["new_float"] = 5.5
            df["new_float64"] = np.float64(10.1)
            return df
        run_and_compare(applier, data=self.data)
    def test_setitem_default(self):
        """Assigning array/Series values defaults to pandas (force_lazy=False)."""
        def applier(df, lib, **kwargs):
            df = df + 1
            df["a"] = np.arange(3)
            df["b"] = lib.Series(np.arange(3))
            return df
        run_and_compare(applier, data=self.data, force_lazy=False)
    def test_insert_lazy(self):
        """``insert`` of scalars and frame-derived expressions stays lazy."""
        def applier(df, **kwargs):
            df = df + 1
            df.insert(2, "new_int", 10)
            df.insert(1, "new_float", 5.5)
            df.insert(0, "new_a", df["a"] + 1)
            return df
        run_and_compare(applier, data=self.data)
    def test_insert_default(self):
        """``insert`` of array/Series values defaults to pandas."""
        def applier(df, lib, **kwargs):
            df = df + 1
            df.insert(1, "new_range", np.arange(3))
            df.insert(1, "new_series", lib.Series(np.arange(3)))
            return df
        run_and_compare(applier, data=self.data, force_lazy=False)
    def test_concat_many(self):
        def concat(df1, df2, lib, **kwargs):
            df3 = df1.copy()
            df4 = df2.copy()
            return lib.concat([df1, df2, df3, df4])
        def sort_comparator(df1, df2):
            """Sort and verify equality of the passed frames."""
            # We sort values because order of rows in the 'union all' result is inconsistent in OmniSci
            df1, df2 = (
                try_cast_to_pandas(df).sort_values(df.columns[0]) for df in (df1, df2)
            )
            return df_equals(df1, df2)
        run_and_compare(
            concat, data=self.data, data2=self.data2, comparator=sort_comparator
        )
    def test_concat_agg(self):
        """Concatenation of two group-by aggregation results."""
        def concat(lib, df1, df2):
            df1 = df1.groupby("a", as_index=False).agg(
                {"b": "sum", "d": "sum", "e": "sum"}
            )
            df2 = df2.groupby("a", as_index=False).agg(
                {"c": "sum", "b": "sum", "f": "sum"}
            )
            return lib.concat([df1, df2])
        run_and_compare(concat, data=self.data, data2=self.data2, allow_subqueries=True)
    @pytest.mark.parametrize("join", ["inner", "outer"])
    @pytest.mark.parametrize("sort", bool_arg_values)
    @pytest.mark.parametrize("ignore_index", bool_arg_values)
    def test_concat_single(self, join, sort, ignore_index):
        """``concat`` of a one-element list must still apply its kwargs."""
        def concat(lib, df, join, sort, ignore_index):
            return lib.concat([df], join=join, sort=sort, ignore_index=ignore_index)
        run_and_compare(
            concat,
            data=self.data,
            join=join,
            sort=sort,
            ignore_index=ignore_index,
        )
    def test_groupby_concat_single(self):
        def concat(lib, df):
            df = lib.concat([df])
            return df.groupby("a").agg({"b": "min"})
        run_and_compare(
            concat,
            data=self.data,
        )
    @pytest.mark.parametrize("join", ["inner"])
    @pytest.mark.parametrize("sort", bool_arg_values)
    @pytest.mark.parametrize("ignore_index", bool_arg_values)
    def test_concat_join(self, join, sort, ignore_index):
        """Column-wise (axis=1) concatenation of frames with disjoint columns."""
        def concat(lib, df1, df2, join, sort, ignore_index, **kwargs):
            return lib.concat(
                [df1, df2], axis=1, join=join, sort=sort, ignore_index=ignore_index
            )
        run_and_compare(
            concat,
            data=self.data,
            data2=self.data3,
            join=join,
            sort=sort,
            ignore_index=ignore_index,
        )
    def test_concat_index_name(self):
        """axis=1 inner concat with differing, then matching, index names."""
        df1 = pandas.DataFrame(self.data)
        df1 = df1.set_index("a")
        df2 = pandas.DataFrame(self.data3)
        df2 = df2.set_index("f")
        ref = pandas.concat([df1, df2], axis=1, join="inner")
        exp = pd.concat([df1, df2], axis=1, join="inner")
        df_equals(ref, exp)
        df2.index.name = "a"
        ref = pandas.concat([df1, df2], axis=1, join="inner")
        exp = pd.concat([df1, df2], axis=1, join="inner")
        df_equals(ref, exp)
    def test_concat_index_names(self):
        """Same as above but with 2-level MultiIndex index names."""
        df1 = pandas.DataFrame(self.data)
        df1 = df1.set_index(["a", "b"])
        df2 = pandas.DataFrame(self.data3)
        df2 = df2.set_index(["f", "h"])
        ref = pandas.concat([df1, df2], axis=1, join="inner")
        exp = pd.concat([df1, df2], axis=1, join="inner")
        df_equals(ref, exp)
        df2.index.names = ["a", "b"]
        ref = pandas.concat([df1, df2], axis=1, join="inner")
        exp = pd.concat([df1, df2], axis=1, join="inner")
        df_equals(ref, exp)
class TestGroupby:
    """Tests for group-by aggregations on the OmniSci storage format,
    including taxi-benchmark and H2O db-benchmark query shapes.

    Fix: ``test_groupby_agg_default_to_pandas`` previously passed
    ``lambda_func`` to the second ``run_and_compare`` call, leaving
    ``not_implemented_func`` (the ``"cumprod"`` default-to-pandas path)
    defined but never executed. It now runs the intended function.
    """

    data = {
        "a": [1, 1, 2, 2, 2, 1],
        "b": [11, 21, 12, 22, 32, 11],
        "c": [101, 201, 202, 202, 302, 302],
    }
    cols_value = ["a", ["a", "b"]]
    @pytest.mark.parametrize("cols", cols_value)
    @pytest.mark.parametrize("as_index", bool_arg_values)
    def test_groupby_sum(self, cols, as_index):
        def groupby_sum(df, cols, as_index, **kwargs):
            return df.groupby(cols, as_index=as_index).sum()
        run_and_compare(groupby_sum, data=self.data, cols=cols, as_index=as_index)
    @pytest.mark.parametrize("cols", cols_value)
    @pytest.mark.parametrize("as_index", bool_arg_values)
    def test_groupby_count(self, cols, as_index):
        def groupby_count(df, cols, as_index, **kwargs):
            return df.groupby(cols, as_index=as_index).count()
        run_and_compare(groupby_count, data=self.data, cols=cols, as_index=as_index)
    @pytest.mark.xfail(
        reason="Currently mean() passes a lambda into query compiler which cannot be executed on OmniSci engine"
    )
    @pytest.mark.parametrize("cols", cols_value)
    @pytest.mark.parametrize("as_index", bool_arg_values)
    def test_groupby_mean(self, cols, as_index):
        def groupby_mean(df, cols, as_index, **kwargs):
            return df.groupby(cols, as_index=as_index).mean()
        run_and_compare(groupby_mean, data=self.data, cols=cols, as_index=as_index)
    @pytest.mark.parametrize("cols", cols_value)
    @pytest.mark.parametrize("as_index", bool_arg_values)
    def test_groupby_proj_sum(self, cols, as_index):
        # Aggregation on a projected column; cannot stay lazy.
        def groupby_sum(df, cols, as_index, **kwargs):
            return df.groupby(cols, as_index=as_index).c.sum()
        run_and_compare(
            groupby_sum, data=self.data, cols=cols, as_index=as_index, force_lazy=False
        )
    @pytest.mark.parametrize("agg", ["count", "size", "nunique"])
    def test_groupby_agg(self, agg):
        def groupby(df, agg, **kwargs):
            return df.groupby("a").agg({"b": agg})
        run_and_compare(groupby, data=self.data, agg=agg)
    def test_groupby_agg_default_to_pandas(self):
        """Aggregations not supported by OmniSci must fall back to pandas."""
        def lambda_func(df, **kwargs):
            return df.groupby("a").agg(lambda df: (df.mean() - df.sum()) // 2)
        run_and_compare(lambda_func, data=self.data, force_lazy=False)
        def not_implemented_func(df, **kwargs):
            return df.groupby("a").agg("cumprod")
        # BUG FIX: this second check used to re-run `lambda_func`, so the
        # "cumprod" fallback path was never actually exercised.
        run_and_compare(not_implemented_func, data=self.data, force_lazy=False)
    @pytest.mark.xfail(
        reason="Function specified as a string should be passed into query compiler API, but currently it is transformed into a lambda"
    )
    @pytest.mark.parametrize("cols", cols_value)
    @pytest.mark.parametrize("as_index", bool_arg_values)
    def test_groupby_agg_mean(self, cols, as_index):
        def groupby_mean(df, cols, as_index, **kwargs):
            return df.groupby(cols, as_index=as_index).agg("mean")
        run_and_compare(groupby_mean, data=self.data, cols=cols, as_index=as_index)
    def test_groupby_lazy_multiindex(self):
        index = generate_multiindex(len(self.data["a"]))
        def groupby(df, *args, **kwargs):
            df = df + 1
            return df.groupby("a").agg({"b": "size"})
        run_and_compare(groupby, data=self.data, constructor_kwargs={"index": index})
    def test_groupby_lazy_squeeze(self):
        def applier(df, **kwargs):
            return df.groupby("a").sum().squeeze(axis=1)
        run_and_compare(
            applier,
            data=self.data,
            constructor_kwargs={"columns": ["a", "b"]},
            force_lazy=True,
        )
    @pytest.mark.parametrize("method", ["sum", "size"])
    def test_groupby_series(self, method):
        # Grouping a Series by itself.
        def groupby(df, **kwargs):
            ser = df[df.columns[0]]
            return getattr(ser.groupby(ser), method)()
        run_and_compare(groupby, data=self.data)
    def test_groupby_size(self):
        def groupby(df, **kwargs):
            return df.groupby("a").size()
        run_and_compare(groupby, data=self.data)
    @pytest.mark.parametrize("by", [["a"], ["a", "b", "c"]])
    @pytest.mark.parametrize("agg", ["sum", "size"])
    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_agg_by_col(self, by, agg, as_index):
        """``agg`` given a string, a partial dict and a full dict over `by`."""
        def simple_agg(df, **kwargs):
            return df.groupby(by, as_index=as_index).agg(agg)
        run_and_compare(simple_agg, data=self.data)
        def dict_agg(df, **kwargs):
            return df.groupby(by, as_index=as_index).agg({by[0]: agg})
        run_and_compare(dict_agg, data=self.data)
        def dict_agg_all_cols(df, **kwargs):
            return df.groupby(by, as_index=as_index).agg({col: agg for col in by})
        run_and_compare(dict_agg_all_cols, data=self.data)
    # modin-issue#3461
    def test_groupby_pure_by(self):
        data = [1, 1, 2, 2]
        # Test when 'by' is a 'TransformNode'
        run_and_compare(lambda df: df.groupby(df).sum(), data=data, force_lazy=True)
        # Test when 'by' is a 'FrameNode'
        md_ser, pd_ser = pd.Series(data), pandas.Series(data)
        md_ser._query_compiler._modin_frame._execute()
        assert isinstance(
            md_ser._query_compiler._modin_frame._op, FrameNode
        ), "Triggering execution of the Modin frame supposed to set 'FrameNode' as a frame's op"
        set_execution_mode(md_ser, "lazy")
        md_res = md_ser.groupby(md_ser).sum()
        set_execution_mode(md_res, None)
        pd_res = pd_ser.groupby(pd_ser).sum()
        df_equals(md_res, pd_res)
    taxi_data = {
        "a": [1, 1, 2, 2],
        "b": [11, 21, 12, 11],
        "c": pandas.to_datetime(
            ["20190902", "20180913", "20190921", "20180903"], format="%Y%m%d"
        ),
        "d": [11.5, 21.2, 12.8, 13.4],
    }
    # TODO: emulate taxi queries with group by category types when we have loading
    # using arrow
    # Another way of doing taxi q1 is
    # res = df.groupby("cab_type").size() - this should be tested later as well
    def test_taxi_q1(self):
        def taxi_q1(df, **kwargs):
            return df.groupby("a").size()
        run_and_compare(taxi_q1, data=self.taxi_data)
    def test_taxi_q2(self):
        def taxi_q2(df, **kwargs):
            return df.groupby("a").agg({"b": "mean"})
        run_and_compare(taxi_q2, data=self.taxi_data)
    @pytest.mark.parametrize("as_index", bool_arg_values)
    def test_taxi_q3(self, as_index):
        def taxi_q3(df, as_index, **kwargs):
            return df.groupby(["b", df["c"].dt.year], as_index=as_index).size()
        run_and_compare(taxi_q3, data=self.taxi_data, as_index=as_index)
    def test_groupby_expr_col(self):
        """Group by columns that are themselves computed expressions."""
        def groupby(df, **kwargs):
            df = df.loc[:, ["b", "c"]]
            df["year"] = df["c"].dt.year
            df["month"] = df["c"].dt.month
            df["id1"] = df["year"] * 12 + df["month"]
            df["id2"] = (df["id1"] - 24000) // 12
            df = df.groupby(["id1", "id2"], as_index=False).agg({"b": "max"})
            return df
        run_and_compare(groupby, data=self.taxi_data)
    def test_series_astype(self):
        def series_astype(df, **kwargs):
            return df["d"].astype("int")
        run_and_compare(series_astype, data=self.taxi_data)
    def test_df_astype(self):
        def df_astype(df, **kwargs):
            return df.astype({"b": "float", "d": "int"})
        run_and_compare(df_astype, data=self.taxi_data)
    def test_df_indexed_astype(self):
        def df_astype(df, **kwargs):
            df = df.groupby("a").agg({"b": "sum"})
            return df.astype({"b": "float"})
        run_and_compare(df_astype, data=self.taxi_data)
    @pytest.mark.parametrize("as_index", bool_arg_values)
    def test_taxi_q4(self, as_index):
        def taxi_q4(df, **kwargs):
            df["c"] = df["c"].dt.year
            df["d"] = df["d"].astype("int64")
            df = df.groupby(["b", "c", "d"], sort=True, as_index=as_index).size()
            if as_index:
                df = df.reset_index()
            return df.sort_values(
                by=["c", 0 if as_index else "size"],
                ignore_index=True,
                ascending=[True, False],
            )
        run_and_compare(taxi_q4, data=self.taxi_data)
    h2o_data = {
        "id1": ["id1", "id2", "id3", "id1", "id2", "id3", "id1", "id2", "id3", "id1"],
        "id2": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
        "id3": ["id4", "id5", "id6", "id4", "id5", "id6", "id4", "id5", "id6", "id4"],
        "id4": [4, 5, 4, 5, 4, 5, 4, 5, 4, 5],
        "id5": [7, 8, 9, 7, 8, 9, 7, 8, 9, 7],
        "id6": [7, 8, 7, 8, 7, 8, 7, 8, 7, 8],
        "v1": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        "v2": [1, 3, 5, 7, 9, 10, 8, 6, 4, 2],
        "v3": [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.0],
    }
    def _get_h2o_df(self):
        """Build the H2O benchmark frame with categorical id columns."""
        df = pandas.DataFrame(self.h2o_data)
        df["id1"] = df["id1"].astype("category")
        df["id2"] = df["id2"].astype("category")
        df["id3"] = df["id3"].astype("category")
        return df
    def test_h2o_q1(self):
        df = self._get_h2o_df()
        ref = df.groupby(["id1"], observed=True).agg({"v1": "sum"})
        ref.reset_index(inplace=True)
        modin_df = pd.DataFrame(df)
        set_execution_mode(modin_df, "lazy")
        modin_df = modin_df.groupby(["id1"], observed=True, as_index=False).agg(
            {"v1": "sum"}
        )
        set_execution_mode(modin_df, None)
        exp = to_pandas(modin_df)
        # OmniSci returns categories as strings; re-cast for comparison.
        exp["id1"] = exp["id1"].astype("category")
        df_equals(ref, exp)
    def test_h2o_q2(self):
        df = self._get_h2o_df()
        ref = df.groupby(["id1", "id2"], observed=True).agg({"v1": "sum"})
        ref.reset_index(inplace=True)
        modin_df = pd.DataFrame(df)
        set_execution_mode(modin_df, "lazy")
        modin_df = modin_df.groupby(["id1", "id2"], observed=True, as_index=False).agg(
            {"v1": "sum"}
        )
        set_execution_mode(modin_df, None)
        exp = to_pandas(modin_df)
        exp["id1"] = exp["id1"].astype("category")
        exp["id2"] = exp["id2"].astype("category")
        df_equals(ref, exp)
    def test_h2o_q3(self):
        df = self._get_h2o_df()
        ref = df.groupby(["id3"], observed=True).agg({"v1": "sum", "v3": "mean"})
        ref.reset_index(inplace=True)
        modin_df = pd.DataFrame(df)
        set_execution_mode(modin_df, "lazy")
        modin_df = modin_df.groupby(["id3"], observed=True, as_index=False).agg(
            {"v1": "sum", "v3": "mean"}
        )
        set_execution_mode(modin_df, None)
        exp = to_pandas(modin_df)
        exp["id3"] = exp["id3"].astype("category")
        df_equals(ref, exp)
    def test_h2o_q4(self):
        df = self._get_h2o_df()
        ref = df.groupby(["id4"], observed=True).agg(
            {"v1": "mean", "v2": "mean", "v3": "mean"}
        )
        ref.reset_index(inplace=True)
        modin_df = pd.DataFrame(df)
        set_execution_mode(modin_df, "lazy")
        modin_df = modin_df.groupby(["id4"], observed=True, as_index=False).agg(
            {"v1": "mean", "v2": "mean", "v3": "mean"}
        )
        set_execution_mode(modin_df, None)
        exp = to_pandas(modin_df)
        df_equals(ref, exp)
    def test_h2o_q5(self):
        df = self._get_h2o_df()
        ref = df.groupby(["id6"], observed=True).agg(
            {"v1": "sum", "v2": "sum", "v3": "sum"}
        )
        ref.reset_index(inplace=True)
        modin_df = pd.DataFrame(df)
        set_execution_mode(modin_df, "lazy")
        modin_df = modin_df.groupby(["id6"], observed=True, as_index=False).agg(
            {"v1": "sum", "v2": "sum", "v3": "sum"}
        )
        set_execution_mode(modin_df, None)
        exp = to_pandas(modin_df)
        df_equals(ref, exp)
    def test_h2o_q7(self):
        df = self._get_h2o_df()
        ref = (
            df.groupby(["id3"], observed=True)
            .agg({"v1": "max", "v2": "min"})
            .assign(range_v1_v2=lambda x: x["v1"] - x["v2"])[["range_v1_v2"]]
        )
        ref.reset_index(inplace=True)
        modin_df = pd.DataFrame(df)
        set_execution_mode(modin_df, "lazy")
        modin_df = modin_df.groupby(["id3"], observed=True).agg(
            {"v1": "max", "v2": "min"}
        )
        modin_df["range_v1_v2"] = modin_df["v1"] - modin_df["v2"]
        modin_df = modin_df[["range_v1_v2"]]
        modin_df.reset_index(inplace=True)
        set_execution_mode(modin_df, None)
        exp = to_pandas(modin_df)
        exp["id3"] = exp["id3"].astype("category")
        df_equals(ref, exp)
    def test_h2o_q10(self):
        df = self._get_h2o_df()
        ref = df.groupby(["id1", "id2", "id3", "id4", "id5", "id6"], observed=True).agg(
            {"v3": "sum", "v1": "count"}
        )
        ref.reset_index(inplace=True)
        modin_df = pd.DataFrame(df)
        modin_df = modin_df.groupby(
            ["id1", "id2", "id3", "id4", "id5", "id6"], observed=True
        ).agg({"v3": "sum", "v1": "count"})
        modin_df.reset_index(inplace=True)
        exp = to_pandas(modin_df)
        exp["id1"] = exp["id1"].astype("category")
        exp["id2"] = exp["id2"].astype("category")
        exp["id3"] = exp["id3"].astype("category")
        df_equals(ref, exp)
    std_data = {
        "a": [1, 2, 1, 1, 1, 2, 2, 2, 1, 2],
        "b": [4, 3, 1, 6, 9, 8, 0, 9, 5, 13],
        "c": [12.8, 45.6, 23.5, 12.4, 11.2, None, 56.4, 12.5, 1, 55],
    }
    def test_agg_std(self):
        def std(df, **kwargs):
            df = df.groupby("a").agg({"b": "std", "c": "std"})
            if not isinstance(df, pandas.DataFrame):
                df = to_pandas(df)
            # Round to avoid spurious float-precision mismatches.
            df["b"] = df["b"].apply(lambda x: round(x, 10))
            df["c"] = df["c"].apply(lambda x: round(x, 10))
            return df
        run_and_compare(std, data=self.std_data, force_lazy=False)
    skew_data = {
        "a": [1, 2, 1, 1, 1, 2, 2, 2, 1, 2, 3, 4, 4],
        "b": [4, 3, 1, 6, 9, 8, 0, 9, 5, 13, 12, 44, 6],
        "c": [12.8, 45.6, 23.5, 12.4, 11.2, None, 56.4, 12.5, 1, 55, 4.5, 7.8, 9.4],
    }
    def test_agg_skew(self):
        def std(df, **kwargs):
            df = df.groupby("a").agg({"b": "skew", "c": "skew"})
            if not isinstance(df, pandas.DataFrame):
                df = to_pandas(df)
            df["b"] = df["b"].apply(lambda x: round(x, 10))
            df["c"] = df["c"].apply(lambda x: round(x, 10))
            return df
        run_and_compare(std, data=self.skew_data, force_lazy=False)
    def test_multilevel(self):
        # Multiple aggregations per column produce MultiIndex columns.
        def groupby(df, **kwargs):
            return df.groupby("a").agg({"b": "min", "c": ["min", "max", "sum", "skew"]})
        run_and_compare(groupby, data=self.data)
class TestAgg:
    """Tests for whole-frame reductions (sum/mean/count/value_counts/nunique)."""

    data = {
        "a": [1, 2, None, None, 1, None],
        "b": [10, 20, None, 20, 10, None],
        "c": [None, 200, None, 400, 500, 600],
        "d": [11, 22, 33, 22, 33, 22],
    }
    # Same data with NaNs replaced by 0 and cast to int.
    int_data = pandas.DataFrame(data).fillna(0).astype("int").to_dict()
    @pytest.mark.parametrize("agg", ["max", "min", "sum", "mean"])
    @pytest.mark.parametrize("skipna", bool_arg_values)
    def test_simple_agg(self, agg, skipna):
        def apply(df, agg, skipna, **kwargs):
            return getattr(df, agg)(skipna=skipna)
        run_and_compare(apply, data=self.data, agg=agg, skipna=skipna, force_lazy=False)
    def test_count_agg(self):
        def apply(df, **kwargs):
            return df.count()
        run_and_compare(apply, data=self.data, force_lazy=False)
    @pytest.mark.parametrize("data", [data, int_data], ids=["nan_data", "int_data"])
    @pytest.mark.parametrize("cols", ["a", "d", ["a", "d"]])
    @pytest.mark.parametrize("dropna", [True, False])
    @pytest.mark.parametrize("sort", [True])
    @pytest.mark.parametrize("ascending", [True, False])
    def test_value_counts(self, data, cols, dropna, sort, ascending):
        def value_counts(df, cols, dropna, sort, ascending, **kwargs):
            return df[cols].value_counts(dropna=dropna, sort=sort, ascending=ascending)
        # dropna=True combined with actual NaNs in the selected columns is a
        # known unsupported case — expect failure instead of comparing.
        if dropna and pandas.DataFrame(
            data, columns=cols if is_list_like(cols) else [cols]
        ).isna().any(axis=None):
            pytest.xfail(
                reason="'dropna' parameter is forcibly disabled in OmniSci's GroupBy"
                "due to performance issues, you can track this problem at:"
                "https://github.com/modin-project/modin/issues/2896"
            )
        # Custom comparator is required because pandas is inconsistent about
        # the order of equal values, we can't match this behaviour. For more details:
        # https://github.com/modin-project/modin/issues/1650
        run_and_compare(
            value_counts,
            data=data,
            cols=cols,
            dropna=dropna,
            sort=sort,
            ascending=ascending,
            comparator=df_equals_with_non_stable_indices,
        )
    @pytest.mark.parametrize(
        "method", ["sum", "mean", "max", "min", "count", "nunique"]
    )
    def test_simple_agg_no_default(self, method):
        """Reductions must not default to pandas except for the final transpose."""
        def applier(df, **kwargs):
            if isinstance(df, pd.DataFrame):
                # At the end of reduction function it does inevitable `transpose`, which
                # is defaulting to pandas. The following logic check that `transpose` is the only
                # function that falling back to pandas in the reduction operation flow.
                with pytest.warns(UserWarning) as warns:
                    res = getattr(df, method)()
                assert (
                    len(warns) == 1
                ), f"More than one warning were arisen: len(warns) != 1 ({len(warns)} != 1)"
                message = warns[0].message.args[0]
                assert (
                    re.match(r".*transpose.*defaulting to pandas", message) is not None
                ), f"Expected DataFrame.transpose defaulting to pandas warning, got: {message}"
            else:
                res = getattr(df, method)()
            return res
        run_and_compare(applier, data=self.data, force_lazy=False)
    @pytest.mark.parametrize("data", [data, int_data])
    @pytest.mark.parametrize("dropna", bool_arg_values)
    def test_nunique(self, data, dropna):
        def applier(df, **kwargs):
            return df.nunique(dropna=dropna)
        run_and_compare(applier, data=data, force_lazy=False)
class TestMerge:
    """Tests for ``DataFrame.merge``, including the H2O db-benchmark join queries."""

    data = {
        "a": [1, 2, 3, 6, 5, 4],
        "b": [10, 20, 30, 60, 50, 40],
        "e": [11, 22, 33, 66, 55, 44],
    }
    data2 = {
        "a": [4, 2, 3, 7, 1, 5],
        "b": [40, 20, 30, 70, 10, 50],
        "d": [4000, 2000, 3000, 7000, 1000, 5000],
    }
    on_values = ["a", ["a"], ["a", "b"], ["b", "a"], None]
    how_values = ["inner", "left"]

    @pytest.mark.parametrize("on", on_values)
    @pytest.mark.parametrize("how", how_values)
    @pytest.mark.parametrize("sort", [True, False])
    def test_merge(self, on, how, sort):
        def merge(lib, df1, df2, on, how, sort, **kwargs):
            return df1.merge(df2, on=on, how=how, sort=sort)

        run_and_compare(
            merge, data=self.data, data2=self.data2, on=on, how=how, sort=sort
        )

    def test_merge_non_str_column_name(self):
        # Column labels may be non-strings (here: positional int labels).
        def merge(lib, df1, df2, on, **kwargs):
            return df1.merge(df2, on=on, how="inner")

        run_and_compare(merge, data=[[1, 2], [3, 4]], data2=[[1, 2], [3, 4]], on=1)

    # Data for the H2O db-benchmark join queries (q1-q5).
    h2o_data = {
        "id1": ["id1", "id10", "id100", "id1000"],
        "id2": ["id2", "id20", "id200", "id2000"],
        "id3": ["id3", "id30", "id300", "id3000"],
        "id4": [4, 40, 400, 4000],
        "id5": [5, 50, 500, 5000],
        "id6": [6, 60, 600, 6000],
        "v1": [3.3, 4.4, 7.7, 8.8],
    }
    h2o_data_small = {
        "id1": ["id10", "id100", "id1000", "id10000"],
        "id4": [40, 400, 4000, 40000],
        "v2": [30.3, 40.4, 70.7, 80.8],
    }
    h2o_data_medium = {
        "id1": ["id10", "id100", "id1000", "id10000"],
        "id2": ["id20", "id200", "id2000", "id20000"],
        "id4": [40, 400, 4000, 40000],
        "id5": [50, 500, 5000, 50000],
        "v2": [30.3, 40.4, 70.7, 80.8],
    }
    h2o_data_big = {
        "id1": ["id10", "id100", "id1000", "id10000"],
        "id2": ["id20", "id200", "id2000", "id20000"],
        "id3": ["id30", "id300", "id3000", "id30000"],
        "id4": [40, 400, 4000, 40000],
        "id5": [50, 500, 5000, 50000],
        "id6": [60, 600, 6000, 60000],
        "v2": [30.3, 40.4, 70.7, 80.8],
    }

    # String id-columns that are stored with 'category' dtype.
    _h2o_category_cols = ("id1", "id2", "id3")

    def _get_h2o_df(self, data):
        """Build a pandas frame for an H2O query, casting string id-columns to category."""
        df = pandas.DataFrame(data)
        for col in self._h2o_category_cols:
            if col in data:
                df[col] = df[col].astype("category")
        return df

    # Currently OmniSci returns category as string columns
    # and therefore casted to category it would only have
    # values from actual data. In Pandas category would
    # have old values as well. Simply casting category
    # to string for comparison doesn't work because None
    # casted to category and back to string becomes
    # "nan". So we cast everything to category and then
    # to string.
    def _fix_category_cols(self, df):
        """Normalize id-columns (and their merge suffixes) to plain strings."""
        for base in self._h2o_category_cols:
            # Also cover the '_x'/'_y' suffixes merge() appends on column clashes.
            for col in (base, base + "_x", base + "_y"):
                if col in df.columns:
                    df[col] = df[col].astype("category")
                    df[col] = df[col].astype(str)

    def _check_h2o_merge(self, rhs_data, **merge_kwargs):
        """Run the same merge through pandas and Modin and compare the results."""
        lhs = self._get_h2o_df(self.h2o_data)
        rhs = self._get_h2o_df(rhs_data)
        ref = lhs.merge(rhs, **merge_kwargs)
        self._fix_category_cols(ref)
        modin_res = pd.DataFrame(lhs).merge(pd.DataFrame(rhs), **merge_kwargs)
        exp = to_pandas(modin_res)
        self._fix_category_cols(exp)
        df_equals(ref, exp)

    def test_h2o_q1(self):
        self._check_h2o_merge(self.h2o_data_small, on="id1")

    def test_h2o_q2(self):
        self._check_h2o_merge(self.h2o_data_medium, on="id2")

    def test_h2o_q3(self):
        self._check_h2o_merge(self.h2o_data_medium, how="left", on="id2")

    def test_h2o_q4(self):
        self._check_h2o_merge(self.h2o_data_medium, on="id5")

    def test_h2o_q5(self):
        self._check_h2o_merge(self.h2o_data_big, on="id3")

    dt_data1 = {
        "id": [1, 2],
        "timestamp": pandas.to_datetime(["20000101", "20000201"], format="%Y%m%d"),
    }
    dt_data2 = {"id": [1, 2], "timestamp_year": [2000, 2000]}

    def test_merge_dt(self):
        """Merge on a column derived from a datetime accessor."""
        def merge(df1, df2, **kwargs):
            df1["timestamp_year"] = df1["timestamp"].dt.year
            res = df1.merge(df2, how="left", on=["id", "timestamp_year"])
            # Normalize NaNs from unmatched rows so both engines compare equal.
            res["timestamp_year"] = res["timestamp_year"].fillna(np.int64(-1))
            return res

        run_and_compare(merge, data=self.dt_data1, data2=self.dt_data2)

    left_data = {"a": [1, 2, 3, 4], "b": [10, 20, 30, 40], "c": [11, 12, 13, 14]}
    right_data = {"c": [1, 2, 3, 4], "b": [10, 20, 30, 40], "d": [100, 200, 300, 400]}

    @pytest.mark.parametrize("how", how_values)
    @pytest.mark.parametrize(
        "left_on, right_on", [["a", "c"], [["a", "b"], ["c", "b"]]]
    )
    def test_merge_left_right_on(self, how, left_on, right_on):
        def merge(df1, df2, how, left_on, right_on, **kwargs):
            return df1.merge(df2, how=how, left_on=left_on, right_on=right_on)

        # Check both orientations of the asymmetric key mapping.
        run_and_compare(
            merge,
            data=self.left_data,
            data2=self.right_data,
            how=how,
            left_on=left_on,
            right_on=right_on,
        )
        run_and_compare(
            merge,
            data=self.right_data,
            data2=self.left_data,
            how=how,
            left_on=right_on,
            right_on=left_on,
        )
class TestBinaryOp:
    """Tests for element-wise binary operators (+, *, %, /, //, comparisons).

    Each operator is checked in three forms: against a constant, against a
    list, and column-vs-column (both via the `.op()` method with fill_value
    and via the plain operator syntax).
    """

    data = {
        "a": [1, 1, 1, 1, 1],
        "b": [10, 10, 10, 10, 10],
        "c": [100, 100, 100, 100, 100],
        "d": [1000, 1000, 1000, 1000, 1000],
    }
    data2 = {
        "a": [1, 1, 1, 1, 1],
        "f": [2, 2, 2, 2, 2],
        "b": [3, 3, 3, 3, 3],
        "d": [4, 4, 4, 4, 4],
    }
    fill_values = [None, 1]
    def test_binary_level(self):
        """`add` with a `level=` argument (MultiIndex alignment)."""
        def applier(df1, df2, **kwargs):
            df2.index = generate_multiindex(len(df2))
            return df1.add(df2, level=1)
        # setting `force_lazy=False`, because we're expecting to fallback
        # to pandas in that case, which is not supported in lazy mode
        run_and_compare(applier, data=self.data, data2=self.data, force_lazy=False)
    def test_add_cst(self):
        def add(lib, df):
            return df + 1
        run_and_compare(add, data=self.data)
    def test_add_list(self):
        # One list element per column.
        def add(lib, df):
            return df + [1, 2, 3, 4]
        run_and_compare(add, data=self.data)
    @pytest.mark.parametrize("fill_value", fill_values)
    def test_add_method_columns(self, fill_value):
        def add1(lib, df, fill_value):
            return df["a"].add(df["b"], fill_value=fill_value)
        def add2(lib, df, fill_value):
            return df[["a", "c"]].add(df[["b", "a"]], fill_value=fill_value)
        run_and_compare(add1, data=self.data, fill_value=fill_value)
        run_and_compare(add2, data=self.data, fill_value=fill_value)
    def test_add_columns(self):
        def add1(lib, df):
            return df["a"] + df["b"]
        def add2(lib, df):
            return df[["a", "c"]] + df[["b", "a"]]
        run_and_compare(add1, data=self.data)
        run_and_compare(add2, data=self.data)
    def test_add_columns_and_assign(self):
        # Assignment of the result to a new column.
        def add(lib, df):
            df["sum"] = df["a"] + df["b"]
            return df
        run_and_compare(add, data=self.data)
    def test_add_columns_and_assign_to_existing(self):
        # Assignment of the result over an existing column.
        def add(lib, df):
            df["a"] = df["a"] + df["b"]
            return df
        run_and_compare(add, data=self.data)
    def test_mul_cst(self):
        def mul(lib, df):
            return df * 2
        run_and_compare(mul, data=self.data)
    def test_mul_list(self):
        def mul(lib, df):
            return df * [2, 3, 4, 5]
        run_and_compare(mul, data=self.data)
    @pytest.mark.parametrize("fill_value", fill_values)
    def test_mul_method_columns(self, fill_value):
        def mul1(lib, df, fill_value):
            return df["a"].mul(df["b"], fill_value=fill_value)
        def mul2(lib, df, fill_value):
            return df[["a", "c"]].mul(df[["b", "a"]], fill_value=fill_value)
        run_and_compare(mul1, data=self.data, fill_value=fill_value)
        run_and_compare(mul2, data=self.data, fill_value=fill_value)
    def test_mul_columns(self):
        def mul1(lib, df):
            return df["a"] * df["b"]
        def mul2(lib, df):
            return df[["a", "c"]] * df[["b", "a"]]
        run_and_compare(mul1, data=self.data)
        run_and_compare(mul2, data=self.data)
    def test_mod_cst(self):
        def mod(lib, df):
            return df % 2
        run_and_compare(mod, data=self.data)
    def test_mod_list(self):
        def mod(lib, df):
            return df % [2, 3, 4, 5]
        run_and_compare(mod, data=self.data)
    @pytest.mark.parametrize("fill_value", fill_values)
    def test_mod_method_columns(self, fill_value):
        def mod1(lib, df, fill_value):
            return df["a"].mod(df["b"], fill_value=fill_value)
        def mod2(lib, df, fill_value):
            return df[["a", "c"]].mod(df[["b", "a"]], fill_value=fill_value)
        run_and_compare(mod1, data=self.data, fill_value=fill_value)
        run_and_compare(mod2, data=self.data, fill_value=fill_value)
    def test_mod_columns(self):
        def mod1(lib, df):
            return df["a"] % df["b"]
        def mod2(lib, df):
            return df[["a", "c"]] % df[["b", "a"]]
        run_and_compare(mod1, data=self.data)
        run_and_compare(mod2, data=self.data)
    def test_truediv_cst(self):
        def truediv(lib, df):
            return df / 2
        run_and_compare(truediv, data=self.data)
    def test_truediv_list(self):
        def truediv(lib, df):
            return df / [1, 0.5, 0.2, 2.0]
        run_and_compare(truediv, data=self.data)
    @pytest.mark.parametrize("fill_value", fill_values)
    def test_truediv_method_columns(self, fill_value):
        def truediv1(lib, df, fill_value):
            return df["a"].truediv(df["b"], fill_value=fill_value)
        def truediv2(lib, df, fill_value):
            return df[["a", "c"]].truediv(df[["b", "a"]], fill_value=fill_value)
        run_and_compare(truediv1, data=self.data, fill_value=fill_value)
        run_and_compare(truediv2, data=self.data, fill_value=fill_value)
    def test_truediv_columns(self):
        def truediv1(lib, df):
            return df["a"] / df["b"]
        def truediv2(lib, df):
            return df[["a", "c"]] / df[["b", "a"]]
        run_and_compare(truediv1, data=self.data)
        run_and_compare(truediv2, data=self.data)
    def test_floordiv_cst(self):
        def floordiv(lib, df):
            return df // 2
        run_and_compare(floordiv, data=self.data)
    def test_floordiv_list(self):
        def floordiv(lib, df):
            return df // [1, 0.54, 0.24, 2.01]
        run_and_compare(floordiv, data=self.data)
    @pytest.mark.parametrize("fill_value", fill_values)
    def test_floordiv_method_columns(self, fill_value):
        def floordiv1(lib, df, fill_value):
            return df["a"].floordiv(df["b"], fill_value=fill_value)
        def floordiv2(lib, df, fill_value):
            return df[["a", "c"]].floordiv(df[["b", "a"]], fill_value=fill_value)
        run_and_compare(floordiv1, data=self.data, fill_value=fill_value)
        run_and_compare(floordiv2, data=self.data, fill_value=fill_value)
    def test_floordiv_columns(self):
        def floordiv1(lib, df):
            return df["a"] // df["b"]
        def floordiv2(lib, df):
            return df[["a", "c"]] // df[["b", "a"]]
        run_and_compare(floordiv1, data=self.data)
        run_and_compare(floordiv2, data=self.data)
    # Data for comparison operators: includes a float column.
    cmp_data = {
        "a": [1, 2, 3, 4, 5],
        "b": [10, 20, 30, 40, 50],
        "c": [50.0, 40.0, 30.1, 20.0, 10.0],
    }
    cmp_fn_values = ["eq", "ne", "le", "lt", "ge", "gt"]
    @pytest.mark.parametrize("cmp_fn", cmp_fn_values)
    def test_cmp_cst(self, cmp_fn):
        def cmp1(df, cmp_fn, **kwargs):
            return getattr(df["a"], cmp_fn)(3)
        def cmp2(df, cmp_fn, **kwargs):
            return getattr(df, cmp_fn)(30)
        run_and_compare(cmp1, data=self.cmp_data, cmp_fn=cmp_fn)
        run_and_compare(cmp2, data=self.cmp_data, cmp_fn=cmp_fn)
    @pytest.mark.parametrize("cmp_fn", cmp_fn_values)
    def test_cmp_list(self, cmp_fn):
        def cmp(df, cmp_fn, **kwargs):
            return getattr(df, cmp_fn)([3, 30, 30.1])
        run_and_compare(cmp, data=self.cmp_data, cmp_fn=cmp_fn)
    @pytest.mark.parametrize("cmp_fn", cmp_fn_values)
    def test_cmp_cols(self, cmp_fn):
        def cmp1(df, cmp_fn, **kwargs):
            return getattr(df["b"], cmp_fn)(df["c"])
        def cmp2(df, cmp_fn, **kwargs):
            return getattr(df[["b", "c"]], cmp_fn)(df[["a", "b"]])
        run_and_compare(cmp1, data=self.cmp_data, cmp_fn=cmp_fn)
        run_and_compare(cmp2, data=self.cmp_data, cmp_fn=cmp_fn)
    @pytest.mark.parametrize("cmp_fn", cmp_fn_values)
    @pytest.mark.parametrize("value", [2, 2.2, "a"])
    @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
    def test_cmp_mixed_types(self, cmp_fn, value, data):
        """Comparisons of int/float/str values against the shared test frames."""
        def cmp(df, cmp_fn, value, **kwargs):
            return getattr(df, cmp_fn)(value)
        run_and_compare(cmp, data=data, cmp_fn=cmp_fn, value=value)
    def test_filter_dtypes(self):
        """dtypes must be preserved through a boolean-mask filter."""
        def filter(df, **kwargs):
            return df[df.a < 4].dtypes
        run_and_compare(filter, data=self.cmp_data)
    @pytest.mark.xfail(
        reason="Requires fix in OmniSci: https://github.com/intel-ai/omniscidb/pull/178"
    )
    def test_filter_empty_result(self):
        # Filter that matches no rows.
        def filter(df, **kwargs):
            return df[df.a < 0]
        run_and_compare(filter, data=self.cmp_data)
    def test_complex_filter(self):
        """Filters combining two conditions with & and |."""
        def filter_and(df, **kwargs):
            return df[(df.a < 5) & (df.b > 20)]
        def filter_or(df, **kwargs):
            return df[(df.a < 3) | (df.b > 40)]
        run_and_compare(filter_and, data=self.cmp_data)
        run_and_compare(filter_or, data=self.cmp_data)
class TestDateTime:
    """Tests for the `Series.dt` datetime accessor (year/month/day)."""

    datetime_data = {
        "a": [1, 1, 2, 2],
        "b": [11, 21, 12, 11],
        "c": pandas.to_datetime(
            ["20190902", "20180913", "20190921", "20180903"], format="%Y%m%d"
        ),
    }
    def test_dt_year(self):
        def dt_year(df, **kwargs):
            return df["c"].dt.year
        run_and_compare(dt_year, data=self.datetime_data)
    def test_dt_month(self):
        def dt_month(df, **kwargs):
            return df["c"].dt.month
        run_and_compare(dt_month, data=self.datetime_data)
    def test_dt_day(self):
        def dt_day(df, **kwargs):
            return df["c"].dt.day
        run_and_compare(dt_day, data=self.datetime_data)
class TestCategory:
    """Tests for categorical dtype support."""

    data = {
        "a": ["str1", "str2", "str1", "str3", "str2", None],
    }
    def test_cat_codes(self):
        """`.cat.codes` must match pandas, including the None entry."""
        pandas_df = pandas.DataFrame(self.data)
        pandas_df["a"] = pandas_df["a"].astype("category")
        modin_df = pd.DataFrame(pandas_df)
        modin_df["a"] = modin_df["a"].cat.codes
        # Materialize the Modin result before mutating the pandas reference.
        exp = to_pandas(modin_df)
        pandas_df["a"] = pandas_df["a"].cat.codes
        df_equals(pandas_df, exp)
class TestSort:
    """Tests for `DataFrame.sort_values` over various key/NULL/index layouts."""

    data = {
        "a": [1, 2, 5, 2, 5, 4, 4, 5, 2],
        "b": [1, 2, 3, 6, 5, 1, 4, 5, 3],
        "c": [5, 4, 2, 3, 1, 1, 4, 5, 6],
        "d": ["1", "4", "3", "2", "1", "6", "7", "5", "0"],
    }
    # At most one NULL per column.
    data_nulls = {
        "a": [1, 2, 5, 2, 5, 4, 4, None, 2],
        "b": [1, 2, 3, 6, 5, None, 4, 5, 3],
        "c": [None, 4, 2, 3, 1, 1, 4, 5, 6],
    }
    # Several NULLs per column (see the disabled test at the bottom).
    data_multiple_nulls = {
        "a": [1, 2, None, 2, 5, 4, 4, None, 2],
        "b": [1, 2, 3, 6, 5, None, 4, 5, None],
        "c": [None, 4, 2, None, 1, 1, 4, 5, 6],
    }
    cols_values = ["a", ["a", "b"], ["b", "a"], ["c", "a", "b"]]
    index_cols_values = [None, "a", ["a", "b"]]
    ascending_values = [True, False]
    ascending_list_values = [[True, False], [False, True]]
    na_position_values = ["first", "last"]
    @pytest.mark.parametrize("cols", cols_values)
    @pytest.mark.parametrize("ignore_index", bool_arg_values)
    @pytest.mark.parametrize("ascending", ascending_values)
    @pytest.mark.parametrize("index_cols", index_cols_values)
    def test_sort_cols(self, cols, ignore_index, index_cols, ascending):
        """Sort by column(s), optionally after moving some columns into the index."""
        def sort(df, cols, ignore_index, index_cols, ascending, **kwargs):
            if index_cols:
                df = df.set_index(index_cols)
            return df.sort_values(cols, ignore_index=ignore_index, ascending=ascending)
        run_and_compare(
            sort,
            data=self.data,
            cols=cols,
            ignore_index=ignore_index,
            index_cols=index_cols,
            ascending=ascending,
            # we're expecting to fallback to pandas in that case,
            # which is not supported in lazy mode
            force_lazy=(index_cols is None),
        )
    @pytest.mark.parametrize("ascending", ascending_list_values)
    def test_sort_cols_asc_list(self, ascending):
        """Per-column ascending flags passed as a list."""
        def sort(df, ascending, **kwargs):
            return df.sort_values(["a", "b"], ascending=ascending)
        run_and_compare(
            sort,
            data=self.data,
            ascending=ascending,
        )
    @pytest.mark.parametrize("ascending", ascending_values)
    def test_sort_cols_str(self, ascending):
        """Sorting by a string-typed column."""
        def sort(df, ascending, **kwargs):
            return df.sort_values("d", ascending=ascending)
        run_and_compare(
            sort,
            data=self.data,
            ascending=ascending,
        )
    @pytest.mark.parametrize("cols", cols_values)
    @pytest.mark.parametrize("ascending", ascending_values)
    @pytest.mark.parametrize("na_position", na_position_values)
    def test_sort_cols_nulls(self, cols, ascending, na_position):
        """Sorting columns that contain NULLs, with both na_position options."""
        def sort(df, cols, ascending, na_position, **kwargs):
            return df.sort_values(cols, ascending=ascending, na_position=na_position)
        run_and_compare(
            sort,
            data=self.data_nulls,
            cols=cols,
            ascending=ascending,
            na_position=na_position,
        )
    # Issue #1767 - rows order is not preserved for NULL keys
    # @pytest.mark.parametrize("cols", cols_values)
    # @pytest.mark.parametrize("ascending", ascending_values)
    # @pytest.mark.parametrize("na_position", na_position_values)
    # def test_sort_cols_multiple_nulls(self, cols, ascending, na_position):
    #     def sort(df, cols, ascending, na_position, **kwargs):
    #         return df.sort_values(cols, ascending=ascending, na_position=na_position)
    #
    #     run_and_compare(
    #         sort,
    #         data=self.data_multiple_nulls,
    #         cols=cols,
    #         ascending=ascending,
    #         na_position=na_position,
    #     )
class TestBadData:
    """Frames holding data unsupported by Arrow and/or the OmniSci backend."""

    # NOTE(review): names suggest this data can't be converted to an Arrow
    # table (heterogeneous scalar/list cells) — confirm against the backend.
    bad_for_arrow = {
        "a": ["a", [[1, 2], [3]], [3, 4]],
        "b": ["b", [1, 2], [3, 4]],
        "c": ["1", "2", 3],
    }
    # NOTE(review): presumably Arrow-convertible but not storable by OmniSci
    # (list-valued column) — confirm.
    bad_for_omnisci = {
        "b": [[1, 2], [3, 4], [5, 6]],
        "c": ["1", "2", "3"],
    }
    ok_data = {"d": np.arange(3), "e": np.arange(3), "f": np.arange(3)}
    def _get_pyarrow_table(self, obj):
        """Convert a dict or frame to a pyarrow Table via a pandas DataFrame."""
        if not isinstance(obj, (pandas.DataFrame, pandas.Series)):
            obj = pandas.DataFrame(obj)
        return pyarrow.Table.from_pandas(obj)
    @pytest.mark.parametrize("data", [bad_for_arrow, bad_for_omnisci])
    def test_construct(self, data):
        def applier(df, *args, **kwargs):
            # repr() forces materialization of the frame.
            return repr(df)
        run_and_compare(applier, data=data, force_lazy=False)
    def test_from_arrow(self):
        """Importing OmniSci-unsupported (but Arrow-valid) data via from_arrow."""
        at = self._get_pyarrow_table(self.bad_for_omnisci)
        pd_df = pandas.DataFrame(self.bad_for_omnisci)
        md_df = pd.utils.from_arrow(at)
        # force materialization
        repr(md_df)
        df_equals(md_df, pd_df)
    @pytest.mark.parametrize("data", [bad_for_arrow, bad_for_omnisci])
    def test_methods(self, data):
        def applier(df, *args, **kwargs):
            return df.T.drop(columns=[0])
        run_and_compare(applier, data=data, force_lazy=False)
    def test_with_normal_frame(self):
        """Joining an unsupported frame with a fully supported one."""
        def applier(df1, df2, *args, **kwargs):
            return df2.join(df1)
        run_and_compare(
            applier, data=self.bad_for_omnisci, data2=self.ok_data, force_lazy=False
        )
    def test_heterogenous_fillna(self):
        # Filling an int column with a string yields a mixed-type result.
        def fillna(df, **kwargs):
            return df["d"].fillna("a")
        run_and_compare(fillna, data=self.ok_data, force_lazy=False)
class TestDropna:
    """Tests for NA-dropping on frames, MultiIndex frames, and within groupby."""

    data = {
        "col1": [1, 2, None, 2, 1],
        "col2": [None, 3, None, 2, 1],
        "col3": [2, 3, 4, None, 5],
        "col4": [1, 2, 3, 4, 5],
    }
    @pytest.mark.parametrize("subset", [None, ["col1", "col2"]])
    @pytest.mark.parametrize("how", ["all", "any"])
    def test_dropna(self, subset, how):
        def applier(df, *args, **kwargs):
            return df.dropna(subset=subset, how=how)
        run_and_compare(applier, data=self.data)
    def test_dropna_multiindex(self):
        """dropna on a MultiIndex frame, compared directly against pandas."""
        index = generate_multiindex(len(self.data["col1"]))
        md_df = pd.DataFrame(self.data, index=index)
        pd_df = pandas.DataFrame(self.data, index=index)
        md_res = md_df.dropna()._to_pandas()
        pd_res = pd_df.dropna()
        # HACK: all strings in OmniSci considered to be categories, that breaks
        # checks for equality with pandas, this line discards category dtype
        md_res.index = pandas.MultiIndex.from_tuples(
            md_res.index.values, names=md_res.index.names
        )
        df_equals(md_res, pd_res)
    @pytest.mark.skip("Dropna logic for GroupBy is disabled for now")
    @pytest.mark.parametrize("by", ["col1", ["col1", "col2"], ["col1", "col4"]])
    @pytest.mark.parametrize("dropna", [True, False])
    def test_dropna_groupby(self, by, dropna):
        def applier(df, *args, **kwargs):
            # OmniSci engine preserves NaNs at the result of groupby,
            # so replacing NaNs with '0' to match with Pandas.
            # https://github.com/modin-project/modin/issues/2878
            return df.groupby(by=by, dropna=dropna).sum().fillna(0)
        run_and_compare(applier, data=self.data)
class TestUnsupportedColumns:
    """Detection of column types the OmniSci backend cannot store."""

    @pytest.mark.parametrize(
        "data,is_good",
        [
            # Homogeneous string columns (with NULLs) are supported ...
            [["1", "2", None, "2", "1"], True],
            [[None, "3", None, "2", "1"], True],
            # ... columns mixing ints and strings are not.
            [[1, "2", None, "2", "1"], False],
            [[None, 3, None, "2", "1"], False],
        ],
    )
    def test_unsupported_columns(self, data, is_good):
        """_get_unsupported_cols must report exactly the unconvertible columns."""
        pandas_df = pandas.DataFrame({"col": data})
        obj, bad_cols = OmnisciOnNativeDataframePartitionManager._get_unsupported_cols(
            pandas_df
        )
        if is_good:
            assert obj and not bad_cols
        else:
            assert not obj and bad_cols == ["col"]
class TestConstructor:
    """Shape-hint detection ('column' / 'row' / None) in construction paths."""

    @pytest.mark.parametrize(
        "index",
        [
            None,
            pandas.Index([1, 2, 3]),
            pandas.MultiIndex.from_tuples([(1, 1), (2, 2), (3, 3)]),
        ],
    )
    def test_shape_hint_detection(self, index):
        # Single column -> "column" hint.
        df = pd.DataFrame({"a": [1, 2, 3]}, index=index)
        assert df._query_compiler._shape_hint == "column"
        # Single row -> "row" hint.
        transposed_data = df._to_pandas().T.to_dict()
        df = pd.DataFrame(transposed_data)
        assert df._query_compiler._shape_hint == "row"
        # Multiple rows and columns -> no hint.
        df = pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}, index=index)
        assert df._query_compiler._shape_hint is None
        # 1x1 frame is treated as a column.
        df = pd.DataFrame({"a": [1]}, index=None if index is None else index[:1])
        assert df._query_compiler._shape_hint == "column"
    def test_shape_hint_detection_from_arrow(self):
        """Same hint detection when constructing from a pyarrow Table."""
        at = pyarrow.Table.from_pydict({"a": [1, 2, 3]})
        df = pd.utils.from_arrow(at)
        assert df._query_compiler._shape_hint == "column"
        at = pyarrow.Table.from_pydict({"a": [1], "b": [2], "c": [3]})
        df = pd.utils.from_arrow(at)
        assert df._query_compiler._shape_hint == "row"
        at = pyarrow.Table.from_pydict({"a": [1, 2, 3], "b": [1, 2, 3]})
        df = pd.utils.from_arrow(at)
        assert df._query_compiler._shape_hint is None
        at = pyarrow.Table.from_pydict({"a": [1]})
        df = pd.utils.from_arrow(at)
        assert df._query_compiler._shape_hint == "column"
class TestArrowExecution:
    """Operations forced to execute on Arrow tables (force_arrow_execute=True)."""

    data1 = {
        "a": [1, 2, 3],
        "b": [3, 4, 5],
        "c": [6, 7, 8],
    }
    data2 = {
        "a": [1, 2, 3],
        "d": [3, 4, 5],
        "e": [6, 7, 8],
    }
    def test_drop_rename_concat(self):
        def drop_rename_concat(df1, df2, lib, **kwargs):
            # Align both frames to columns ("new_a", "new_b"), then concat.
            df1 = df1.rename(columns={"a": "new_a", "c": "new_b"})
            df1 = df1.drop(columns="b")
            df2 = df2.rename(columns={"a": "new_a", "d": "new_b"})
            df2 = df2.drop(columns="e")
            return lib.concat([df1, df2], ignore_index=True)
        run_and_compare(
            drop_rename_concat,
            data=self.data1,
            data2=self.data2,
            force_arrow_execute=True,
        )
    def test_empty_transform(self):
        # A transform over an empty frame must still work in Arrow execution.
        def apply(df, **kwargs):
            return df + 1
        run_and_compare(apply, data={}, force_arrow_execute=True)
if __name__ == "__main__":
pytest.main(["-v", __file__])
| [
"pandas.read_csv",
"modin.pandas.Series",
"modin.pandas.read_csv",
"pyarrow.Table.from_pydict",
"numpy.int32",
"pandas.Index",
"modin.pandas.test.utils.df_equals",
"modin.pandas.concat",
"modin.pandas.utils.from_arrow",
"pandas.MultiIndex.from_tuples",
"numpy.arange",
"pandas.to_datetime",
"... | [((1116, 1140), 'modin.config.IsExperimental.put', 'IsExperimental.put', (['(True)'], {}), '(True)\n', (1134, 1140), False, 'from modin.config import IsExperimental, Engine, StorageFormat\n'), ((1141, 1161), 'modin.config.Engine.put', 'Engine.put', (['"""native"""'], {}), "('native')\n", (1151, 1161), False, 'from modin.config import IsExperimental, Engine, StorageFormat\n'), ((1162, 1190), 'modin.config.StorageFormat.put', 'StorageFormat.put', (['"""omnisci"""'], {}), "('omnisci')\n", (1179, 1190), False, 'from modin.config import IsExperimental, Engine, StorageFormat\n'), ((1765, 1810), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""TestReadCSVFixture"""'], {}), "('TestReadCSVFixture')\n", (1788, 1810), False, 'import pytest\n'), ((5703, 5765), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""null_dtype"""', "['category', 'float64']"], {}), "('null_dtype', ['category', 'float64'])\n", (5726, 5765), False, 'import pytest\n'), ((7111, 7178), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""names"""', "[None, ['a', 'b', 'c', 'd', 'e']]"], {}), "('names', [None, ['a', 'b', 'c', 'd', 'e']])\n", (7134, 7178), False, 'import pytest\n'), ((7184, 7228), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""header"""', '[None, 0]'], {}), "('header', [None, 0])\n", (7207, 7228), False, 'import pytest\n'), ((7550, 7619), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kwargs"""', "[{'sep': '|'}, {'delimiter': '|'}]"], {}), "('kwargs', [{'sep': '|'}, {'delimiter': '|'}])\n", (7573, 7619), False, 'import pytest\n'), ((7907, 7984), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""https://github.com/modin-project/modin/issues/2174"""'}), "(reason='https://github.com/modin-project/modin/issues/2174')\n", (7923, 7984), False, 'import pytest\n'), ((8544, 8594), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""engine"""', "[None, 'arrow']"], {}), "('engine', [None, 'arrow'])\n", (8567, 8594), False, 
'import pytest\n'), ((8600, 8725), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""parse_dates"""', "[True, False, ['col2'], ['c2'], [['col2', 'col3']], {'col23': ['col2', 'col3']}\n ]"], {}), "('parse_dates', [True, False, ['col2'], ['c2'], [[\n 'col2', 'col3']], {'col23': ['col2', 'col3']}])\n", (8623, 8725), False, 'import pytest\n'), ((10070, 10120), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""engine"""', "[None, 'arrow']"], {}), "('engine', [None, 'arrow'])\n", (10093, 10120), False, 'import pytest\n'), ((10126, 10326), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""usecols"""', "[None, ['col1'], ['col1', 'col1'], ['col1', 'col2', 'col6'], ['col6',\n 'col2', 'col1'], [0], [0, 0], [0, 1, 5], [5, 1, 0], lambda x: x in [\n 'col1', 'col2']]"], {}), "('usecols', [None, ['col1'], ['col1', 'col1'], [\n 'col1', 'col2', 'col6'], ['col6', 'col2', 'col1'], [0], [0, 0], [0, 1, \n 5], [5, 1, 0], lambda x: x in ['col1', 'col2']])\n", (10149, 10326), False, 'import pytest\n'), ((11058, 11102), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cols"""', 'cols_values'], {}), "('cols', cols_values)\n", (11081, 11102), False, 'import pytest\n'), ((12568, 12636), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""names"""', "[None, ['', ''], ['name', 'name']]"], {}), "('names', [None, ['', ''], ['name', 'name']])\n", (12591, 12636), False, 'import pytest\n'), ((12973, 13095), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""names"""', "[None, [None, 's', None], ['i1', 'i2', 'i3'], ['i1', 'i1', 'i3'], ['i1',\n 'i2', 'a']]"], {}), "('names', [None, [None, 's', None], ['i1', 'i2',\n 'i3'], ['i1', 'i1', 'i3'], ['i1', 'i2', 'a']])\n", (12996, 13095), False, 'import pytest\n'), ((13557, 13612), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_multiindex"""', '[True, False]'], {}), "('is_multiindex', [True, False])\n", (13580, 13612), False, 'import pytest\n'), ((13618, 13709), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""column_names"""', "[None, ['level1', None], ['level1', 'level2']]"], {}), "('column_names', [None, ['level1', None], ['level1',\n 'level2']])\n", (13641, 13709), False, 'import pytest\n'), ((15587, 15627), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', 'values'], {}), "('value', values)\n", (15610, 15627), False, 'import pytest\n'), ((16410, 16461), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""join"""', "['inner', 'outer']"], {}), "('join', ['inner', 'outer'])\n", (16433, 16461), False, 'import pytest\n'), ((16467, 16515), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sort"""', 'bool_arg_values'], {}), "('sort', bool_arg_values)\n", (16490, 16515), False, 'import pytest\n'), ((16521, 16577), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ignore_index"""', 'bool_arg_values'], {}), "('ignore_index', bool_arg_values)\n", (16544, 16577), False, 'import pytest\n'), ((19709, 19760), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""join"""', "['inner', 'outer']"], {}), "('join', ['inner', 'outer'])\n", (19732, 19760), False, 'import pytest\n'), ((19766, 19814), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sort"""', 'bool_arg_values'], {}), "('sort', bool_arg_values)\n", (19789, 19814), False, 'import pytest\n'), ((19820, 19876), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ignore_index"""', 'bool_arg_values'], {}), "('ignore_index', bool_arg_values)\n", (19843, 19876), False, 'import pytest\n'), ((20495, 20537), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""join"""', "['inner']"], {}), "('join', ['inner'])\n", (20518, 20537), False, 'import pytest\n'), ((20543, 20591), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sort"""', 'bool_arg_values'], {}), "('sort', bool_arg_values)\n", (20566, 20591), False, 'import pytest\n'), ((20597, 20653), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""ignore_index"""', 'bool_arg_values'], {}), "('ignore_index', bool_arg_values)\n", (20620, 20653), False, 'import pytest\n'), ((22370, 22413), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cols"""', 'cols_value'], {}), "('cols', cols_value)\n", (22393, 22413), False, 'import pytest\n'), ((22419, 22471), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""as_index"""', 'bool_arg_values'], {}), "('as_index', bool_arg_values)\n", (22442, 22471), False, 'import pytest\n'), ((22726, 22769), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cols"""', 'cols_value'], {}), "('cols', cols_value)\n", (22749, 22769), False, 'import pytest\n'), ((22775, 22827), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""as_index"""', 'bool_arg_values'], {}), "('as_index', bool_arg_values)\n", (22798, 22827), False, 'import pytest\n'), ((23090, 23223), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Currently mean() passes a lambda into query compiler which cannot be executed on OmniSci engine"""'}), "(reason=\n 'Currently mean() passes a lambda into query compiler which cannot be executed on OmniSci engine'\n )\n", (23107, 23223), False, 'import pytest\n'), ((23233, 23276), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cols"""', 'cols_value'], {}), "('cols', cols_value)\n", (23256, 23276), False, 'import pytest\n'), ((23282, 23334), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""as_index"""', 'bool_arg_values'], {}), "('as_index', bool_arg_values)\n", (23305, 23334), False, 'import pytest\n'), ((23593, 23636), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cols"""', 'cols_value'], {}), "('cols', cols_value)\n", (23616, 23636), False, 'import pytest\n'), ((23642, 23694), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""as_index"""', 'bool_arg_values'], {}), "('as_index', bool_arg_values)\n", (23665, 23694), False, 'import pytest\n'), ((23996, 24056), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""agg"""', "['count', 'size', 'nunique']"], {}), "('agg', ['count', 'size', 'nunique'])\n", (24019, 24056), False, 'import pytest\n'), ((24662, 24818), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Function specified as a string should be passed into query compiler API, but currently it is transformed into a lambda"""'}), "(reason=\n 'Function specified as a string should be passed into query compiler API, but currently it is transformed into a lambda'\n )\n", (24679, 24818), False, 'import pytest\n'), ((24828, 24871), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cols"""', 'cols_value'], {}), "('cols', cols_value)\n", (24851, 24871), False, 'import pytest\n'), ((24877, 24929), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""as_index"""', 'bool_arg_values'], {}), "('as_index', bool_arg_values)\n", (24900, 24929), False, 'import pytest\n'), ((25811, 25861), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['sum', 'size']"], {}), "('method', ['sum', 'size'])\n", (25834, 25861), False, 'import pytest\n'), ((26248, 26303), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""by"""', "[['a'], ['a', 'b', 'c']]"], {}), "('by', [['a'], ['a', 'b', 'c']])\n", (26271, 26303), False, 'import pytest\n'), ((26309, 26356), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""agg"""', "['sum', 'size']"], {}), "('agg', ['sum', 'size'])\n", (26332, 26356), False, 'import pytest\n'), ((26362, 26412), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""as_index"""', '[True, False]'], {}), "('as_index', [True, False])\n", (26385, 26412), False, 'import pytest\n'), ((28572, 28624), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""as_index"""', 'bool_arg_values'], {}), "('as_index', bool_arg_values)\n", (28595, 28624), False, 'import pytest\n'), ((29912, 29964), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""as_index"""', 'bool_arg_values'], {}), "('as_index', bool_arg_values)\n", (29935, 29964), False, 'import pytest\n'), ((37324, 37385), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""agg"""', "['max', 'min', 'sum', 'mean']"], {}), "('agg', ['max', 'min', 'sum', 'mean'])\n", (37347, 37385), False, 'import pytest\n'), ((37391, 37441), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""skipna"""', 'bool_arg_values'], {}), "('skipna', bool_arg_values)\n", (37414, 37441), False, 'import pytest\n'), ((37839, 37918), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[data, int_data]'], {'ids': "['nan_data', 'int_data']"}), "('data', [data, int_data], ids=['nan_data', 'int_data'])\n", (37862, 37918), False, 'import pytest\n'), ((37924, 37979), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cols"""', "['a', 'd', ['a', 'd']]"], {}), "('cols', ['a', 'd', ['a', 'd']])\n", (37947, 37979), False, 'import pytest\n'), ((37985, 38033), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dropna"""', '[True, False]'], {}), "('dropna', [True, False])\n", (38008, 38033), False, 'import pytest\n'), ((38039, 38078), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sort"""', '[True]'], {}), "('sort', [True])\n", (38062, 38078), False, 'import pytest\n'), ((38084, 38135), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ascending"""', '[True, False]'], {}), "('ascending', [True, False])\n", (38107, 38135), False, 'import pytest\n'), ((39254, 39342), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['sum', 'mean', 'max', 'min', 'count', 'nunique']"], {}), "('method', ['sum', 'mean', 'max', 'min', 'count',\n 'nunique'])\n", (39277, 39342), False, 'import pytest\n'), ((40436, 40485), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[data, int_data]'], {}), "('data', [data, int_data])\n", (40459, 40485), False, 'import pytest\n'), ((40491, 40541), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dropna"""', 'bool_arg_values'], {}), "('dropna', bool_arg_values)\n", (40514, 40541), False, 'import pytest\n'), ((41119, 41159), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""on"""', 'on_values'], {}), "('on', on_values)\n", (41142, 41159), False, 'import pytest\n'), ((41165, 41207), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""how"""', 'how_values'], {}), "('how', how_values)\n", (41188, 41207), False, 'import pytest\n'), ((41213, 41259), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sort"""', '[True, False]'], {}), "('sort', [True, False])\n", (41236, 41259), False, 'import pytest\n'), ((47560, 47602), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""how"""', 'how_values'], {}), "('how', how_values)\n", (47583, 47602), False, 'import pytest\n'), ((47608, 47696), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""left_on, right_on"""', "[['a', 'c'], [['a', 'b'], ['c', 'b']]]"], {}), "('left_on, right_on', [['a', 'c'], [['a', 'b'], ['c',\n 'b']]])\n", (47631, 47696), False, 'import pytest\n'), ((49350, 49400), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fill_value"""', 'fill_values'], {}), "('fill_value', fill_values)\n", (49373, 49400), False, 'import pytest\n'), ((50721, 50771), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fill_value"""', 'fill_values'], {}), "('fill_value', fill_values)\n", (50744, 50771), False, 'import pytest\n'), ((51722, 51772), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fill_value"""', 'fill_values'], {}), "('fill_value', fill_values)\n", (51745, 51772), False, 'import pytest\n'), ((52753, 52803), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fill_value"""', 'fill_values'], {}), "('fill_value', fill_values)\n", (52776, 52803), False, 'import pytest\n'), ((53843, 53893), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fill_value"""', 
'fill_values'], {}), "('fill_value', fill_values)\n", (53866, 53893), False, 'import pytest\n'), ((54831, 54879), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cmp_fn"""', 'cmp_fn_values'], {}), "('cmp_fn', cmp_fn_values)\n", (54854, 54879), False, 'import pytest\n'), ((55224, 55272), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cmp_fn"""', 'cmp_fn_values'], {}), "('cmp_fn', cmp_fn_values)\n", (55247, 55272), False, 'import pytest\n'), ((55474, 55522), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cmp_fn"""', 'cmp_fn_values'], {}), "('cmp_fn', cmp_fn_values)\n", (55497, 55522), False, 'import pytest\n'), ((55898, 55946), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cmp_fn"""', 'cmp_fn_values'], {}), "('cmp_fn', cmp_fn_values)\n", (55921, 55946), False, 'import pytest\n'), ((55952, 55999), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', "[2, 2.2, 'a']"], {}), "('value', [2, 2.2, 'a'])\n", (55975, 55999), False, 'import pytest\n'), ((56005, 56074), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', 'test_data_values'], {'ids': 'test_data_keys'}), "('data', test_data_values, ids=test_data_keys)\n", (56028, 56074), False, 'import pytest\n'), ((56460, 56564), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Requires fix in OmniSci: https://github.com/intel-ai/omniscidb/pull/178"""'}), "(reason=\n 'Requires fix in OmniSci: https://github.com/intel-ai/omniscidb/pull/178')\n", (56477, 56564), False, 'import pytest\n'), ((59029, 59073), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cols"""', 'cols_values'], {}), "('cols', cols_values)\n", (59052, 59073), False, 'import pytest\n'), ((59079, 59135), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ignore_index"""', 'bool_arg_values'], {}), "('ignore_index', bool_arg_values)\n", (59102, 59135), False, 'import pytest\n'), ((59141, 59195), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""ascending"""', 'ascending_values'], {}), "('ascending', ascending_values)\n", (59164, 59195), False, 'import pytest\n'), ((59201, 59257), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""index_cols"""', 'index_cols_values'], {}), "('index_cols', index_cols_values)\n", (59224, 59257), False, 'import pytest\n'), ((59946, 60005), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ascending"""', 'ascending_list_values'], {}), "('ascending', ascending_list_values)\n", (59969, 60005), False, 'import pytest\n'), ((60287, 60341), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ascending"""', 'ascending_values'], {}), "('ascending', ascending_values)\n", (60310, 60341), False, 'import pytest\n'), ((60611, 60655), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cols"""', 'cols_values'], {}), "('cols', cols_values)\n", (60634, 60655), False, 'import pytest\n'), ((60661, 60715), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ascending"""', 'ascending_values'], {}), "('ascending', ascending_values)\n", (60684, 60715), False, 'import pytest\n'), ((60721, 60779), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""na_position"""', 'na_position_values'], {}), "('na_position', na_position_values)\n", (60744, 60779), False, 'import pytest\n'), ((62374, 62439), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[bad_for_arrow, bad_for_omnisci]'], {}), "('data', [bad_for_arrow, bad_for_omnisci])\n", (62397, 62439), False, 'import pytest\n'), ((62886, 62951), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[bad_for_arrow, bad_for_omnisci]'], {}), "('data', [bad_for_arrow, bad_for_omnisci])\n", (62909, 62951), False, 'import pytest\n'), ((63747, 63806), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""subset"""', "[None, ['col1', 'col2']]"], {}), "('subset', [None, ['col1', 'col2']])\n", (63770, 63806), False, 'import pytest\n'), ((63812, 63858), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""how"""', "['all', 'any']"], {}), "('how', ['all', 'any'])\n", (63835, 63858), False, 'import pytest\n'), ((64653, 64717), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Dropna logic for GroupBy is disabled for now"""'], {}), "('Dropna logic for GroupBy is disabled for now')\n", (64669, 64717), False, 'import pytest\n'), ((64723, 64798), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""by"""', "['col1', ['col1', 'col2'], ['col1', 'col4']]"], {}), "('by', ['col1', ['col1', 'col2'], ['col1', 'col4']])\n", (64746, 64798), False, 'import pytest\n'), ((64804, 64852), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dropna"""', '[True, False]'], {}), "('dropna', [True, False])\n", (64827, 64852), False, 'import pytest\n'), ((65295, 65488), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data,is_good"""', "[[['1', '2', None, '2', '1'], True], [[None, '3', None, '2', '1'], True], [\n [1, '2', None, '2', '1'], False], [[None, 3, None, '2', '1'], False]]"], {}), "('data,is_good', [[['1', '2', None, '2', '1'], True],\n [[None, '3', None, '2', '1'], True], [[1, '2', None, '2', '1'], False],\n [[None, 3, None, '2', '1'], False]])\n", (65318, 65488), False, 'import pytest\n'), ((68349, 68378), 'pytest.main', 'pytest.main', (["['-v', __file__]"], {}), "(['-v', __file__])\n", (68360, 68378), False, 'import pytest\n'), ((2829, 2898), 'os.path.join', 'os.path.join', (['self.root', '"""modin/pandas/test/data"""', '"""test_usecols.csv"""'], {}), "(self.root, 'modin/pandas/test/data', 'test_usecols.csv')\n", (2841, 2898), False, 'import os\n'), ((3482, 3541), 'os.path.join', 'os.path.join', (['self.root', '"""examples/data/boston_housing.csv"""'], {}), "(self.root, 'examples/data/boston_housing.csv')\n", (3494, 3541), False, 'import os\n'), ((4024, 4098), 'os.path.join', 'os.path.join', (['self.root', '"""modin/pandas/test/data"""', '"""test_time_parsing.csv"""'], {}), "(self.root, 
'modin/pandas/test/data', 'test_time_parsing.csv')\n", (4036, 4098), False, 'import os\n'), ((5063, 5122), 'os.path.join', 'os.path.join', (['self.root', '"""examples/data/boston_housing.csv"""'], {}), "(self.root, 'examples/data/boston_housing.csv')\n", (5075, 5122), False, 'import os\n'), ((5826, 5896), 'os.path.join', 'os.path.join', (['self.root', '"""modin/pandas/test/data"""', '"""test_null_col.csv"""'], {}), "(self.root, 'modin/pandas/test/data', 'test_null_col.csv')\n", (5838, 5896), False, 'import os\n'), ((5933, 6050), 'pandas.read_csv', 'pandas.read_csv', (['csv_file'], {'names': "['a', 'b', 'c']", 'dtype': "{'a': 'int64', 'b': 'int64', 'c': null_dtype}", 'skiprows': '(1)'}), "(csv_file, names=['a', 'b', 'c'], dtype={'a': 'int64', 'b':\n 'int64', 'c': null_dtype}, skiprows=1)\n", (5948, 6050), False, 'import pandas\n'), ((6160, 6273), 'modin.pandas.read_csv', 'pd.read_csv', (['csv_file'], {'names': "['a', 'b', 'c']", 'dtype': "{'a': 'int64', 'b': 'int64', 'c': null_dtype}", 'skiprows': '(1)'}), "(csv_file, names=['a', 'b', 'c'], dtype={'a': 'int64', 'b':\n 'int64', 'c': null_dtype}, skiprows=1)\n", (6171, 6273), True, 'import modin.pandas as pd\n'), ((6644, 6663), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (6653, 6663), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((6720, 6789), 'os.path.join', 'os.path.join', (['self.root', '"""modin/pandas/test/data"""', '"""test_usecols.csv"""'], {}), "(self.root, 'modin/pandas/test/data', 'test_usecols.csv')\n", (6732, 6789), False, 'import os\n'), ((6805, 6830), 'pandas.read_csv', 'pandas.read_csv', (['csv_file'], {}), '(csv_file)\n', (6820, 6830), False, 'import pandas\n'), ((6846, 6871), 'pandas.read_csv', 'pandas.read_csv', (['csv_file'], {}), '(csv_file)\n', (6861, 6871), False, 'import pandas\n'), ((6886, 6913), 
'pandas.concat', 'pandas.concat', (['[ref1, ref2]'], {}), '([ref1, ref2])\n', (6899, 6913), False, 'import pandas\n'), ((6930, 6955), 'pandas.read_csv', 'pandas.read_csv', (['csv_file'], {}), '(csv_file)\n', (6945, 6955), False, 'import pandas\n'), ((6971, 6996), 'pandas.read_csv', 'pandas.read_csv', (['csv_file'], {}), '(csv_file)\n', (6986, 6996), False, 'import pandas\n'), ((7011, 7034), 'modin.pandas.concat', 'pd.concat', (['[exp1, exp2]'], {}), '([exp1, exp2])\n', (7020, 7034), True, 'import modin.pandas as pd\n'), ((7292, 7361), 'os.path.join', 'os.path.join', (['self.root', '"""modin/pandas/test/data"""', '"""test_usecols.csv"""'], {}), "(self.root, 'modin/pandas/test/data', 'test_usecols.csv')\n", (7304, 7361), False, 'import os\n'), ((7681, 7748), 'os.path.join', 'os.path.join', (['self.root', '"""modin/pandas/test/data"""', '"""test_delim.csv"""'], {}), "(self.root, 'modin/pandas/test/data', 'test_delim.csv')\n", (7693, 7748), False, 'import os\n'), ((8032, 8101), 'os.path.join', 'os.path.join', (['self.root', '"""modin/pandas/test/data"""', '"""test_usecols.csv"""'], {}), "(self.root, 'modin/pandas/test/data', 'test_usecols.csv')\n", (8044, 8101), False, 'import os\n'), ((8207, 8242), 'pandas.read_csv', 'pandas.read_csv', (['csv_file'], {}), '(csv_file, **kwargs)\n', (8222, 8242), False, 'import pandas\n'), ((8320, 8367), 'modin.pandas.read_csv', 'pd.read_csv', (['csv_file'], {'engine': '"""arrow"""'}), "(csv_file, **kwargs, engine='arrow')\n", (8331, 8367), True, 'import modin.pandas as pd\n'), ((12527, 12540), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (12536, 12540), True, 'import numpy as np\n'), ((12547, 12560), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (12556, 12560), True, 'import numpy as np\n'), ((12936, 12966), 'modin.pandas.test.utils.df_equals', 'df_equals', (['pandas_df', 'modin_df'], {}), '(pandas_df, modin_df)\n', (12945, 12966), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, 
test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((13517, 13550), 'modin.pandas.test.utils.eval_general', 'eval_general', (['pd', 'pandas', 'applier'], {}), '(pd, pandas, applier)\n', (13529, 13550), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((14127, 14202), 'pandas.MultiIndex.from_tuples', 'pandas.MultiIndex.from_tuples', (["[('a', 'b'), ('b', 'c')]"], {'names': 'column_names'}), "([('a', 'b'), ('b', 'c')], names=column_names)\n", (14156, 14202), False, 'import pandas\n'), ((14672, 14712), 'pandas.DataFrame', 'pandas.DataFrame', (['self.data'], {'index': 'index'}), '(self.data, index=index)\n', (14688, 14712), False, 'import pandas\n'), ((14774, 14810), 'modin.pandas.DataFrame', 'pd.DataFrame', (['self.data'], {'index': 'index'}), '(self.data, index=index)\n', (14786, 14810), True, 'import modin.pandas as pd\n'), ((14880, 14910), 'modin.pandas.test.utils.df_equals', 'df_equals', (['pandas_df', 'modin_df'], {}), '(pandas_df, modin_df)\n', (14889, 14910), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((15104, 15144), 'pandas.DataFrame', 'pandas.DataFrame', (['self.data'], {'index': 'index'}), '(self.data, index=index)\n', (15120, 15144), False, 'import pandas\n'), ((15236, 15272), 'modin.pandas.DataFrame', 'pd.DataFrame', (['self.data'], {'index': 'index'}), '(self.data, index=index)\n', (15248, 15272), True, 'import modin.pandas as pd\n'), ((15394, 15424), 'modin.pandas.test.utils.df_equals', 'df_equals', (['pandas_df', 'modin_df'], {}), '(pandas_df, modin_df)\n', (15403, 15424), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, 
eval_general, df_equals_with_non_stable_indices\n'), ((21164, 21191), 'pandas.DataFrame', 'pandas.DataFrame', (['self.data'], {}), '(self.data)\n', (21180, 21191), False, 'import pandas\n'), ((21239, 21267), 'pandas.DataFrame', 'pandas.DataFrame', (['self.data3'], {}), '(self.data3)\n', (21255, 21267), False, 'import pandas\n'), ((21316, 21363), 'pandas.concat', 'pandas.concat', (['[df1, df2]'], {'axis': '(1)', 'join': '"""inner"""'}), "([df1, df2], axis=1, join='inner')\n", (21329, 21363), False, 'import pandas\n'), ((21378, 21421), 'modin.pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)', 'join': '"""inner"""'}), "([df1, df2], axis=1, join='inner')\n", (21387, 21421), True, 'import modin.pandas as pd\n'), ((21431, 21450), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (21440, 21450), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((21495, 21542), 'pandas.concat', 'pandas.concat', (['[df1, df2]'], {'axis': '(1)', 'join': '"""inner"""'}), "([df1, df2], axis=1, join='inner')\n", (21508, 21542), False, 'import pandas\n'), ((21557, 21600), 'modin.pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)', 'join': '"""inner"""'}), "([df1, df2], axis=1, join='inner')\n", (21566, 21600), True, 'import modin.pandas as pd\n'), ((21610, 21629), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (21619, 21629), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((21684, 21711), 'pandas.DataFrame', 'pandas.DataFrame', (['self.data'], {}), '(self.data)\n', (21700, 21711), False, 'import pandas\n'), ((21766, 21794), 'pandas.DataFrame', 'pandas.DataFrame', (['self.data3'], {}), '(self.data3)\n', (21782, 21794), 
False, 'import pandas\n'), ((21850, 21897), 'pandas.concat', 'pandas.concat', (['[df1, df2]'], {'axis': '(1)', 'join': '"""inner"""'}), "([df1, df2], axis=1, join='inner')\n", (21863, 21897), False, 'import pandas\n'), ((21912, 21955), 'modin.pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)', 'join': '"""inner"""'}), "([df1, df2], axis=1, join='inner')\n", (21921, 21955), True, 'import modin.pandas as pd\n'), ((21965, 21984), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (21974, 21984), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((22037, 22084), 'pandas.concat', 'pandas.concat', (['[df1, df2]'], {'axis': '(1)', 'join': '"""inner"""'}), "([df1, df2], axis=1, join='inner')\n", (22050, 22084), False, 'import pandas\n'), ((22099, 22142), 'modin.pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)', 'join': '"""inner"""'}), "([df1, df2], axis=1, join='inner')\n", (22108, 22142), True, 'import modin.pandas as pd\n'), ((22152, 22171), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (22161, 22171), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((27725, 27750), 'modin.pandas.test.utils.df_equals', 'df_equals', (['md_res', 'pd_res'], {}), '(md_res, pd_res)\n', (27734, 27750), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((27841, 27931), 'pandas.to_datetime', 'pandas.to_datetime', (["['20190902', '20180913', '20190921', '20180903']"], {'format': '"""%Y%m%d"""'}), "(['20190902', '20180913', '20190921', '20180903'], format\n ='%Y%m%d')\n", (27859, 
27931), False, 'import pandas\n'), ((31128, 31159), 'pandas.DataFrame', 'pandas.DataFrame', (['self.h2o_data'], {}), '(self.h2o_data)\n', (31144, 31159), False, 'import pandas\n'), ((31512, 31528), 'modin.pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (31524, 31528), True, 'import modin.pandas as pd\n'), ((31749, 31768), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_df'], {}), '(modin_df)\n', (31758, 31768), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((31829, 31848), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (31838, 31848), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((32043, 32059), 'modin.pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (32055, 32059), True, 'import modin.pandas as pd\n'), ((32287, 32306), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_df'], {}), '(modin_df)\n', (32296, 32306), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((32418, 32437), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (32427, 32437), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((32639, 32655), 'modin.pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (32651, 32655), True, 'import modin.pandas as pd\n'), ((32890, 32909), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_df'], {}), '(modin_df)\n', (32899, 32909), False, 'from modin.pandas.test.utils import 
df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((32970, 32989), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (32979, 32989), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((33228, 33244), 'modin.pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (33240, 33244), True, 'import modin.pandas as pd\n'), ((33494, 33513), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_df'], {}), '(modin_df)\n', (33503, 33513), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((33523, 33542), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (33532, 33542), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((33778, 33794), 'modin.pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (33790, 33794), True, 'import modin.pandas as pd\n'), ((34041, 34060), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_df'], {}), '(modin_df)\n', (34050, 34060), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((34070, 34089), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (34079, 34089), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((34405, 34421), 
'modin.pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (34417, 34421), True, 'import modin.pandas as pd\n'), ((34793, 34812), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_df'], {}), '(modin_df)\n', (34802, 34812), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((34873, 34892), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (34882, 34892), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((35153, 35169), 'modin.pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (35165, 35169), True, 'import modin.pandas as pd\n'), ((35379, 35398), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_df'], {}), '(modin_df)\n', (35388, 35398), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((35561, 35580), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (35570, 35580), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((42908, 42930), 'pandas.DataFrame', 'pandas.DataFrame', (['data'], {}), '(data)\n', (42924, 42930), False, 'import pandas\n'), ((44817, 44834), 'modin.pandas.DataFrame', 'pd.DataFrame', (['lhs'], {}), '(lhs)\n', (44829, 44834), True, 'import modin.pandas as pd\n'), ((44855, 44872), 'modin.pandas.DataFrame', 'pd.DataFrame', (['rhs'], {}), '(rhs)\n', (44867, 44872), True, 'import modin.pandas as pd\n'), ((44945, 44965), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_res'], {}), 
'(modin_res)\n', (44954, 44965), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((45012, 45031), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (45021, 45031), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((45257, 45274), 'modin.pandas.DataFrame', 'pd.DataFrame', (['lhs'], {}), '(lhs)\n', (45269, 45274), True, 'import modin.pandas as pd\n'), ((45295, 45312), 'modin.pandas.DataFrame', 'pd.DataFrame', (['rhs'], {}), '(rhs)\n', (45307, 45312), True, 'import modin.pandas as pd\n'), ((45385, 45405), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_res'], {}), '(modin_res)\n', (45394, 45405), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((45452, 45471), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (45461, 45471), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((45709, 45726), 'modin.pandas.DataFrame', 'pd.DataFrame', (['lhs'], {}), '(lhs)\n', (45721, 45726), True, 'import modin.pandas as pd\n'), ((45747, 45764), 'modin.pandas.DataFrame', 'pd.DataFrame', (['rhs'], {}), '(rhs)\n', (45759, 45764), True, 'import modin.pandas as pd\n'), ((45849, 45869), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_res'], {}), '(modin_res)\n', (45858, 45869), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, 
df_equals_with_non_stable_indices\n'), ((45916, 45935), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (45925, 45935), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((46161, 46178), 'modin.pandas.DataFrame', 'pd.DataFrame', (['lhs'], {}), '(lhs)\n', (46173, 46178), True, 'import modin.pandas as pd\n'), ((46199, 46216), 'modin.pandas.DataFrame', 'pd.DataFrame', (['rhs'], {}), '(rhs)\n', (46211, 46216), True, 'import modin.pandas as pd\n'), ((46289, 46309), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_res'], {}), '(modin_res)\n', (46298, 46309), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((46356, 46375), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (46365, 46375), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((46598, 46615), 'modin.pandas.DataFrame', 'pd.DataFrame', (['lhs'], {}), '(lhs)\n', (46610, 46615), True, 'import modin.pandas as pd\n'), ((46636, 46653), 'modin.pandas.DataFrame', 'pd.DataFrame', (['rhs'], {}), '(rhs)\n', (46648, 46653), True, 'import modin.pandas as pd\n'), ((46726, 46746), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_res'], {}), '(modin_res)\n', (46735, 46746), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((46793, 46812), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (46802, 46812), False, 'from 
modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((46874, 46935), 'pandas.to_datetime', 'pandas.to_datetime', (["['20000101', '20000201']"], {'format': '"""%Y%m%d"""'}), "(['20000101', '20000201'], format='%Y%m%d')\n", (46892, 46935), False, 'import pandas\n'), ((57168, 57258), 'pandas.to_datetime', 'pandas.to_datetime', (["['20190902', '20180913', '20190921', '20180903']"], {'format': '"""%Y%m%d"""'}), "(['20190902', '20180913', '20190921', '20180903'], format\n ='%Y%m%d')\n", (57186, 57258), False, 'import pandas\n'), ((57910, 57937), 'pandas.DataFrame', 'pandas.DataFrame', (['self.data'], {}), '(self.data)\n', (57926, 57937), False, 'import pandas\n'), ((58017, 58040), 'modin.pandas.DataFrame', 'pd.DataFrame', (['pandas_df'], {}), '(pandas_df)\n', (58029, 58040), True, 'import modin.pandas as pd\n'), ((58104, 58123), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['modin_df'], {}), '(modin_df)\n', (58113, 58123), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((58184, 58209), 'modin.pandas.test.utils.df_equals', 'df_equals', (['pandas_df', 'exp'], {}), '(pandas_df, exp)\n', (58193, 58209), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((62122, 62134), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (62131, 62134), True, 'import numpy as np\n'), ((62141, 62153), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (62150, 62153), True, 'import numpy as np\n'), ((62160, 62172), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (62169, 62172), True, 'import numpy as np\n'), ((62337, 62367), 'pyarrow.Table.from_pandas', 
'pyarrow.Table.from_pandas', (['obj'], {}), '(obj)\n', (62362, 62367), False, 'import pyarrow\n'), ((62716, 62754), 'pandas.DataFrame', 'pandas.DataFrame', (['self.bad_for_omnisci'], {}), '(self.bad_for_omnisci)\n', (62732, 62754), False, 'import pandas\n'), ((62771, 62794), 'modin.pandas.utils.from_arrow', 'pd.utils.from_arrow', (['at'], {}), '(at)\n', (62790, 62794), True, 'import modin.pandas as pd\n'), ((62856, 62879), 'modin.pandas.test.utils.df_equals', 'df_equals', (['md_df', 'pd_df'], {}), '(md_df, pd_df)\n', (62865, 62879), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((64160, 64196), 'modin.pandas.DataFrame', 'pd.DataFrame', (['self.data'], {'index': 'index'}), '(self.data, index=index)\n', (64172, 64196), True, 'import modin.pandas as pd\n'), ((64213, 64253), 'pandas.DataFrame', 'pandas.DataFrame', (['self.data'], {'index': 'index'}), '(self.data, index=index)\n', (64229, 64253), False, 'import pandas\n'), ((64513, 64589), 'pandas.MultiIndex.from_tuples', 'pandas.MultiIndex.from_tuples', (['md_res.index.values'], {'names': 'md_res.index.names'}), '(md_res.index.values, names=md_res.index.names)\n', (64542, 64589), False, 'import pandas\n'), ((64621, 64646), 'modin.pandas.test.utils.df_equals', 'df_equals', (['md_res', 'pd_res'], {}), '(md_res, pd_res)\n', (64630, 64646), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((65638, 65669), 'pandas.DataFrame', 'pandas.DataFrame', (["{'col': data}"], {}), "({'col': data})\n", (65654, 65669), False, 'import pandas\n'), ((65694, 65767), 'modin.experimental.core.execution.native.implementations.omnisci_on_native.partitioning.partition_manager.OmnisciOnNativeDataframePartitionManager._get_unsupported_cols', 
'OmnisciOnNativeDataframePartitionManager._get_unsupported_cols', (['pandas_df'], {}), '(pandas_df)\n', (65756, 65767), False, 'from modin.experimental.core.execution.native.implementations.omnisci_on_native.partitioning.partition_manager import OmnisciOnNativeDataframePartitionManager\n'), ((66199, 66242), 'modin.pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 2, 3]}"], {'index': 'index'}), "({'a': [1, 2, 3]}, index=index)\n", (66211, 66242), True, 'import modin.pandas as pd\n'), ((66369, 66398), 'modin.pandas.DataFrame', 'pd.DataFrame', (['transposed_data'], {}), '(transposed_data)\n', (66381, 66398), True, 'import modin.pandas as pd\n'), ((66468, 66527), 'modin.pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 2, 3], 'b': [1, 2, 3]}"], {'index': 'index'}), "({'a': [1, 2, 3], 'b': [1, 2, 3]}, index=index)\n", (66480, 66527), True, 'import modin.pandas as pd\n'), ((66596, 66664), 'modin.pandas.DataFrame', 'pd.DataFrame', (["{'a': [1]}"], {'index': '(None if index is None else index[:1])'}), "({'a': [1]}, index=None if index is None else index[:1])\n", (66608, 66664), True, 'import modin.pandas as pd\n'), ((66789, 66832), 'pyarrow.Table.from_pydict', 'pyarrow.Table.from_pydict', (["{'a': [1, 2, 3]}"], {}), "({'a': [1, 2, 3]})\n", (66814, 66832), False, 'import pyarrow\n'), ((66846, 66869), 'modin.pandas.utils.from_arrow', 'pd.utils.from_arrow', (['at'], {}), '(at)\n', (66865, 66869), True, 'import modin.pandas as pd\n'), ((66942, 66999), 'pyarrow.Table.from_pydict', 'pyarrow.Table.from_pydict', (["{'a': [1], 'b': [2], 'c': [3]}"], {}), "({'a': [1], 'b': [2], 'c': [3]})\n", (66967, 66999), False, 'import pyarrow\n'), ((67013, 67036), 'modin.pandas.utils.from_arrow', 'pd.utils.from_arrow', (['at'], {}), '(at)\n', (67032, 67036), True, 'import modin.pandas as pd\n'), ((67106, 67165), 'pyarrow.Table.from_pydict', 'pyarrow.Table.from_pydict', (["{'a': [1, 2, 3], 'b': [1, 2, 3]}"], {}), "({'a': [1, 2, 3], 'b': [1, 2, 3]})\n", (67131, 67165), False, 'import pyarrow\n'), 
((67179, 67202), 'modin.pandas.utils.from_arrow', 'pd.utils.from_arrow', (['at'], {}), '(at)\n', (67198, 67202), True, 'import modin.pandas as pd\n'), ((67271, 67308), 'pyarrow.Table.from_pydict', 'pyarrow.Table.from_pydict', (["{'a': [1]}"], {}), "({'a': [1]})\n", (67296, 67308), False, 'import pyarrow\n'), ((67322, 67345), 'modin.pandas.utils.from_arrow', 'pd.utils.from_arrow', (['at'], {}), '(at)\n', (67341, 67345), True, 'import modin.pandas as pd\n'), ((4607, 4642), 'pandas.read_csv', 'pandas.read_csv', (['csv_file'], {}), '(csv_file, **kwargs)\n', (4622, 4642), False, 'import pandas\n'), ((4660, 4707), 'modin.pandas.read_csv', 'pd.read_csv', (['csv_file'], {'engine': '"""arrow"""'}), "(csv_file, engine='arrow', **kwargs)\n", (4671, 4707), True, 'import modin.pandas as pd\n'), ((7085, 7104), 'modin.pandas.test.utils.df_equals', 'df_equals', (['ref', 'exp'], {}), '(ref, exp)\n', (7094, 7104), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((8477, 8507), 'modin.pandas.test.utils.df_equals', 'df_equals', (['modin_df', 'pandas_df'], {}), '(modin_df, pandas_df)\n', (8486, 8507), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((9253, 9394), 'pytest.skip', 'pytest.skip', (['"""In these cases Modin raises `ArrowEngineException` while pandas doesn\'t raise any exceptions that causes tests fails"""'], {}), '(\n "In these cases Modin raises `ArrowEngineException` while pandas doesn\'t raise any exceptions that causes tests fails"\n )\n', (9264, 9394), False, 'import pytest\n'), ((12822, 12862), 'pandas.DataFrame', 'pandas.DataFrame', (['self.data'], {'index': 'index'}), '(self.data, index=index)\n', (12838, 12862), False, 'import pandas\n'), ((12886, 12922), 'modin.pandas.DataFrame', 
'pd.DataFrame', (['self.data'], {'index': 'index'}), '(self.data, index=index)\n', (12898, 12922), True, 'import modin.pandas as pd\n'), ((17378, 17389), 'numpy.int8', 'np.int8', (['(10)'], {}), '(10)\n', (17385, 17389), True, 'import numpy as np\n'), ((17420, 17432), 'numpy.int16', 'np.int16', (['(10)'], {}), '(10)\n', (17428, 17432), True, 'import numpy as np\n'), ((17463, 17475), 'numpy.int32', 'np.int32', (['(10)'], {}), '(10)\n', (17471, 17475), True, 'import numpy as np\n'), ((17506, 17518), 'numpy.int64', 'np.int64', (['(10)'], {}), '(10)\n', (17514, 17518), True, 'import numpy as np\n'), ((17616, 17632), 'numpy.float64', 'np.float64', (['(10.1)'], {}), '(10.1)\n', (17626, 17632), True, 'import numpy as np\n'), ((17828, 17840), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (17837, 17840), True, 'import numpy as np\n'), ((19121, 19140), 'modin.pandas.test.utils.df_equals', 'df_equals', (['df1', 'df2'], {}), '(df1, df2)\n', (19130, 19140), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((27259, 27274), 'modin.pandas.Series', 'pd.Series', (['data'], {}), '(data)\n', (27268, 27274), True, 'import modin.pandas as pd\n'), ((27276, 27295), 'pandas.Series', 'pandas.Series', (['data'], {}), '(data)\n', (27289, 27295), False, 'import pandas\n'), ((38516, 38716), 'pytest.xfail', 'pytest.xfail', ([], {'reason': '"""\'dropna\' parameter is forcibly disabled in OmniSci\'s GroupBydue to performance issues, you can track this problem at:https://github.com/modin-project/modin/issues/2896"""'}), '(reason=\n "\'dropna\' parameter is forcibly disabled in OmniSci\'s GroupBydue to performance issues, you can track this problem at:https://github.com/modin-project/modin/issues/2896"\n )\n', (38528, 38716), False, 'import pytest\n'), ((62299, 62320), 'pandas.DataFrame', 'pandas.DataFrame', (['obj'], {}), '(obj)\n', (62315, 62320), 
False, 'import pandas\n'), ((66027, 66050), 'pandas.Index', 'pandas.Index', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (66039, 66050), False, 'import pandas\n'), ((66064, 66119), 'pandas.MultiIndex.from_tuples', 'pandas.MultiIndex.from_tuples', (['[(1, 1), (2, 2), (3, 3)]'], {}), '([(1, 1), (2, 2), (3, 3)])\n', (66093, 66119), False, 'import pandas\n'), ((1924, 1951), 'os.path.abspath', 'os.path.abspath', (['modin_root'], {}), '(modin_root)\n', (1939, 1951), False, 'import os\n'), ((4770, 4783), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['rm'], {}), '(rm)\n', (4779, 4783), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((4800, 4859), 'modin.pandas.test.utils.df_equals', 'df_equals', (["rm['timestamp'].dt.year", "rp['timestamp'].dt.year"], {}), "(rm['timestamp'].dt.year, rp['timestamp'].dt.year)\n", (4809, 4859), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((4876, 4937), 'modin.pandas.test.utils.df_equals', 'df_equals', (["rm['timestamp'].dt.month", "rp['timestamp'].dt.month"], {}), "(rm['timestamp'].dt.month, rp['timestamp'].dt.month)\n", (4885, 4937), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((4954, 5011), 'modin.pandas.test.utils.df_equals', 'df_equals', (["rm['timestamp'].dt.day", "rp['timestamp'].dt.day"], {}), "(rm['timestamp'].dt.day, rp['timestamp'].dt.day)\n", (4963, 5011), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((6571, 6585), 'modin.pandas.test.utils.to_pandas', 
'to_pandas', (['exp'], {}), '(exp)\n', (6580, 6585), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((17874, 17886), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (17883, 17886), True, 'import numpy as np\n'), ((18410, 18422), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (18419, 18422), True, 'import numpy as np\n'), ((35963, 35976), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['df'], {}), '(df)\n', (35972, 35976), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((36608, 36621), 'modin.pandas.test.utils.to_pandas', 'to_pandas', (['df'], {}), '(df)\n', (36617, 36621), False, 'from modin.pandas.test.utils import df_equals, bool_arg_values, to_pandas, test_data_values, test_data_keys, generate_multiindex, eval_general, df_equals_with_non_stable_indices\n'), ((47274, 47286), 'numpy.int64', 'np.int64', (['(-1)'], {}), '(-1)\n', (47282, 47286), True, 'import numpy as np\n'), ((3130, 3147), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (3138, 3147), True, 'import numpy as np\n'), ((3154, 3171), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (3162, 3171), True, 'import numpy as np\n'), ((18474, 18486), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (18483, 18486), True, 'import numpy as np\n'), ((39779, 39804), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (39791, 39804), False, 'import pytest\n'), ((40113, 40167), 're.match', 're.match', (['""".*transpose.*defaulting to pandas"""', 'message'], {}), "('.*transpose.*defaulting to pandas', message)\n", (40121, 40167), False, 'import re\n'), ((19017, 19039), 'modin.utils.try_cast_to_pandas', 'try_cast_to_pandas', (['df'], {}), '(df)\n', (19035, 19039), 
False, 'from modin.utils import try_cast_to_pandas\n'), ((37261, 37283), 'pandas.DataFrame', 'pandas.DataFrame', (['data'], {}), '(data)\n', (37277, 37283), False, 'import pandas\n'), ((38440, 38458), 'pandas.core.dtypes.common.is_list_like', 'is_list_like', (['cols'], {}), '(cols)\n', (38452, 38458), False, 'from pandas.core.dtypes.common import is_list_like\n')] |
# Build a binary analysis mask from a map template: start from an all-ones
# mask, carve out bright point sources, per-row "monster" sources, and dusty
# regions, then write the mask and a downgraded quick-look plot.
# Driven by a pspy dictionary file given as the first command-line argument.
import sys
import numpy as np
import pandas as pd
from pspy import so_dict, so_map
d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
# The template defines the pixelization/geometry of the output mask.
binary = so_map.read_map(d["template"])
if binary.data.ndim > 2:
    # Only use temperature (first component of a (T, Q, U) map)
    binary.data = binary.data[0]
binary.data = binary.data.astype(np.int16)
binary.data[:] = 1  # start fully unmasked (1 = keep)
# Sigurd point sources
if "point_source_file" in d:
    print("Adding point sources...")
    # NOTE(review): "\s+" relies on "\s" not being a recognized escape in a
    # plain string; a raw string r"\s+" would be the conventional spelling.
    df = pd.read_table(d["point_source_file"], escapechar="#", sep="\s+")
    # Keep only bright sources with a good signal-to-noise ratio
    # (thresholds configurable via the dict, with defaults 15 / 5).
    high_flux_good_SNR = (df.Tflux > d.get("point_source_Tflux", 15)) & (
        df.SNR > d.get("point_source_SNR", 5)
    )
    df = df[high_flux_good_SNR]
    coordinates = np.deg2rad([df.dec, df.ra])
    mask = so_map.generate_source_mask(binary, coordinates, d.get("point_source_radius", 5.0))
# Monster sources
if "monster_source_file" in d:
    print("Adding monster point sources...")
    df = pd.read_csv(d["monster_source_file"], comment="#")
    # Each monster source carries its own masking radius.
    # NOTE(review): `mask` only exists if "point_source_file" was provided
    # above; this branch (and everything below) assumes it was — confirm.
    for index, row in df.iterrows():
        mask.data *= so_map.generate_source_mask(
            binary, np.deg2rad([row.dec, row.ra]), row.radius
        ).data
# Dust
if "dust_file" in d:
    print("Adding dust sources...")
    # Dust map is assumed to already be a multiplicative 0/1 (or apodized) mask.
    dust = so_map.read_map(d["dust_file"])
    mask.data *= dust.data
print("Writing mask...")
mask.write_map(d["output_file"])
mask.downgrade(4).plot(file_name=d["output_file"].replace(".fits", ""))
| [
"pspy.so_dict.so_dict",
"pandas.read_csv",
"pspy.so_map.read_map",
"numpy.deg2rad",
"pandas.read_table"
] | [((89, 106), 'pspy.so_dict.so_dict', 'so_dict.so_dict', ([], {}), '()\n', (104, 106), False, 'from pspy import so_dict, so_map\n'), ((147, 177), 'pspy.so_map.read_map', 'so_map.read_map', (["d['template']"], {}), "(d['template'])\n", (162, 177), False, 'from pspy import so_dict, so_map\n'), ((424, 489), 'pandas.read_table', 'pd.read_table', (["d['point_source_file']"], {'escapechar': '"""#"""', 'sep': '"""\\\\s+"""'}), "(d['point_source_file'], escapechar='#', sep='\\\\s+')\n", (437, 489), True, 'import pandas as pd\n'), ((665, 692), 'numpy.deg2rad', 'np.deg2rad', (['[df.dec, df.ra]'], {}), '([df.dec, df.ra])\n', (675, 692), True, 'import numpy as np\n'), ((892, 942), 'pandas.read_csv', 'pd.read_csv', (["d['monster_source_file']"], {'comment': '"""#"""'}), "(d['monster_source_file'], comment='#')\n", (903, 942), True, 'import pandas as pd\n'), ((1183, 1214), 'pspy.so_map.read_map', 'so_map.read_map', (["d['dust_file']"], {}), "(d['dust_file'])\n", (1198, 1214), False, 'from pspy import so_dict, so_map\n'), ((1050, 1079), 'numpy.deg2rad', 'np.deg2rad', (['[row.dec, row.ra]'], {}), '([row.dec, row.ra])\n', (1060, 1079), True, 'import numpy as np\n')] |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
def generate_rnn(rng, N, g, tau, dt, max_firing_rate):
  """Build the hyper-parameter dictionary for a vanilla chaotic RNN.

  Args:
    rng: numpy random number generator used to draw the weights
    N: number of hidden units
    g: scaling of recurrent weight matrix in g W, with W ~ N(0, 1/N)
    tau: time scale of individual unit dynamics
    dt: time step for equation updates
    max_firing_rate: how to rescale the (-1, 1) firing rates

  Returns:
    dict of these parameters, plus a derived conversion factor.
  """
  rnn = {
      'N': N,
      # Dict literals evaluate in order, so the rng draw sequence is
      # W (NxN), then Bin (N), then Bin2 (N).
      'W': rng.randn(N, N) / np.sqrt(N),
      'Bin': rng.randn(N) / np.sqrt(1.0),
      'Bin2': rng.randn(N) / np.sqrt(1.0),
      'b': np.zeros(N),
      'g': g,
      'tau': tau,
      'dt': dt,
      'max_firing_rate': max_firing_rate,
  }
  spikes_per_sec = rnn['max_firing_rate']
  bins_per_sec = 1.0 / rnn['dt']
  # Used for plotting in LFADS: expected spikes per time bin.
  rnn['conversion_factor'] = spikes_per_sec / bins_per_sec
  return rnn
def generate_data(rnn, T, E, x0s=None, P_sxn=None, input_magnitude=0.0,
                  input_times=None):
  """Generates data from a randomly initialized RNN.

  Args:
    rnn: the rnn parameter dict produced by generate_rnn
    T: time in seconds to run (divided by rnn['dt'] to get steps, rounded down)
    E: total number of examples (trials)
    x0s: N x E array of initial states, one column per trial.
      NOTE(review): despite the None default, a value is required -- x0s[:, e]
      is indexed unconditionally below; confirm with callers.
    P_sxn: optional S x N projection/subsampling matrix (identity if None)
    input_magnitude: size of the one-step pulse input delivered at input_times
    input_times: optional length-E sequence giving the pulse time per trial

  Returns:
    (data_e, x0s, inputs_e): a list of E SxT rate arrays (min-max normalized
    per channel), the initial states, and a list of E 1xT input traces.
  """
  N = rnn['N']
  def run_rnn(rnn, x0, ntime_steps, input_time=None):
    # Euler-integrate the RNN dynamics from x0 and return the firing
    # rates r = tanh(x) plus the input trace u.
    rs = np.zeros([N,ntime_steps])
    x_tm1 = x0
    r_tm1 = np.tanh(x0)
    tau = rnn['tau']
    dt = rnn['dt']
    # x_t = (1 - dt/tau) x_{t-1} + (dt/tau)(g W r_{t-1} + B u + b)
    alpha = (1.0-dt/tau)
    W = dt/tau*rnn['W']*rnn['g']
    Bin = dt/tau*rnn['Bin']
    Bin2 = dt/tau*rnn['Bin2']  # NOTE(review): computed but never used below
    b = dt/tau*rnn['b']
    us = np.zeros([1, ntime_steps])
    for t in range(ntime_steps):
      x_t = alpha*x_tm1 + np.dot(W,r_tm1) + b
      if input_time is not None and t == input_time:
        # One-step pulse input of size input_magnitude.
        us[0,t] = input_magnitude
        x_t += Bin * us[0,t] # DCS is this what was used?
      r_t = np.tanh(x_t)
      x_tm1 = x_t
      r_tm1 = r_t
      rs[:,t] = r_t
    return rs, us
  if P_sxn is None:
    P_sxn = np.eye(N)
  ntime_steps = int(T / rnn['dt'])
  data_e = []
  inputs_e = []
  for e in range(E):
    input_time = input_times[e] if input_times is not None else None
    r_nxt, u_uxt = run_rnn(rnn, x0s[:,e], ntime_steps, input_time)
    # Project the N hidden units down to the S observed channels.
    r_sxt = np.dot(P_sxn, r_nxt)
    inputs_e.append(u_uxt)
    data_e.append(r_sxt)
  # Min-max normalize each channel of each trial to [0, 1].
  S = P_sxn.shape[0]
  data_e = normalize_rates(data_e, E, S)
  return data_e, x0s, inputs_e
def normalize_rates(data_e, E, S):
  """Min-max normalize each channel of each trial, in place.

  Normalization is done per channel, per trial, so identical RNN runs can
  end up with different offsets depending on when/where each trial peaks.

  Args:
    data_e: list of E SxT trial arrays (mutated in place)
    E: number of trials
    S: number of channels per trial

  Returns:
    The same list, with every channel rescaled to [0, 1].
  """
  for trial_idx in range(E):
    trial_sxt = data_e[trial_idx]
    for ch in range(S):
      lo = np.min(trial_sxt[ch, :])
      hi = np.max(trial_sxt[ch, :])
      # A flat channel cannot be normalized -- flag it loudly.
      assert hi - lo != 0, 'Something wrong'
      trial_sxt[ch, :] = (trial_sxt[ch, :] - lo) / (hi - lo)
    data_e[trial_idx] = trial_sxt
  return data_e
def spikify_data(data_e, rng, dt=1.0, max_firing_rate=100):
  """ Apply spikes to a continuous dataset whose values are between 0.0 and 1.0

  Args:
    data_e: nexamples length list of NxT trials
    rng: numpy random number generator used for the Poisson draws
    dt: how often the data are sampled
    max_firing_rate: the firing rate that is associated with a value of 1.0

  Returns:
    spikified_e: a list of length b of the data represented as spikes,
    sampled from the underlying poisson process.
  """
  E = len(data_e)
  spikes_e = []
  for e in range(E):
    data = data_e[e]
    N, T = data.shape
    # BUG FIX: np.int was a deprecated alias for the builtin int and was
    # removed in NumPy 1.24; use an explicit integer dtype instead.
    data_s = np.zeros([N, T]).astype(np.int64)
    for n in range(N):
      f = data[n, :]
      # Rate f is in [0, 1]; scale to spikes/bin before sampling.
      s = rng.poisson(f * max_firing_rate * dt, size=T)
      data_s[n, :] = s
    spikes_e.append(data_s)
  return spikes_e
def gaussify_data(data_e, rng, dt=1.0, max_firing_rate=100):
  """ Apply gaussian noise to a continuous dataset whose values are between
  0.0 and 1.0

  Args:
    data_e: nexamples length list of NxT trials
    rng: numpy random number generator used for the Gaussian draws
    dt: how often the data are sampled
    max_firing_rate: the firing rate that is associated with a value of 1.0

  Returns:
    gauss_e: a list of length b of the data with noise.
  """
  E = len(data_e)
  mfr = max_firing_rate
  gauss_e = []
  for e in range(E):
    data = data_e[e]
    N, T = data.shape
    # BUG FIX: the original drew from the global np.random stream, silently
    # ignoring the caller-supplied `rng` and making results irreproducible.
    noisy_data = data * mfr + rng.randn(N, T) * (5.0 * mfr) * np.sqrt(dt)
    gauss_e.append(noisy_data)
  return gauss_e
def get_train_n_valid_inds(num_trials, train_fraction, nreplications):
  """Split the numbers between 0 and num_trials-1 into two portions for
  training and validation, based on the train fraction.

  Args:
    num_trials: the number of trials
    train_fraction: (e.g. .80)
    nreplications: the number of spiking trials per initial condition

  Returns:
    a 2-tuple of two lists: the training indices and validation indices
  """
  train_inds, valid_inds = [], []
  cutoff = train_fraction * nreplications
  for trial in range(num_trials):
    # Trials are divided so that within one initial condition, the
    # randomness of spikifying the condition is shared between the
    # training and validation splits.
    if (trial % nreplications) + 1 > cutoff:
      valid_inds.append(trial)
    else:
      train_inds.append(trial)
  return train_inds, valid_inds
def split_list_by_inds(data, inds1, inds2):
  """Take the data, a list, and split it up based on the indices in inds1
  and inds2.

  Args:
    data: the list of data to split (None/empty gives two empty lists)
    inds1: the first list of indices
    inds2: the second list of indices

  Returns: a 2-tuple of two lists.
  """
  if data is None or len(data) == 0:
    return [], []
  select = lambda inds: [data[i] for i in inds]
  return select(inds1), select(inds2)
def nparray_and_transpose(data_a_b_c):
  """Convert the nested list to a numpy array and swap the last two axes.

  Args:
    data_a_b_c: a nested, nested list of length a, with sublist length
    b, with sublist length c.

  Returns:
    a numpy 3-tensor with dimensions a x c x b
  """
  tensor_abc = np.array(list(data_a_b_c))
  return np.transpose(tensor_abc, axes=[0, 2, 1])
def add_alignment_projections(datasets, npcs, ntime=None, nsamples=None):
  """Create a matrix that aligns the datasets a bit, under the assumption
  that each dataset is observing the same underlying dynamical system.

  Condition-averaged responses from all datasets are stacked side by side,
  lightly smoothed, PCA'd, and each dataset's channels are regressed onto
  the top `npcs` principal components.

  Args:
    datasets: dict of dataset structures; each needs 'P_sxn',
      'condition_labels_train' and 'train_data' (trials x time x channels).
    npcs: the number of pcs for each, basically like lfads factors.
    ntime (optional): number of time steps to take in each sample
      (defaults to the time dimension of the last dataset iterated).
    nsamples (optional): number of samples per dataset (computed but
      currently unused beyond its default).

  Returns:
    The dataset structures, mutated in place with the fields
    alignment_matrix_cxf (channels x npcs) and alignment_bias_c added.
  """
  nchannels_all = 0
  channel_idxs = {}
  conditions_all = {}
  nconditions_all = 0
  for name, dataset in datasets.items():
    cidxs = np.where(dataset['P_sxn'])[1]  # non-zero entries in columns
    channel_idxs[name] = [cidxs[0], cidxs[-1]+1]
    nchannels_all += cidxs[-1]+1 - cidxs[0]
    conditions_all[name] = np.unique(dataset['condition_labels_train'])
  # BUG FIX: np.array(dict.values()) wraps the Python 3 dict_values view in
  # a 0-d object array, so the flatten/unique/comparison logic silently
  # matched nothing; concatenate the per-dataset label arrays instead.
  all_conditions_list = np.unique(np.concatenate(list(conditions_all.values())))
  nconditions_all = all_conditions_list.shape[0]
  if ntime is None:
    ntime = dataset['train_data'].shape[1]
  if nsamples is None:
    nsamples = dataset['train_data'].shape[0]
  # In the data workup in the paper, Chethan did intra condition
  # averaging, so let's do that here.
  avg_data_all = {}
  for name, conditions in conditions_all.items():
    dataset = datasets[name]
    avg_data_all[name] = {}
    for cname in conditions:
      td_idxs = np.argwhere(np.array(dataset['condition_labels_train'])==cname)
      data = np.squeeze(dataset['train_data'][td_idxs,:,:], axis=1)
      avg_data = np.mean(data, axis=0)
      avg_data_all[name][cname] = avg_data
  # Stack all condition averages side by side: channels x (time*conditions).
  all_data_nxtc = np.zeros([nchannels_all, ntime * nconditions_all])
  for name, dataset in datasets.items():
    cidx_s = channel_idxs[name][0]
    cidx_f = channel_idxs[name][1]
    for cname in conditions_all[name]:
      cidxs = np.argwhere(all_conditions_list == cname)
      if cidxs.shape[0] > 0:
        cidx = cidxs[0][0]
        all_tidxs = np.arange(0, ntime+1) + cidx*ntime
        all_data_nxtc[cidx_s:cidx_f, all_tidxs[0]:all_tidxs[-1]] = \
          avg_data_all[name][cname].T
  # A bit of filtering. We don't care about spectral properties, or
  # filtering artifacts, simply correlate time steps a bit.
  filt_len = 6
  bc_filt = np.ones([filt_len])/float(filt_len)
  for c in range(nchannels_all):
    all_data_nxtc[c,:] = scipy.signal.filtfilt(bc_filt, [1.0], all_data_nxtc[c,:])
  # Compute the PCs of the stacked, mean-centered data.
  all_data_mean_nx1 = np.mean(all_data_nxtc, axis=1, keepdims=True)
  all_data_zm_nxtc = all_data_nxtc - all_data_mean_nx1
  corr_mat_nxn = np.dot(all_data_zm_nxtc, all_data_zm_nxtc.T)
  evals_n, evecs_nxn = np.linalg.eigh(corr_mat_nxn)
  sidxs = np.flipud(np.argsort(evals_n))  # sort such that 0th is highest
  evals_n = evals_n[sidxs]
  evecs_nxn = evecs_nxn[:,sidxs]
  # Project all the channels data onto the low-D PCA basis, where
  # low-d is the npcs parameter.
  all_data_pca_pxtc = np.dot(evecs_nxn[:, 0:npcs].T, all_data_zm_nxtc)
  # Now for each dataset, we regress the channel data onto the top
  # pcs, and this will be our alignment matrix for that dataset.
  # |B - A*W|^2
  for name, dataset in datasets.items():
    cidx_s = channel_idxs[name][0]
    cidx_f = channel_idxs[name][1]
    all_data_zm_chxtc = all_data_zm_nxtc[cidx_s:cidx_f,:]  # ch for channel
    # rcond=None selects the modern machine-precision cutoff and silences
    # the FutureWarning from the legacy lstsq default.
    W_chxp, _, _, _ = \
        np.linalg.lstsq(all_data_zm_chxtc.T, all_data_pca_pxtc.T, rcond=None)
    dataset['alignment_matrix_cxf'] = W_chxp
    alignment_bias_cx1 = all_data_mean_nx1[cidx_s:cidx_f]
    dataset['alignment_bias_c'] = np.squeeze(alignment_bias_cx1, axis=1)
  do_debug_plot = False
  if do_debug_plot:
    pc_vecs = evecs_nxn[:,0:npcs]
    ntoplot = 400
    plt.figure()
    plt.plot(np.log10(evals_n), '-x')
    plt.figure()
    plt.subplot(311)
    plt.imshow(all_data_pca_pxtc)
    plt.colorbar()
    plt.subplot(312)
    plt.imshow(np.dot(W_chxp.T, all_data_zm_chxtc))
    plt.colorbar()
    plt.subplot(313)
    plt.imshow(np.dot(all_data_zm_chxtc.T, W_chxp).T - all_data_pca_pxtc)
    plt.colorbar()
    import pdb
    pdb.set_trace()
  return datasets
| [
"numpy.log10",
"numpy.sqrt",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.where",
"numpy.tanh",
"numpy.max",
"numpy.dot",
"numpy.linalg.lstsq",
"numpy.min",
"numpy.linalg.eigh",
"numpy.eye",
"numpy.ones",
"numpy.squeeze",
"numpy.t... | [((1461, 1472), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1469, 1472), True, 'import numpy as np\n'), ((6899, 6948), 'numpy.array', 'np.array', (['[datum_b_c for datum_b_c in data_a_b_c]'], {}), '([datum_b_c for datum_b_c in data_a_b_c])\n', (6907, 6948), True, 'import numpy as np\n'), ((6964, 7004), 'numpy.transpose', 'np.transpose', (['data_axbxc'], {'axes': '[0, 2, 1]'}), '(data_axbxc, axes=[0, 2, 1])\n', (6976, 7004), True, 'import numpy as np\n'), ((8825, 8875), 'numpy.zeros', 'np.zeros', (['[nchannels_all, ntime * nconditions_all]'], {}), '([nchannels_all, ntime * nconditions_all])\n', (8833, 8875), True, 'import numpy as np\n'), ((9654, 9699), 'numpy.mean', 'np.mean', (['all_data_nxtc'], {'axis': '(1)', 'keepdims': '(True)'}), '(all_data_nxtc, axis=1, keepdims=True)\n', (9661, 9699), True, 'import numpy as np\n'), ((9772, 9816), 'numpy.dot', 'np.dot', (['all_data_zm_nxtc', 'all_data_zm_nxtc.T'], {}), '(all_data_zm_nxtc, all_data_zm_nxtc.T)\n', (9778, 9816), True, 'import numpy as np\n'), ((9840, 9868), 'numpy.linalg.eigh', 'np.linalg.eigh', (['corr_mat_nxn'], {}), '(corr_mat_nxn)\n', (9854, 9868), True, 'import numpy as np\n'), ((10124, 10172), 'numpy.dot', 'np.dot', (['evecs_nxn[:, 0:npcs].T', 'all_data_zm_nxtc'], {}), '(evecs_nxn[:, 0:npcs].T, all_data_zm_nxtc)\n', (10130, 10172), True, 'import numpy as np\n'), ((1354, 1364), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (1361, 1364), True, 'import numpy as np\n'), ((1393, 1405), 'numpy.sqrt', 'np.sqrt', (['(1.0)'], {}), '(1.0)\n', (1400, 1405), True, 'import numpy as np\n'), ((1435, 1447), 'numpy.sqrt', 'np.sqrt', (['(1.0)'], {}), '(1.0)\n', (1442, 1447), True, 'import numpy as np\n'), ((2304, 2330), 'numpy.zeros', 'np.zeros', (['[N, ntime_steps]'], {}), '([N, ntime_steps])\n', (2312, 2330), True, 'import numpy as np\n'), ((2357, 2368), 'numpy.tanh', 'np.tanh', (['x0'], {}), '(x0)\n', (2364, 2368), True, 'import numpy as np\n'), ((2559, 2585), 'numpy.zeros', 'np.zeros', (['[1, 
ntime_steps]'], {}), '([1, ntime_steps])\n', (2567, 2585), True, 'import numpy as np\n'), ((2942, 2951), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (2948, 2951), True, 'import numpy as np\n'), ((3186, 3206), 'numpy.dot', 'np.dot', (['P_sxn', 'r_nxt'], {}), '(P_sxn, r_nxt)\n', (3192, 3206), True, 'import numpy as np\n'), ((7956, 8000), 'numpy.unique', 'np.unique', (["dataset['condition_labels_train']"], {}), "(dataset['condition_labels_train'])\n", (7965, 8000), True, 'import numpy as np\n'), ((9458, 9477), 'numpy.ones', 'np.ones', (['[filt_len]'], {}), '([filt_len])\n', (9465, 9477), True, 'import numpy as np\n'), ((9889, 9908), 'numpy.argsort', 'np.argsort', (['evals_n'], {}), '(evals_n)\n', (9899, 9908), True, 'import numpy as np\n'), ((10540, 10597), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['all_data_zm_chxtc.T', 'all_data_pca_pxtc.T'], {}), '(all_data_zm_chxtc.T, all_data_pca_pxtc.T)\n', (10555, 10597), True, 'import numpy as np\n'), ((10735, 10773), 'numpy.squeeze', 'np.squeeze', (['alignment_bias_cx1'], {'axis': '(1)'}), '(alignment_bias_cx1, axis=1)\n', (10745, 10773), True, 'import numpy as np\n'), ((10876, 10888), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10886, 10888), True, 'import matplotlib.pyplot as plt\n'), ((10931, 10943), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10941, 10943), True, 'import matplotlib.pyplot as plt\n'), ((10948, 10964), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (10959, 10964), True, 'import matplotlib.pyplot as plt\n'), ((10969, 10998), 'matplotlib.pyplot.imshow', 'plt.imshow', (['all_data_pca_pxtc'], {}), '(all_data_pca_pxtc)\n', (10979, 10998), True, 'import matplotlib.pyplot as plt\n'), ((11003, 11017), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11015, 11017), True, 'import matplotlib.pyplot as plt\n'), ((11023, 11039), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (11034, 11039), True, 'import 
matplotlib.pyplot as plt\n'), ((11096, 11110), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11108, 11110), True, 'import matplotlib.pyplot as plt\n'), ((11116, 11132), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (11127, 11132), True, 'import matplotlib.pyplot as plt\n'), ((11211, 11225), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11223, 11225), True, 'import matplotlib.pyplot as plt\n'), ((11246, 11261), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (11259, 11261), False, 'import pdb\n'), ((2822, 2834), 'numpy.tanh', 'np.tanh', (['x_t'], {}), '(x_t)\n', (2829, 2834), True, 'import numpy as np\n'), ((3688, 3707), 'numpy.min', 'np.min', (['r_sxt[i, :]'], {}), '(r_sxt[i, :])\n', (3694, 3707), True, 'import numpy as np\n'), ((3720, 3739), 'numpy.max', 'np.max', (['r_sxt[i, :]'], {}), '(r_sxt[i, :])\n', (3726, 3739), True, 'import numpy as np\n'), ((7776, 7802), 'numpy.where', 'np.where', (["dataset['P_sxn']"], {}), "(dataset['P_sxn'])\n", (7784, 7802), True, 'import numpy as np\n'), ((8634, 8690), 'numpy.squeeze', 'np.squeeze', (["dataset['train_data'][td_idxs, :, :]"], {'axis': '(1)'}), "(dataset['train_data'][td_idxs, :, :], axis=1)\n", (8644, 8690), True, 'import numpy as np\n'), ((8706, 8727), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (8713, 8727), True, 'import numpy as np\n'), ((9040, 9081), 'numpy.argwhere', 'np.argwhere', (['(all_conditions_list == cname)'], {}), '(all_conditions_list == cname)\n', (9051, 9081), True, 'import numpy as np\n'), ((10902, 10919), 'numpy.log10', 'np.log10', (['evals_n'], {}), '(evals_n)\n', (10910, 10919), True, 'import numpy as np\n'), ((11055, 11090), 'numpy.dot', 'np.dot', (['W_chxp.T', 'all_data_zm_chxtc'], {}), '(W_chxp.T, all_data_zm_chxtc)\n', (11061, 11090), True, 'import numpy as np\n'), ((4440, 4456), 'numpy.zeros', 'np.zeros', (['[N, T]'], {}), '([N, T])\n', (4448, 4456), True, 'import numpy as np\n'), ((5221, 
5232), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (5228, 5232), True, 'import numpy as np\n'), ((2645, 2661), 'numpy.dot', 'np.dot', (['W', 'r_tm1'], {}), '(W, r_tm1)\n', (2651, 2661), True, 'import numpy as np\n'), ((5186, 5207), 'numpy.random.randn', 'np.random.randn', (['N', 'T'], {}), '(N, T)\n', (5201, 5207), True, 'import numpy as np\n'), ((8569, 8612), 'numpy.array', 'np.array', (["dataset['condition_labels_train']"], {}), "(dataset['condition_labels_train'])\n", (8577, 8612), True, 'import numpy as np\n'), ((9158, 9181), 'numpy.arange', 'np.arange', (['(0)', '(ntime + 1)'], {}), '(0, ntime + 1)\n', (9167, 9181), True, 'import numpy as np\n'), ((11148, 11183), 'numpy.dot', 'np.dot', (['all_data_zm_chxtc.T', 'W_chxp'], {}), '(all_data_zm_chxtc.T, W_chxp)\n', (11154, 11183), True, 'import numpy as np\n')] |
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, PathPatch
from scipy.sparse import csr_matrix, coo_matrix
from scipy.sparse.csgraph import dijkstra
from timeit import default_timer as timer
from mpl_toolkits.mplot3d import axes3d
import mpl_toolkits.mplot3d.art3d as art3d
matplotlib.use("TkAgg") # for plotting, had to sudo apt-get install python3-tk
# TODO: edit section
class DistanceGraph:
cs_graph = None
dist_matrix = None
predecessors = None
    def __init__(self, args, field, num_vertices, obstacles):
        """Build a rectangular 3D vertex grid over `field` for shortest-path queries.

        Args:
            args: configuration object; if truthy, must expose `args.logger`
                for the info printout below.
            field: [m_x, m_y, m_z, l, w, h] box center and half-extents,
                same format as in mujoco.
            num_vertices: [n_x, n_y, n_z] vertex counts per axis.
            obstacles: list of [m_x, m_y, m_z, l, w, h] boxes, same format
                as in mujoco.

        Raises:
            Exception: if the vertex density is too low to resolve every
                obstacle (an obstacle is only detectable if e.g. l > dx/2).

        Note: up to 10000 nodes can be handled memorywise (computation time
        non-problematic in this case); x_spaces * y_spaces * z_spaces should
        not increase beyond 8000.
        """
        [m_x, m_y, m_z, l, w, h] = field
        self.args = args
        # Get min and max coordinate values, number of spaces in each direction, obstacles and z_penalty
        self.x_min = m_x - l
        self.y_min = m_y - w
        self.z_min = m_z - h
        self.x_max = m_x + l
        self.y_max = m_y + w
        self.z_max = m_z + h
        assert len(num_vertices) == 3
        self.n_x = num_vertices[0]
        self.n_y = num_vertices[1]
        self.n_z = num_vertices[2]
        self.obstacles = obstacles
        # Compute space between vertices in each direction
        self.dx = (self.x_max - self.x_min) / (self.n_x - 1)
        self.dy = (self.y_max - self.y_min) / (self.n_y - 1)
        self.dz = (self.z_max - self.z_min) / (self.n_z - 1)
        # lower_band/diff/hash support coords<->gridpoint<->vertex conversions
        self.lower_band = [self.x_min, self.y_min, self.z_min]
        self.diff = [self.dx, self.dy, self.dz]
        self.hash = [1, self.n_x, self.n_x * self.n_y]
        # total number of vertices
        self.n = self.n_x * self.n_y * self.n_z
        # initialize obstacle_vertices matrix
        # has size of the n_x x n_y x n_z (padded by one), can be indexed by i,j,k
        # 0 entry means "no obstacle", 1 entry means "obstacle", at the corresponding vertex
        self.obstacle_vertices = np.zeros((self.n_x + 1, self.n_y + 1, self.n_z + 1))
        # boundaries initialized as obstacle
        self.obstacle_vertices[-1, :, :] = 1
        self.obstacle_vertices[:, -1, :] = 1
        self.obstacle_vertices[:, :, -1] = 1
        # previous: array of neighbor offsets used to iterate through the vertices
        # (the 13 offsets covering "already visited" neighbors in scan order)
        self.previous = list()
        for a in [-1, 0, 1]:
            for b in [-1, 0, 1]:
                self.previous.append([a, b, -1])
        for a in [-1, 0, 1]:
            self.previous.append([a, -1, 0])
        self.previous.append([-1, 0, 0])
        # check vertex density criterion (obstacles only detectable if e.g. l>dx/2)
        graph_okay = True
        n_x_min = 0
        n_y_min = 0
        n_z_min = 0
        for [m_x, m_y, m_z, l, w, h] in self.obstacles:
            # minimal vertex count so at least one vertex falls inside each obstacle
            n_x_min = max(n_x_min, (self.x_max - self.x_min) / (2 * l) + 1)
            n_y_min = max(n_y_min, (self.y_max - self.y_min) / (2 * w) + 1)
            n_z_min = max(n_z_min, (self.z_max - self.z_min) / (2 * h) + 1)
            if l <= self.dx / 2 or w <= self.dy / 2 or h <= self.dz / 2:
                graph_okay = False
        # print section
        if self.args:
            self.args.logger.info("Created DistanceGraph with: ")
            self.args.logger.info(
                "\tField: x: [{}, {}], y: [{}, {}], z: [{}, {}]".format(self.x_min, self.x_max, self.y_min, self.y_max,
                                                                        self.z_min, self.z_max))
            self.args.logger.info("\tObstacles:")
            for obstacle in obstacles:
                self.args.logger.info("\t\t{}".format(obstacle))
            self.args.logger.info(
                "\tNumber of vertices: n_x: {}, n_y: {}, n_z: {}".format(self.n_x, self.n_y, self.n_z))
            self.args.logger.info("\tTotal number of vertices: {}".format(self.n))
            self.args.logger.info("\tDx: {}, Dy: {}, Dz: {}".format(self.dx, self.dy, self.dz))
            self.args.logger.info(
                "\tRequired number of vertices: n_x > {}, n_y > {}, n_z > {}".format(n_x_min, n_y_min, n_z_min))
        if not graph_okay:
            raise Exception("Vertex density is not high enough, requirements see above")
def is_obstacle(self, x, y, z):
# checks whether a point (x, y, z) lies inside an obstacle
for [m_x, m_y, m_z, l, w, h] in self.obstacles:
# l, w, h are increased to make sure that the edges of the obstacles are considered as well
l += 0.02
w += 0.02
h += 0.02
if m_x - l <= x <= m_x + l and m_y - w <= y <= m_y + w and m_z - h <= z <= m_z + h:
return True
return False
def gridpoint2vertex(self, gridpoint) -> int:
# converts gridpoint representation [i, j, k] to vertex ID
node = np.dot(gridpoint, self.hash)
return int(node)
def vertex2gridpoint(self, vertex) -> (int, int, int):
# converts vertex ID to gridpoint representation [i, j ,k]
k = np.floor(vertex / (self.n_x * self.n_y))
new = vertex % (self.n_x * self.n_y)
j = np.floor(new / self.n_x)
i = vertex % self.n_x
return i, j, k
def gridpoint2coords(self, gridpoint) -> (int, int, int):
# converts gridpoint representation [i, j, k] to coords representation [x, y, z]
[i, j, k] = gridpoint
x = self.x_min + i * self.dx
y = self.y_min + j * self.dy
z = self.z_min + k * self.dz
return x, y, z
def coords2gridpoint(self, coords) -> (int, int, int):
# converts coords representation [x, y, z] to gridpoint representation [i, j, k]
threshold = 0.00001
[x, y, z] = coords
if not (
self.x_min - threshold <= x <= self.x_max + threshold and self.y_min - threshold <= y <= self.y_max + threshold and self.z_min - threshold <= z <= self.z_max + threshold):
return None
return np.round((coords - self.lower_band) / self.diff)
def compute_cs_graph(self):
# create cs_graph as a sparse matrix of size [num_nodes, num_nodes],
# where the entry cs_graph[node_a, node_b] == True only if there is a connection between node_a and node_b
# only connecting nodes that do not lie within an obstacle
if self.args:
self.args.logger.info("Computing {}x{} cs_graph ...".format(self.n, self.n))
start = timer()
row = list()
col = list()
data = list()
# mark nodes lying inside an obstacle as 1 in self.obstacle_nodes
for i in range(self.n_x):
for j in range(self.n_y):
for k in range(self.n_z):
x, y, z = self.gridpoint2coords([i, j, k])
if self.is_obstacle(x, y, z): # if point is black
self.obstacle_vertices[i, j, k] = 1
# connect only non-obstacle vertices with edges
# the edge's weight corresponds to the distance between the vertices
# edges are stored in lists row and col
for i in range(self.n_x):
for j in range(self.n_y):
for k in range(self.n_z):
for a, b, c in self.previous:
if self.obstacle_vertices[i, j, k] == 0:
if self.obstacle_vertices[i + a, j + b, k + c] == 0: # i.e. is white
basevertex = self.gridpoint2vertex([i, j, k])
connectvertex = self.gridpoint2vertex([i + a, j + b, k + c])
# distance d between two vertices
d = np.sqrt(
a * a * self.dx * self.dx + b * b * self.dy * self.dy + c * c * self.dz * self.dz)
row.append(basevertex)
col.append(connectvertex)
data.append(d)
# create cs_graph as a sparse matrix,
# where the entry cs_graph[node_a, node_b] == True only if there is a connection between vertex_a and vertex_b
self.cs_graph = csr_matrix((data, (row, col)), shape=(self.n, self.n))
end = timer()
if self.args:
self.args.logger.info("\tdone after {} secs".format(end - start))
def compute_dist_matrix(self, compute_predecessors=False):
# create a distance_matrix dist_matrix from self.cs_graph by using dijkstra shortest path algorithm
# dist_matrix is fully populated of size n x n
# the entry dist_matrix[vertex_a, vertex_b] contains the shortest distance on cs_graph between vertex_a and vertex_b
if self.args:
self.args.logger.info("Computing {}x{} dist_matrix ...".format(self.n, self.n))
start = timer()
if self.cs_graph is None:
raise Exception("No CS_Graph available!")
if compute_predecessors:
self.dist_matrix, self.predecessors = dijkstra(self.cs_graph, directed=False, return_predecessors=True)
else:
self.dist_matrix = dijkstra(self.cs_graph, directed=False, return_predecessors=False)
end = timer()
if self.args:
self.args.logger.info("\t done after {} secs".format(end - start))
def get_dist_grid(self, coords1, coords2, return_path=False):
gridpoint1 = self.coords2gridpoint(coords1)
gridpoint2 = self.coords2gridpoint(coords2)
if gridpoint2 is None or gridpoint1 is None:
return np.inf
else:
return abs(gridpoint1[0] - gridpoint2[0]) + abs(gridpoint1[1] - gridpoint2[1]) + abs(
gridpoint1[2] - gridpoint2[2])
    def get_dist(self, coords1, coords2, return_path=False):
        """Shortest distance between two points on the precomputed graph.

        Parameters
        ----------
        coords1, coords2 : [x, y, z] world coordinates.
        return_path : when True, also reconstruct the shortest path from the
            predecessor matrix (requires compute_dist_matrix to have been run
            with compute_predecessors=True).

        Returns
        -------
        (distance, path) : path is a list of [x, y, z] coords ordered from
            coords2's vertex back towards coords1's vertex, or None when no
            path was requested or none exists.

        Raises
        ------
        Exception when compute_dist_matrix() (or predecessors) is missing.
        """
        if self.dist_matrix is None:
            raise Exception("No dist_matrix available!")
        # transfer coords [x, y, z] to the closest node in gridpoint representation [i, j, k]
        gridpoint1 = self.coords2gridpoint(coords1)
        gridpoint2 = self.coords2gridpoint(coords2)
        # if gridpoint is not in grid, assume there is no connection (shortest distance = inf)
        if gridpoint1 is None or gridpoint2 is None:
            return np.inf, None
        # transfer gridpoint representation to vertex ID
        vertex_a = self.gridpoint2vertex(gridpoint1)
        vertex_b = self.gridpoint2vertex(gridpoint2)
        if not return_path:
            return self.dist_matrix[vertex_a][vertex_b], None
        else:
            if self.predecessors is None:
                raise Exception("No predecessors available!")
            # walk the predecessor chain backwards from vertex_b towards
            # vertex_a, collecting world coordinates along the way
            path = []
            current_node = vertex_b
            path.append(self.gridpoint2coords(self.vertex2gridpoint(current_node)))
            while current_node != vertex_a:
                current_node = self.predecessors[vertex_a, current_node]
                # if there is no path, dijkstra writes -9999 to predecessor matrix
                if current_node == -9999:
                    if self.args:
                        self.args.logger.info("No path!")
                    return self.dist_matrix[vertex_a][vertex_b], None
                path.append(self.gridpoint2coords(self.vertex2gridpoint(current_node)))
            return self.dist_matrix[vertex_a][vertex_b], path
def plot_goals(self, goals=None, colors=None, azim=-12, elev=15, show=False, save_path='test', extra=None):
# Plot goals with different options
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
co_graph = coo_matrix(self.cs_graph)
# scatter plot boundaries of field
x_array = [self.x_min, self.x_min, self.x_min, self.x_min, self.x_max, self.x_max, self.x_max, self.x_max]
y_array = [self.y_min, self.y_min, self.y_max, self.y_max, self.y_min, self.y_min, self.y_max, self.y_max]
z_array = [self.z_min, self.z_max, self.z_min, self.z_max, self.z_min, self.z_max, self.z_min, self.z_max]
ax.scatter(x_array, y_array, z_array, c='b')
# plots obstacle
for [m_x, m_y, m_z, l, w, h] in self.obstacles:
# top
side1 = Rectangle((m_x - l, m_y - w), 2 * l, 2 * w, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_z + h, zdir="z", )
# bottom
side1 = Rectangle((m_x - l, m_y - w), 2 * l, 2 * w, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_z - h, zdir="z")
# back
side1 = Rectangle((m_y - w, m_z - h), 2 * w, 2 * h, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_x + l, zdir="x")
# front
side1 = Rectangle((m_y - w, m_z - h), 2 * w, 2 * h, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_x - l, zdir="x")
# right
side1 = Rectangle((m_x - l, m_z - h), 2 * l, 2 * h, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_y + w, zdir="y")
# left
side1 = Rectangle((m_x - l, m_z - h), 2 * l, 2 * h, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_y - w, zdir="y")
# plot goals:
for i in range(len(goals)):
current_goals = goals[i]
current_color = colors[i]
for goal in current_goals:
x = goal[0]
y = goal[1]
z = goal[2]
ax.scatter([x], [y], [z], c=current_color)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_zlim(0.4, 0.8)
if extra == 1:
ax.set_yticks([0.5, 0.7, 0.9, 1.1])
ax.set_zticks([0.4, 0.5, 0.6, 0.7, 0.8])
ax.view_init(elev=elev, azim=azim)
if show:
plt.show()
plt.savefig(save_path + ".pdf")
def plot_graph(self, path=None, graph=False, obstacle_vertices=False, goals=None, save_path='test', show=False,
azim=-12, elev=15, extra=None):
# Plot graph with different options
if self.args:
self.args.logger.info("Plotting ...")
if self.cs_graph is None:
raise Exception("No cs_graph available")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
co_graph = coo_matrix(self.cs_graph)
# scatter plot boundaries of field
x_array = [self.x_min, self.x_min, self.x_min, self.x_min, self.x_max, self.x_max, self.x_max, self.x_max]
y_array = [self.y_min, self.y_min, self.y_max, self.y_max, self.y_min, self.y_min, self.y_max, self.y_max]
z_array = [self.z_min, self.z_max, self.z_min, self.z_max, self.z_min, self.z_max, self.z_min, self.z_max]
ax.scatter(x_array, y_array, z_array, c='b')
# plots obstacle
for [m_x, m_y, m_z, l, w, h] in self.obstacles:
# top
side1 = Rectangle((m_x - l, m_y - w), 2 * l, 2 * w, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_z + h, zdir="z", )
# bottom
side1 = Rectangle((m_x - l, m_y - w), 2 * l, 2 * w, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_z - h, zdir="z")
# back
side1 = Rectangle((m_y - w, m_z - h), 2 * w, 2 * h, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_x + l, zdir="x")
# front
side1 = Rectangle((m_y - w, m_z - h), 2 * w, 2 * h, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_x - l, zdir="x")
# right
side1 = Rectangle((m_x - l, m_z - h), 2 * l, 2 * h, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_y + w, zdir="y")
# left
side1 = Rectangle((m_x - l, m_z - h), 2 * l, 2 * h, color=[0, 0, 1, 0.1])
ax.add_patch(side1)
art3d.pathpatch_2d_to_3d(side1, z=m_y - w, zdir="y")
if path:
for i in range(len(path) - 1):
a = path[i]
b = path[i + 1]
X, Y, Z = [a[0], b[0]], [a[1], b[1]], [a[2], b[2]]
ax.plot(X, Y, Z, c=[1, 0, 0, 1])
# plot graph edges
if graph:
for i, j, v in zip(co_graph.row, co_graph.col, co_graph.data):
a = self.gridpoint2coords(self.vertex2gridpoint(i))
b = self.gridpoint2coords(self.vertex2gridpoint(j))
X, Y, Z = [a[0], b[0]], [a[1], b[1]], [a[2], b[2]]
ax.plot(X, Y, Z, c=[0, 0, 0, 0.2])
# scatter plot vertices that are marked as black (with obstacle)
if obstacle_vertices:
for i in range(self.n_x):
for j in range(self.n_y):
for k in range(self.n_z):
x, y, z = self.gridpoint2coords([i, j, k])
if self.obstacle_vertices[i, j, k] == 0:
ax.scatter([x], [y], [z], c=[0, 0, 0, 0.3])
# plot goals:
if goals:
for goal in goals:
x = goal[0]
y = goal[1]
z = goal[2]
ax.scatter([x], [y], [z], c='green')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if extra == 1:
ax.set_xticks([1.1, 1.3, 1.5])
ax.set_zticks([0.4, 0.5, 0.6, 0.7, 0.8])
ax.view_init(elev=elev, azim=azim)
if show:
plt.show()
plt.savefig(save_path + ".pdf")
if self.args:
self.args.logger.info("\tdone")
| [
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.use",
"timeit.default_timer",
"numpy.floor",
"scipy.sparse.csgraph.dijkstra",
"numpy.dot",
"numpy.zeros",
"matplotlib.pyplot.figure",
"mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d",
"scipy.sparse.coo_matrix... | [((348, 371), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (362, 371), False, 'import matplotlib\n'), ((2255, 2307), 'numpy.zeros', 'np.zeros', (['(self.n_x + 1, self.n_y + 1, self.n_z + 1)'], {}), '((self.n_x + 1, self.n_y + 1, self.n_z + 1))\n', (2263, 2307), True, 'import numpy as np\n'), ((5094, 5122), 'numpy.dot', 'np.dot', (['gridpoint', 'self.hash'], {}), '(gridpoint, self.hash)\n', (5100, 5122), True, 'import numpy as np\n'), ((5287, 5327), 'numpy.floor', 'np.floor', (['(vertex / (self.n_x * self.n_y))'], {}), '(vertex / (self.n_x * self.n_y))\n', (5295, 5327), True, 'import numpy as np\n'), ((5385, 5409), 'numpy.floor', 'np.floor', (['(new / self.n_x)'], {}), '(new / self.n_x)\n', (5393, 5409), True, 'import numpy as np\n'), ((6227, 6275), 'numpy.round', 'np.round', (['((coords - self.lower_band) / self.diff)'], {}), '((coords - self.lower_band) / self.diff)\n', (6235, 6275), True, 'import numpy as np\n'), ((6695, 6702), 'timeit.default_timer', 'timer', ([], {}), '()\n', (6700, 6702), True, 'from timeit import default_timer as timer\n'), ((8409, 8463), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(data, (row, col))'], {'shape': '(self.n, self.n)'}), '((data, (row, col)), shape=(self.n, self.n))\n', (8419, 8463), False, 'from scipy.sparse import csr_matrix, coo_matrix\n'), ((8478, 8485), 'timeit.default_timer', 'timer', ([], {}), '()\n', (8483, 8485), True, 'from timeit import default_timer as timer\n'), ((9068, 9075), 'timeit.default_timer', 'timer', ([], {}), '()\n', (9073, 9075), True, 'from timeit import default_timer as timer\n'), ((9439, 9446), 'timeit.default_timer', 'timer', ([], {}), '()\n', (9444, 9446), True, 'from timeit import default_timer as timer\n'), ((11932, 11944), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11942, 11944), True, 'import matplotlib.pyplot as plt\n'), ((12015, 12040), 'scipy.sparse.coo_matrix', 'coo_matrix', (['self.cs_graph'], {}), 
'(self.cs_graph)\n', (12025, 12040), False, 'from scipy.sparse import csr_matrix, coo_matrix\n'), ((14424, 14455), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_path + '.pdf')"], {}), "(save_path + '.pdf')\n", (14435, 14455), True, 'import matplotlib.pyplot as plt\n'), ((14841, 14853), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14851, 14853), True, 'import matplotlib.pyplot as plt\n'), ((14924, 14949), 'scipy.sparse.coo_matrix', 'coo_matrix', (['self.cs_graph'], {}), '(self.cs_graph)\n', (14934, 14949), False, 'from scipy.sparse import csr_matrix, coo_matrix\n'), ((18235, 18266), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_path + '.pdf')"], {}), "(save_path + '.pdf')\n", (18246, 18266), True, 'import matplotlib.pyplot as plt\n'), ((9247, 9312), 'scipy.sparse.csgraph.dijkstra', 'dijkstra', (['self.cs_graph'], {'directed': '(False)', 'return_predecessors': '(True)'}), '(self.cs_graph, directed=False, return_predecessors=True)\n', (9255, 9312), False, 'from scipy.sparse.csgraph import dijkstra\n'), ((9358, 9424), 'scipy.sparse.csgraph.dijkstra', 'dijkstra', (['self.cs_graph'], {'directed': '(False)', 'return_predecessors': '(False)'}), '(self.cs_graph, directed=False, return_predecessors=False)\n', (9366, 9424), False, 'from scipy.sparse.csgraph import dijkstra\n'), ((12601, 12666), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_x - l, m_y - w)', '(2 * l)', '(2 * w)'], {'color': '[0, 0, 1, 0.1]'}), '((m_x - l, m_y - w), 2 * l, 2 * w, color=[0, 0, 1, 0.1])\n', (12610, 12666), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((12711, 12763), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_z + h)', 'zdir': '"""z"""'}), "(side1, z=m_z + h, zdir='z')\n", (12735, 12763), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((12807, 12872), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_x - l, m_y - w)', '(2 * l)', '(2 * w)'], {'color': '[0, 0, 1, 0.1]'}), '((m_x - 
l, m_y - w), 2 * l, 2 * w, color=[0, 0, 1, 0.1])\n', (12816, 12872), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((12917, 12969), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_z - h)', 'zdir': '"""z"""'}), "(side1, z=m_z - h, zdir='z')\n", (12941, 12969), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((13009, 13074), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_y - w, m_z - h)', '(2 * w)', '(2 * h)'], {'color': '[0, 0, 1, 0.1]'}), '((m_y - w, m_z - h), 2 * w, 2 * h, color=[0, 0, 1, 0.1])\n', (13018, 13074), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((13119, 13171), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_x + l)', 'zdir': '"""x"""'}), "(side1, z=m_x + l, zdir='x')\n", (13143, 13171), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((13212, 13277), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_y - w, m_z - h)', '(2 * w)', '(2 * h)'], {'color': '[0, 0, 1, 0.1]'}), '((m_y - w, m_z - h), 2 * w, 2 * h, color=[0, 0, 1, 0.1])\n', (13221, 13277), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((13322, 13374), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_x - l)', 'zdir': '"""x"""'}), "(side1, z=m_x - l, zdir='x')\n", (13346, 13374), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((13415, 13480), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_x - l, m_z - h)', '(2 * l)', '(2 * h)'], {'color': '[0, 0, 1, 0.1]'}), '((m_x - l, m_z - h), 2 * l, 2 * h, color=[0, 0, 1, 0.1])\n', (13424, 13480), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((13525, 13577), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_y + w)', 'zdir': '"""y"""'}), "(side1, z=m_y + w, zdir='y')\n", (13549, 13577), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((13617, 
13682), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_x - l, m_z - h)', '(2 * l)', '(2 * h)'], {'color': '[0, 0, 1, 0.1]'}), '((m_x - l, m_z - h), 2 * l, 2 * h, color=[0, 0, 1, 0.1])\n', (13626, 13682), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((13727, 13779), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_y - w)', 'zdir': '"""y"""'}), "(side1, z=m_y - w, zdir='y')\n", (13751, 13779), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((14405, 14415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14413, 14415), True, 'import matplotlib.pyplot as plt\n'), ((15510, 15575), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_x - l, m_y - w)', '(2 * l)', '(2 * w)'], {'color': '[0, 0, 1, 0.1]'}), '((m_x - l, m_y - w), 2 * l, 2 * w, color=[0, 0, 1, 0.1])\n', (15519, 15575), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((15620, 15672), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_z + h)', 'zdir': '"""z"""'}), "(side1, z=m_z + h, zdir='z')\n", (15644, 15672), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((15716, 15781), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_x - l, m_y - w)', '(2 * l)', '(2 * w)'], {'color': '[0, 0, 1, 0.1]'}), '((m_x - l, m_y - w), 2 * l, 2 * w, color=[0, 0, 1, 0.1])\n', (15725, 15781), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((15826, 15878), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_z - h)', 'zdir': '"""z"""'}), "(side1, z=m_z - h, zdir='z')\n", (15850, 15878), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((15918, 15983), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_y - w, m_z - h)', '(2 * w)', '(2 * h)'], {'color': '[0, 0, 1, 0.1]'}), '((m_y - w, m_z - h), 2 * w, 2 * h, color=[0, 0, 1, 0.1])\n', (15927, 15983), False, 'from matplotlib.patches import Rectangle, 
PathPatch\n'), ((16028, 16080), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_x + l)', 'zdir': '"""x"""'}), "(side1, z=m_x + l, zdir='x')\n", (16052, 16080), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((16121, 16186), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_y - w, m_z - h)', '(2 * w)', '(2 * h)'], {'color': '[0, 0, 1, 0.1]'}), '((m_y - w, m_z - h), 2 * w, 2 * h, color=[0, 0, 1, 0.1])\n', (16130, 16186), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((16231, 16283), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_x - l)', 'zdir': '"""x"""'}), "(side1, z=m_x - l, zdir='x')\n", (16255, 16283), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((16324, 16389), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_x - l, m_z - h)', '(2 * l)', '(2 * h)'], {'color': '[0, 0, 1, 0.1]'}), '((m_x - l, m_z - h), 2 * l, 2 * h, color=[0, 0, 1, 0.1])\n', (16333, 16389), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((16434, 16486), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_y + w)', 'zdir': '"""y"""'}), "(side1, z=m_y + w, zdir='y')\n", (16458, 16486), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((16526, 16591), 'matplotlib.patches.Rectangle', 'Rectangle', (['(m_x - l, m_z - h)', '(2 * l)', '(2 * h)'], {'color': '[0, 0, 1, 0.1]'}), '((m_x - l, m_z - h), 2 * l, 2 * h, color=[0, 0, 1, 0.1])\n', (16535, 16591), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((16636, 16688), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['side1'], {'z': '(m_y - w)', 'zdir': '"""y"""'}), "(side1, z=m_y - w, zdir='y')\n", (16660, 16688), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((18216, 18226), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18224, 18226), True, 'import matplotlib.pyplot as plt\n'), 
((7932, 8026), 'numpy.sqrt', 'np.sqrt', (['(a * a * self.dx * self.dx + b * b * self.dy * self.dy + c * c * self.dz *\n self.dz)'], {}), '(a * a * self.dx * self.dx + b * b * self.dy * self.dy + c * c *\n self.dz * self.dz)\n', (7939, 8026), True, 'import numpy as np\n')] |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
def plot_2ala_ramachandran(traj, ax=None, weights=None):
    """Plot a 2D Ramachandran histogram (phi vs. psi) on a log colour scale.

    Parameters
    ----------
    traj : mdtraj.Trajectory whose backbone dihedrals are histogrammed.
    ax : matplotlib axes to draw into; defaults to the current axes.
    weights : optional np.ndarray of per-frame histogram weights.
    """
    import mdtraj as md
    if ax is None:  # fixed: identity comparison with `is`, not `==`
        ax = plt.gca()
    hist_kwargs = dict(
        bins=[np.linspace(-np.pi, np.pi, 64), np.linspace(-np.pi, np.pi, 64)],
        norm=mpl.colors.LogNorm(),
    )
    # weights are forwarded only when given as an ndarray (matches the
    # original behaviour; other weight types are silently ignored)
    if isinstance(weights, np.ndarray):
        hist_kwargs["weights"] = weights
    ax.hist2d(
        md.compute_phi(traj)[1].reshape(-1),
        md.compute_psi(traj)[1].reshape(-1),
        **hist_kwargs,
    )
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi, np.pi)
    ax.set_xlabel(r"$\phi$")
    ax.set_ylabel(r"$\psi$")
| [
"matplotlib.pyplot.gca",
"mdtraj.compute_psi",
"numpy.linspace",
"mdtraj.compute_phi",
"matplotlib.colors.LogNorm"
] | [((192, 201), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (199, 201), True, 'import matplotlib.pyplot as plt\n'), ((460, 480), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {}), '()\n', (478, 480), True, 'import matplotlib as mpl\n'), ((748, 768), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {}), '()\n', (766, 768), True, 'import matplotlib as mpl\n'), ((378, 408), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(64)'], {}), '(-np.pi, np.pi, 64)\n', (389, 408), True, 'import numpy as np\n'), ((410, 440), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(64)'], {}), '(-np.pi, np.pi, 64)\n', (421, 440), True, 'import numpy as np\n'), ((666, 696), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(64)'], {}), '(-np.pi, np.pi, 64)\n', (677, 696), True, 'import numpy as np\n'), ((698, 728), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(64)'], {}), '(-np.pi, np.pi, 64)\n', (709, 728), True, 'import numpy as np\n'), ((274, 294), 'mdtraj.compute_phi', 'md.compute_phi', (['traj'], {}), '(traj)\n', (288, 294), True, 'import mdtraj as md\n'), ((323, 343), 'mdtraj.compute_psi', 'md.compute_psi', (['traj'], {}), '(traj)\n', (337, 343), True, 'import mdtraj as md\n'), ((562, 582), 'mdtraj.compute_phi', 'md.compute_phi', (['traj'], {}), '(traj)\n', (576, 582), True, 'import mdtraj as md\n'), ((611, 631), 'mdtraj.compute_psi', 'md.compute_psi', (['traj'], {}), '(traj)\n', (625, 631), True, 'import mdtraj as md\n')] |
import autograd.numpy as anp
import numpy as np
from numpy import linalg as LA
from pymoo.util.misc import stack
from pymoo.model.problem import Problem
class Lamp(Problem):
    """Three-objective lamp design problem for pymoo.

    A lamp consists of a base bar, three arms attached to its top and a
    "hand" segment at the end of each arm.  The 21 design variables (all
    in [0, 1]) are rescaled to physical vectors in rescaleParams():
    base (3), arms a1-a3 (9), hands h1-h3 (9).

    Objectives (each normalised to roughly [0, 1]):
      f1 - instability: squared horizontal offset of the centre of mass
      f2 - total bar mass
      f3 - mean distance of the three hand tips to a focal point
    """

    def __init__(self, focal_z):
        super().__init__(n_var=21,
                         n_obj=3,
                         n_constr=0,
                         xl=anp.array(21 * [0]),
                         xu=anp.array(21 * [1]))
        # physical design-space constants
        self.bMax = 4.0    # maximum base extent
        self.aMax = 2.0    # maximum arm extent
        self.hMax = 1.0    # maximum hand extent
        self.wMax = 0.05   # maximum bar half-width
        self.endVolume = 0.3 ** 3  # volume of the lamp head at each hand tip
        self.radii = np.ones([4]) * self.wMax * 2
        scale = self.bMax / 2 + self.aMax  # TODO: we can't achieve anything taller
        # context variable: focal point height interpolated between 1 and 5
        self.focalPoint_z = self.lerp(focal_z, 1, 5) * scale
        self.focalPoint = np.array([2 * scale, 2 * scale, self.focalPoint_z])

    def _evaluate(self, x, out, *args, **kwargs):
        """Evaluate all three objectives for a population matrix x (n x 21)."""
        scaledx = self.rescaleParams(x)
        base = scaledx[:, 0:3]
        a1 = scaledx[:, 3:6]
        a2 = scaledx[:, 6:9]
        a3 = scaledx[:, 9:12]
        h1 = scaledx[:, 12:15]
        h2 = scaledx[:, 15:18]
        h3 = scaledx[:, 18:21]
        f1 = self.f1(base, a1, a2, a3, h1, h2, h3)
        f2 = self.f2(base, a1, a2, a3, h1, h2, h3)
        f3 = self.f3(base, a1, a2, a3, h1, h2, h3)
        out["F"] = anp.column_stack([f1, f2, f3])

    def f1(self, base, a1, a2, a3, h1, h2, h3):
        """Instability: squared horizontal distance of the centre of mass."""
        COM = self.get_centerOfMass(base, a1, a2, a3, h1, h2, h3)
        f1 = COM[:, 0] ** 2 + COM[:, 1] ** 2
        # normalize so all possible outputs are between 0 and 1
        f1 = f1 / 15.0  # observed via jscad
        return f1

    def f2(self, base, a1, a2, a3, h1, h2, h3):
        """Mass, min-max normalised with analytic lower/upper mass bounds."""
        minMass = (LA.norm(np.array([0.0, 0.0, 1.0])) * (self.aMax * 3 + self.bMax)
                   + LA.norm(np.array([0.4, 0.4, 1.0]) * self.hMax * 3)) * 2 * (self.wMax)
        maxMass = LA.norm(np.array([1.0, 1.0, 2.0])) * (self.hMax * 3 + self.aMax * 3 + self.bMax) * (2 * self.wMax)
        mass = self.get_mass(base, a1, a2, a3, h1, h2, h3)
        # normalize so all possible outputs are between 0 and 1
        f2 = (mass - minMass) / (maxMass - minMass)
        return f2

    def f3(self, base, a1, a2, a3, h1, h2, h3):
        """Mean distance of the three hand tips to the focal point."""
        numpts = base.shape[0]
        focalRep = self.repRow(self.focalPoint, numpts)
        endPosCost = self.rownorm(base + a1 + h1 - focalRep) \
            + self.rownorm(base + a2 + h2 - focalRep) \
            + self.rownorm(base + a3 + h3 - focalRep)
        endPosCost = endPosCost / 3.0
        # normalize so outputs between 0 and 1; estimated in jscad viewer
        f3 = endPosCost / 20.0
        return f3

    def get_mass(self, base, a1, a2, a3, h1, h2, h3):
        """Total bar mass (without the lamp heads).

        Divides each element mass by its radius once to undo the squared
        radius from get_elementMasses, matching the Schulz et al. reference
        implementation.
        """
        barMasses = self.get_elementMasses(base, a1, a2, a3, h1, h2, h3)
        barMasses[:, 0] = barMasses[:, 0] / self.radii[0]
        barMasses[:, 1] = barMasses[:, 1] / self.radii[1]
        barMasses[:, 2] = barMasses[:, 2] / self.radii[2]
        barMasses[:, 3] = barMasses[:, 3] / self.radii[3]
        barMasses[:, 4] = barMasses[:, 4] / self.radii[1]
        barMasses[:, 5] = barMasses[:, 5] / self.radii[2]
        barMasses[:, 6] = barMasses[:, 6] / self.radii[3]
        return self.rowsum(barMasses)

    def get_elementMasses(self, base, a1, a2, a3, h1, h2, h3):
        """Per-element masses (length * radius^2), one column per bar."""
        numpts = base.shape[0]
        masses = np.zeros([numpts, 7])  # one column for each element
        masses[:, 0] = self.rownorm(base) * self.radii[0] ** 2
        masses[:, 1] = self.rownorm(a1) * self.radii[1] ** 2
        masses[:, 2] = self.rownorm(a2) * self.radii[2] ** 2
        masses[:, 3] = self.rownorm(a3) * self.radii[3] ** 2
        masses[:, 4] = self.rownorm(h1) * self.radii[1] ** 2
        masses[:, 5] = self.rownorm(h2) * self.radii[2] ** 2
        masses[:, 6] = self.rownorm(h3) * self.radii[3] ** 2
        return masses

    def get_centerOfMass(self, base, a1, a2, a3, h1, h2, h3):
        """Mass-weighted centre of mass of all bars plus the three lamp heads."""
        masses = self.get_elementMasses(base, a1, a2, a3, h1, h2, h3)
        totalMass = self.rowsum(masses) + 3 * self.endVolume
        # each bar contributes at its midpoint, each lamp head at the hand tip
        centerOfMass = base * 0.5 * self.repCol(masses[:, 0], 3) \
            + (base + a1 * 0.5) * self.repCol(masses[:, 1], 3) \
            + (base + a2 * 0.5) * self.repCol(masses[:, 2], 3) \
            + (base + a3 * 0.5) * self.repCol(masses[:, 3], 3) \
            + (base + a1 + h1 * 0.5) * self.repCol(masses[:, 4], 3) \
            + (base + a2 + h2 * 0.5) * self.repCol(masses[:, 5], 3) \
            + (base + a3 + h3 * 0.5) * self.repCol(masses[:, 6], 3) \
            + (base + a1 + h1) * self.endVolume \
            + (base + a2 + h2) * self.endVolume \
            + (base + a3 + h3) * self.endVolume
        return centerOfMass / self.repCol(totalMass, 3)

    def repCol(self, vec, numcols):
        """Tile 1-D `vec` (length n) into an (n, numcols) matrix whose columns
        all equal `vec`.

        Vectorised np.tile replaces the original O(numcols) Python loop of
        np.concatenate calls; the result is identical.
        """
        return np.tile(np.reshape(vec, [-1, 1]), [1, numcols])

    def repRow(self, vec, numrows):
        """Tile 1-D `vec` (length n) into a (numrows, n) matrix whose rows all
        equal `vec`.

        Vectorised np.tile replaces the original O(numrows) Python loop of
        np.concatenate calls; the result is identical.
        """
        return np.tile(np.reshape(vec, [1, -1]), [numrows, 1])

    def rownorm(self, mat):
        """Euclidean norm of each row of `mat`."""
        return LA.norm(mat, axis=1)

    def rowsum(self, mat):
        """Sum of each row of `mat`."""
        return np.sum(mat, axis=1)

    # ====== parameter setup
    def rescaleParams(self, x):
        """Map the unit-cube design matrix x (n x 21) to physical coordinates.

        Columns 0-2: base, 3-11: arms a1-a3, 12-20: hands h1-h3.  Each
        component is linearly interpolated into its own signed range and
        scaled by the corresponding bMax / aMax / hMax.
        """
        scaledx = np.empty_like(x)
        # base
        scaledx[:, 0] = self.lerp(x[:, 0], 0, 1) * self.bMax
        scaledx[:, 1] = self.lerp(x[:, 1], 0, 1) * self.bMax
        scaledx[:, 2] = self.lerp(x[:, 2], 1, self.bMax) * self.bMax
        # arm 1
        scaledx[:, 3] = self.lerp(x[:, 3], 0, 1) * self.aMax
        scaledx[:, 4] = self.lerp(x[:, 4], 0, 1) * self.aMax
        scaledx[:, 5] = self.lerp(x[:, 5], 1, 2) * self.aMax
        # arm 2
        scaledx[:, 6] = self.lerp(x[:, 6], -1, 0) * self.aMax
        scaledx[:, 7] = self.lerp(x[:, 7], 0, 1) * self.aMax
        scaledx[:, 8] = self.lerp(x[:, 8], 1, 2) * self.aMax
        # arm 3
        scaledx[:, 9] = self.lerp(x[:, 9], 0, 1) * self.aMax
        scaledx[:, 10] = self.lerp(x[:, 10], -1, 0) * self.aMax
        scaledx[:, 11] = self.lerp(x[:, 11], 1, 2) * self.aMax
        # hand 1
        scaledx[:, 12] = self.lerp(x[:, 12], 0.4, 1) * self.hMax
        scaledx[:, 13] = self.lerp(x[:, 13], 0.4, 1) * self.hMax
        scaledx[:, 14] = self.lerp(x[:, 14], -2, -1) * self.hMax
        # hand 2
        scaledx[:, 15] = self.lerp(x[:, 15], -1, -0.4) * self.hMax
        scaledx[:, 16] = self.lerp(x[:, 16], 0.4, 1) * self.hMax
        scaledx[:, 17] = self.lerp(x[:, 17], -2, -1) * self.hMax
        # hand 3
        scaledx[:, 18] = self.lerp(x[:, 18], 0.4, 1) * self.hMax
        scaledx[:, 19] = self.lerp(x[:, 19], -1, -0.4) * self.hMax
        scaledx[:, 20] = self.lerp(x[:, 20], -2, -1) * self.hMax
        return scaledx

    def lerp(self, t, min, max):
        """Linear interpolation of t in [0, 1] onto [min, max]."""
        return min + t * (max - min)
| [
"numpy.reshape",
"numpy.ones",
"autograd.numpy.column_stack",
"autograd.numpy.array",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty_like",
"numpy.concatenate",
"numpy.linalg.norm"
] | [((673, 724), 'numpy.array', 'np.array', (['[2 * scale, 2 * scale, self.focalPoint_z]'], {}), '([2 * scale, 2 * scale, self.focalPoint_z])\n', (681, 724), True, 'import numpy as np\n'), ((1124, 1154), 'autograd.numpy.column_stack', 'anp.column_stack', (['[f1, f2, f3]'], {}), '([f1, f2, f3])\n', (1140, 1154), True, 'import autograd.numpy as anp\n'), ((3118, 3139), 'numpy.zeros', 'np.zeros', (['[numpts, 7]'], {}), '([numpts, 7])\n', (3126, 3139), True, 'import numpy as np\n'), ((4531, 4569), 'numpy.reshape', 'np.reshape', (['a', '[numrows, numcols]', '"""F"""'], {}), "(a, [numrows, numcols], 'F')\n", (4541, 4569), True, 'import numpy as np\n'), ((4814, 4852), 'numpy.reshape', 'np.reshape', (['a', '[numrows, numcols]', '"""C"""'], {}), "(a, [numrows, numcols], 'C')\n", (4824, 4852), True, 'import numpy as np\n'), ((4940, 4960), 'numpy.linalg.norm', 'LA.norm', (['mat'], {'axis': '(1)'}), '(mat, axis=1)\n', (4947, 4960), True, 'from numpy import linalg as LA\n'), ((4996, 5015), 'numpy.sum', 'np.sum', (['mat'], {'axis': '(1)'}), '(mat, axis=1)\n', (5002, 5015), True, 'import numpy as np\n'), ((5088, 5104), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (5101, 5104), True, 'import numpy as np\n'), ((4499, 4523), 'numpy.concatenate', 'np.concatenate', (['(a, vec)'], {}), '((a, vec))\n', (4513, 4523), True, 'import numpy as np\n'), ((4782, 4806), 'numpy.concatenate', 'np.concatenate', (['(a, vec)'], {}), '((a, vec))\n', (4796, 4806), True, 'import numpy as np\n'), ((276, 295), 'autograd.numpy.array', 'anp.array', (['(21 * [0])'], {}), '(21 * [0])\n', (285, 295), True, 'import autograd.numpy as anp\n'), ((303, 322), 'autograd.numpy.array', 'anp.array', (['(21 * [1])'], {}), '(21 * [1])\n', (312, 322), True, 'import autograd.numpy as anp\n'), ((462, 474), 'numpy.ones', 'np.ones', (['[4]'], {}), '([4])\n', (469, 474), True, 'import numpy as np\n'), ((1639, 1664), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0]'], {}), '([1.0, 1.0, 2.0])\n', (1647, 1664), True, 
'import numpy as np\n'), ((1501, 1526), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (1509, 1526), True, 'import numpy as np\n'), ((1562, 1587), 'numpy.array', 'np.array', (['[0.4, 0.4, 1.0]'], {}), '([0.4, 0.4, 1.0])\n', (1570, 1587), True, 'import numpy as np\n')] |
"""plotlib.py: Module is used to plotting tools"""
__author__ = "<NAME>."
__copyright__ = ""
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import matplotlib as mpl
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.style.use(["science", "ieee"])
# import sys
# sys.path.extend(["py/", "py/config/"])
from .utils import *
import numpy as np
from scipy.stats import pearsonr
class Summary(object):
    """
    Base class for multi-panel summary figures.

    Creates one matplotlib figure sized proportionally to the subplot grid
    and hands out axes one at a time via :meth:`add_axes`.
    """
    def __init__(self, nrows=1, ncols=1, dpi=180, size=(5, 5)):
        """
        Parameters
        ----------
        nrows, ncols : int
            Subplot grid dimensions.
        dpi : int
            Figure resolution.
        size : tuple
            (width, height) of a single panel, in inches.
        """
        self.nrows = nrows
        self.ncols = ncols
        self.dpi = dpi
        # Total figure size scales with the number of panels.
        self.size = (size[0] * self.ncols, size[1] * self.nrows)
        # BUG FIX: the scaled total size was computed but the figure was
        # created with the single-panel `size`; pass `self.size` so multi-panel
        # subclasses (e.g. 2x2 grids) get a proportionally larger canvas.
        self.fig = plt.figure(dpi=dpi, figsize=self.size)
        self.fnum = 0
        return
    def add_axes(self):
        """Allocate and return the next subplot axes in row-major order."""
        self.fnum += 1
        ax = self.fig.add_subplot(self.nrows, self.ncols, self.fnum)
        return ax
    def save(self, fname):
        """Save the figure to `fname` with a tight bounding box."""
        self.fig.subplots_adjust(wspace=0.7, hspace=0.7)
        self.fig.savefig(fname, bbox_inches="tight")
        return
    def close(self):
        """Close the current matplotlib figure and free its resources."""
        plt.close()
        return
class BfieldSummary(Summary):
    """
    Summary figure (2x2 grid by default) for B-field / E-field analysis:
    time series of B with E overlaid, a scatter of the two E estimates,
    and the transfer-function magnitude/phase.
    """
    def __init__(self, nrows=2, ncols=2, dpi=180, size=(5, 5)):
        super().__init__(nrows, ncols, dpi, size)
        return
    def add_Bfield_Seq(self, B, E):
        """
        Plot the synthetic B-field time series with the E-field overlaid
        on a twin (red) y-axis.

        B, E: records exposing `dTime` and `X` — presumably time in seconds
        and field values (nT / mV/km); confirm against callers.
        """
        # Round the y-limits outward to the nearest multiple of 10.
        ylim = [(int(np.min(B.X / 10)) - 1) * 10, (int(np.max(B.X / 10)) + 1) * 10]
        xlim = [np.min(B.dTime / 3600.0), np.max(B.dTime / 3600.0)]
        ax = self.add_axes()
        ax.plot(B.dTime / 3600.0, B.X, ls="-", lw=0.8)  # seconds -> hours
        ax.set_xlabel("Time, Hours")
        ax.set_ylabel("B-Field, nT")
        ax.set_ylim(ylim)
        ax.set_xlim(xlim)
        # E-field shares the time axis but gets its own y-axis on the right.
        ax = ax.twinx()
        ax.plot(E.dTime / 3600.0, E.X, color="r", ls="-", lw=0.8)
        ax.set_ylabel("E-Field, mv/km", color="r")
        ylim = [(int(np.min(E.X / 10)) - 1) * 10, (int(np.max(E.X / 10)) + 1) * 10]
        ax.set_ylim(ylim)
        ax.set_xlim(xlim)
        return ax
    def add_Es(self, Ea, En):
        """
        Scatter the analytic E estimate (`Ea`) against the FFT-derived one
        (`En`), with a 1:1 reference line and the Pearson correlation.
        """
        ax = self.add_axes()
        ax.plot(Ea, En, "ko", ms=0.1, alpha=0.4)
        ax.set_xlabel(r"$E^{anl}(t)$")
        ax.set_ylabel(r"$E^{fft}(t)$")
        # Identity line drawn in axes coordinates so it spans the panel.
        ax.plot([0, 1], [0, 1], transform=ax.transAxes, color="r", ls="--", lw=0.8)
        r, _ = pearsonr(Ea, En)
        ax.text(
            0.1,
            0.9,
            r"$\rho=$%.10f" % r,
            va="center",
            ha="left",
            transform=ax.transAxes,
        )
        # ax.set_xlim([-15, 15])
        # ax.set_ylim([-15, 15])
        return ax
    def add_TK_param(self, tf):
        """
        Plot the (complex) transfer function `tf.E2B` against `tf.freq`:
        magnitude on the left axis, phase in degrees on a twin right axis.
        """
        ax = self.add_axes()
        ax.semilogx(tf.freq, np.abs(tf.E2B), "k", lw=0.4)
        ax.set_xlim(1e-4, 1e-2)
        ax.set_ylabel(r"$|X(f)|$")
        ax.set_xlabel(r"$f_0$, Hz")
        ax = ax.twinx()
        ax.semilogx(tf.freq, np.angle(tf.E2B, deg=True), "r", lw=0.4)
        ax.set_xlim(1e-4, 1e-2)
        ax.set_ylabel(r"$\theta[X(f)]$", color="r")
        return ax
class AnalysisSummary(Summary):
    """Summary figure for simulation/analysis results (2x2 grid by default)."""
    def __init__(self, nrows=2, ncols=2, dpi=180, size=(5, 5)):
        # All figure construction is delegated to the Summary base class;
        # only the default grid shape differs.
        super().__init__(nrows=nrows, ncols=ncols, dpi=dpi, size=size)
def potential_along_section(
    V, x, pname, sec=None, Vi=None, Vk=None, Z=None, Y=None, gma=None, Z0=None
):
    """
    Plot the potential profile V(x) along one cable section and save it.

    Parameters
    ----------
    V : potential values (volts) at positions `x`.
    x : positions along the cable (km).
    pname : output file name for the saved figure.
    sec : optional section (bin) number shown in the annotation.
    Vi, Vk : optional end-point potentials shown in the annotation.
    Z, Y : optional per-unit-length impedance/admittance; multiplied by 1e3
        below for display in per-km units (assumes per-meter inputs —
        TODO confirm with callers).
    gma, Z0 : optional propagation constant and characteristic impedance.

    Notes
    -----
    `frexp102str` comes from the star-import of `.utils`.
    """
    mpl.rcParams.update({"xtick.labelsize": 12, "ytick.labelsize": 12, "font.size": 12})
    fig, axes = plt.subplots(
        nrows=1, ncols=1, dpi=150, figsize=(6, 3), sharex="all", sharey="all"
    )
    ax = axes
    ax.set_ylabel("Voltage, V")
    ax.set_xlabel("Cable Length, km")
    ax.plot(x, V, "k", lw=0.8, ls="-")
    # Rescale to per-km units for the annotation (rebinds locals only; the
    # caller's values are untouched).
    if Z is not None:
        Z *= 1e3
    if Y is not None:
        Y *= 1e3
    if gma is not None:
        gma *= 1e3
    # Build the multi-line annotation from whichever parameters were given.
    txt = ""
    if sec is not None:
        txt += "Along: Bin%02d\n" % (sec)
    if (Vi is not None) and (Vk is not None):
        txt += r"$V_i,V_k\sim %.1f V, %.1f V$" % (Vi, Vk) + "\n"
    if (Z is not None) and (Y is not None):
        txt += (
            r"$Z,Y\sim$ %s $\Omega/km$, %s $\mho/km$" % (frexp102str(Z), frexp102str(Y))
            + "\n"
        )
    if (gma is not None) and (Z0 is not None):
        txt += (
            r"$\gamma,Z_0\sim$ %s /km, %s $\Omega$"
            % (frexp102str(gma), frexp102str(Z0))
            + "\n"
        )
    txt += "L=%d km" % np.max(x)
    ax.text(
        0.05, 0.95, txt, ha="left", va="top", transform=ax.transAxes, fontsize="small"
    )
    ax.set_xlim(x[0], x[-1])
    fig.savefig(pname, bbox_inches="tight")
    return
def cable_potential(V, x, pname):
    """Plot the cable potential profile ``V`` along positions ``x`` (km) and
    save the figure to ``pname``."""
    mpl.rcParams.update({"xtick.labelsize": 12, "ytick.labelsize": 12, "font.size": 12})
    fig, ax = plt.subplots(
        nrows=1, ncols=1, dpi=150, figsize=(6, 3), sharex="all", sharey="all"
    )
    ax.set_ylabel("Voltage, V")
    ax.set_xlabel("Cable Length, km")
    ax.plot(x, V, "k", lw=0.8, ls="-")
    ax.set_xlim(x[0], x[-1])
    # Empty annotation kept for layout parity with potential_along_section.
    ax.text(
        0.05, 0.95, "", ha="left", va="top", transform=ax.transAxes, fontsize="small"
    )
    fig.savefig(pname, bbox_inches="tight")
    return
| [
"numpy.abs",
"matplotlib.rcParams.update",
"matplotlib.use",
"matplotlib.pyplot.style.use",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.angle",
"matplotlib.pyplot.figure",
"scipy.stats.pearsonr",
"matplotlib.pyplot.subplots"
] | [((270, 291), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (284, 291), False, 'import matplotlib\n'), ((325, 359), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['science', 'ieee']"], {}), "(['science', 'ieee'])\n", (338, 359), True, 'import matplotlib.pyplot as plt\n'), ((3478, 3566), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'xtick.labelsize': 12, 'ytick.labelsize': 12, 'font.size': 12}"], {}), "({'xtick.labelsize': 12, 'ytick.labelsize': 12,\n 'font.size': 12})\n", (3497, 3566), True, 'import matplotlib as mpl\n'), ((3579, 3666), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'dpi': '(150)', 'figsize': '(6, 3)', 'sharex': '"""all"""', 'sharey': '"""all"""'}), "(nrows=1, ncols=1, dpi=150, figsize=(6, 3), sharex='all',\n sharey='all')\n", (3591, 3666), True, 'import matplotlib.pyplot as plt\n'), ((4748, 4836), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'xtick.labelsize': 12, 'ytick.labelsize': 12, 'font.size': 12}"], {}), "({'xtick.labelsize': 12, 'ytick.labelsize': 12,\n 'font.size': 12})\n", (4767, 4836), True, 'import matplotlib as mpl\n'), ((4849, 4936), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'dpi': '(150)', 'figsize': '(6, 3)', 'sharex': '"""all"""', 'sharey': '"""all"""'}), "(nrows=1, ncols=1, dpi=150, figsize=(6, 3), sharex='all',\n sharey='all')\n", (4861, 4936), True, 'import matplotlib.pyplot as plt\n'), ((796, 829), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': 'dpi', 'figsize': 'size'}), '(dpi=dpi, figsize=size)\n', (806, 829), True, 'import matplotlib.pyplot as plt\n'), ((1185, 1196), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1194, 1196), True, 'import matplotlib.pyplot as plt\n'), ((2454, 2470), 'scipy.stats.pearsonr', 'pearsonr', (['Ea', 'En'], {}), '(Ea, En)\n', (2462, 2470), False, 'from scipy.stats import pearsonr\n'), ((4508, 4517), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', 
(4514, 4517), True, 'import numpy as np\n'), ((1611, 1635), 'numpy.min', 'np.min', (['(B.dTime / 3600.0)'], {}), '(B.dTime / 3600.0)\n', (1617, 1635), True, 'import numpy as np\n'), ((1637, 1661), 'numpy.max', 'np.max', (['(B.dTime / 3600.0)'], {}), '(B.dTime / 3600.0)\n', (1643, 1661), True, 'import numpy as np\n'), ((2824, 2838), 'numpy.abs', 'np.abs', (['tf.E2B'], {}), '(tf.E2B)\n', (2830, 2838), True, 'import numpy as np\n'), ((3009, 3035), 'numpy.angle', 'np.angle', (['tf.E2B'], {'deg': '(True)'}), '(tf.E2B, deg=True)\n', (3017, 3035), True, 'import numpy as np\n'), ((1532, 1548), 'numpy.min', 'np.min', (['(B.X / 10)'], {}), '(B.X / 10)\n', (1538, 1548), True, 'import numpy as np\n'), ((1566, 1582), 'numpy.max', 'np.max', (['(B.X / 10)'], {}), '(B.X / 10)\n', (1572, 1582), True, 'import numpy as np\n'), ((2035, 2051), 'numpy.min', 'np.min', (['(E.X / 10)'], {}), '(E.X / 10)\n', (2041, 2051), True, 'import numpy as np\n'), ((2069, 2085), 'numpy.max', 'np.max', (['(E.X / 10)'], {}), '(E.X / 10)\n', (2075, 2085), True, 'import numpy as np\n')] |
"""
Copyright 2018 <NAME>, S.A.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import deque
import numpy as np
import random
import abc
class QLearningAgent:
    """Q-learning agent backed by a function-approximation model.

    Subclasses implement :meth:`build_model` to supply a Keras-style model
    exposing ``predict`` and ``fit``; transitions are stored in a bounded
    replay memory and replayed in random minibatches.
    """
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        # hyperparameters
        self.gamma = 0.95           # discount applied to future rewards
        self.epsilon = 1.0          # current exploration probability
        self.epsilon_decay = 0.995  # multiplicative decay after each replay
        self.epsilon_min = 0.1      # floor on the exploration probability
        self.batch_size = 32        # minibatch size sampled from memory
        # agent state
        self.model = self.build_model()
        self.memory = deque(maxlen=2000)
    @abc.abstractmethod
    def build_model(self):
        """Return the underlying model; subclasses must override."""
        return None
    def select_action(self, state, do_train=True):
        """Epsilon-greedy policy: explore with probability epsilon while
        training, otherwise pick the model's best action."""
        if do_train and np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        return np.argmax(self.model.predict(state)[0])
    def record(self, state, action, reward, next_state, done):
        """Append one transition tuple to the replay memory."""
        self.memory.append((state, action, reward, next_state, done))
    def replay(self):
        """Fit the model on one random minibatch; returns 0 when the memory
        is still smaller than the batch size."""
        if len(self.memory) < self.batch_size:
            return 0
        batch = random.sample(self.memory, self.batch_size)
        for state, action, reward, next_state, done in batch:
            # Bootstrap the Q-target from the next state unless terminal.
            target = reward
            if not done:
                target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
"random.sample",
"collections.deque",
"numpy.random.rand",
"random.randrange"
] | [((1244, 1262), 'collections.deque', 'deque', ([], {'maxlen': '(2000)'}), '(maxlen=2000)\n', (1249, 1262), False, 'from collections import deque\n'), ((1800, 1843), 'random.sample', 'random.sample', (['self.memory', 'self.batch_size'], {}), '(self.memory, self.batch_size)\n', (1813, 1843), False, 'import random\n'), ((1464, 1498), 'random.randrange', 'random.randrange', (['self.action_size'], {}), '(self.action_size)\n', (1480, 1498), False, 'import random\n'), ((1411, 1427), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1425, 1427), True, 'import numpy as np\n')] |
from __future__ import division, print_function, absolute_import
__author__ = '<NAME>'
import numpy
from hep_ml.losses import CompositeLossFunction, MSELossFunction
from pruning import greedy, utils
def test_pruner(mx_filename='../data/formula.mx', higgs_filename='../data/training.csv'):
    """Smoke-test GreedyPruner on the Higgs sample for several loss/keep setups."""
    with open(mx_filename, 'rb') as mx:
        formula_mx = mx.read()
    X, y, w = utils.get_higgs_data(higgs_filename)
    X = numpy.array(X, dtype='float32')
    # Same three configurations as before, run in the same order.
    configs = [
        (CompositeLossFunction(), 0),
        (CompositeLossFunction(), 5),
        (MSELossFunction(), 5),
    ]
    for loss, n_kept in configs:
        pruner = greedy.GreedyPruner(loss_function=loss, iterations=5, n_kept_best=n_kept)
        pruner.prune(formula_mx, X, y, w, verbose=True)
def test_nesterov_pruner(mx_filename='../data/formula.mx', higgs_filename='../data/training.csv', iterations=30):
    """Smoke-test NesterovPruner with 0, 1 and 2 Nesterov steps."""
    with open(mx_filename, 'rb') as mx:
        formula_mx = mx.read()
    X, y, w = utils.get_higgs_data(higgs_filename)
    X = numpy.array(X, dtype='float32')
    for n_steps in (0, 1, 2):
        pruner = greedy.NesterovPruner(loss_function=MSELossFunction(),
                                    iterations=iterations, n_nesterov_steps=n_steps)
        pruner.prune(formula_mx, X, y, w, verbose=True)
    # BUG FIX: removed a leftover `assert 0 == 1`, which unconditionally
    # failed the test (a common trick to force pytest to show captured
    # output while debugging) and made the suite permanently red.
| [
"hep_ml.losses.CompositeLossFunction",
"pruning.utils.get_higgs_data",
"numpy.array",
"hep_ml.losses.MSELossFunction"
] | [((379, 415), 'pruning.utils.get_higgs_data', 'utils.get_higgs_data', (['higgs_filename'], {}), '(higgs_filename)\n', (399, 415), False, 'from pruning import greedy, utils\n'), ((424, 455), 'numpy.array', 'numpy.array', (['X'], {'dtype': '"""float32"""'}), "(X, dtype='float32')\n", (435, 455), False, 'import numpy\n'), ((1115, 1151), 'pruning.utils.get_higgs_data', 'utils.get_higgs_data', (['higgs_filename'], {}), '(higgs_filename)\n', (1135, 1151), False, 'from pruning import greedy, utils\n'), ((1160, 1191), 'numpy.array', 'numpy.array', (['X'], {'dtype': '"""float32"""'}), "(X, dtype='float32')\n", (1171, 1191), False, 'import numpy\n'), ((504, 527), 'hep_ml.losses.CompositeLossFunction', 'CompositeLossFunction', ([], {}), '()\n', (525, 527), False, 'from hep_ml.losses import CompositeLossFunction, MSELossFunction\n'), ((658, 681), 'hep_ml.losses.CompositeLossFunction', 'CompositeLossFunction', ([], {}), '()\n', (679, 681), False, 'from hep_ml.losses import CompositeLossFunction, MSELossFunction\n'), ((812, 829), 'hep_ml.losses.MSELossFunction', 'MSELossFunction', ([], {}), '()\n', (827, 829), False, 'from hep_ml.losses import CompositeLossFunction, MSELossFunction\n'), ((1242, 1259), 'hep_ml.losses.MSELossFunction', 'MSELossFunction', ([], {}), '()\n', (1257, 1259), False, 'from hep_ml.losses import CompositeLossFunction, MSELossFunction\n'), ((1406, 1423), 'hep_ml.losses.MSELossFunction', 'MSELossFunction', ([], {}), '()\n', (1421, 1423), False, 'from hep_ml.losses import CompositeLossFunction, MSELossFunction\n'), ((1570, 1587), 'hep_ml.losses.MSELossFunction', 'MSELossFunction', ([], {}), '()\n', (1585, 1587), False, 'from hep_ml.losses import CompositeLossFunction, MSELossFunction\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 19:39:26 2017
@author: PiotrTutak
"""
import numpy as np
from itertools import zip_longest
from operator import itemgetter
import random
import sys
"""
Różne funkcje aktywacji używane w testowaniu neuronu:
"""
def ident(x):
    """Identity activation: the input returned as a float."""
    return float(x)
def rectifier(x):
    """ReLU activation: max(0, x), always returned as a float.

    Fixed to return ``0.0`` (float) for negative input — the original
    returned the int ``0``, inconsistent with the other activation
    functions in this module, which all return floats.
    """
    return max(0.0, float(x))
def one(x):
    """Constant activation: always 1.0, regardless of the input."""
    return 1.0
def zero(x):
    """Constant activation: always 0.0, regardless of the input."""
    return 0.0
class Const:
    """Factory of constant activation functions: ``Const()(a)(x) == float(a)``."""
    def __call__(self, alfa):
        value = float(alfa)
        def const(x):
            return value
        # Tag the closure's name with the constant for readable reprs.
        const.__name__ += '({0:.3f})'.format(alfa)
        return const
def hardOne(x):
    """Heaviside step activation: 0.0 for negative input, 1.0 otherwise."""
    return 0.0 if x < 0 else 1.0
def hardSign(x):
    """Sign-step activation: -1.0 for negative input, 1.0 otherwise."""
    return -1.0 if x < 0 else 1.0
def squash(x):
    """Clip the input to the interval [-1, 1]; values inside the interval
    are returned unchanged."""
    if x > 1:
        return 1.0
    if x < -1:
        return -1.0
    return x
class Sigm:
    """Factory for logistic sigmoid activations and their derivatives,
    parameterized by the steepness ``alfa``."""
    def __call__(self, alfa):
        def sigm(x):
            return 1.0 / (1.0 + np.exp(-alfa * x))
        sigm.__name__ += '({0:.3f})'.format(alfa)
        return sigm
    def derivative(self, alfa):
        def sigmDeriv(x):
            # d/dx sigm(x) = alfa * e^{-alfa x} / (1 + e^{-alfa x})^2
            e = np.exp(-alfa * x)
            return alfa * e / ((1.0 + e) ** 2)
        sigmDeriv.__name__ += '({0:.3f})'.format(alfa)
        return sigmDeriv
class SignSigm:
    """Factory for bipolar (tanh-like, range (-1, 1)) sigmoid activations
    and their derivatives, parameterized by the steepness ``alfa``."""
    def __call__(self, alfa):
        def signSigm(x):
            return 2.0 / (1.0 + np.exp(-alfa * x)) - 1.0
        signSigm.__name__ += '({0:.3f})'.format(alfa)
        return signSigm
    def derivative(self, alfa):
        def signSigmDeriv(x):
            # d/dx signSigm(x) = 2 alfa e^{-alfa x} / (1 + e^{-alfa x})^2
            e = np.exp(-alfa * x)
            return 2.0 * alfa * e / ((1.0 + e) ** 2)
        signSigmDeriv.__name__ += '({0:.3f})'.format(alfa)
        return signSigmDeriv
# Pretty-print a list of numbers with the requested precision.
def listWithPrec(listA, prec):
    """Format a sequence of numbers as a bracketed, comma-separated string.

    Each value is rendered with ``prec`` decimal digits in a space-signed
    field of width ``prec + 3``.

    BUG FIX: the original dropped the opening bracket for an empty input
    (returning ``"]"`` instead of ``"[]"``) because it unconditionally
    stripped the last character before appending ``]``.
    """
    formatStr = "{0: " + str(int(prec + 3)) + "." + str(int(prec)) + "f}"
    return "[" + ",".join(formatStr.format(x) for x in listA) + "]"
# Error metric functions.
def MSE(results, expected):
    """Mean squared error between two equal-length numeric sequences.

    Assumes ``len(results) == len(expected)`` (as the original did);
    note ``zip`` silently truncates to the shorter sequence.
    """
    # Avoid shadowing the builtin `sum` (the original used it as a local).
    return sum((r - e) ** 2 for r, e in zip(results, expected)) / len(results)
def MAPE(results, expected):
    """Mean absolute percentage error, in percent.

    NOTE(review): the denominator is ``results[i]`` (kept from the
    original); conventional MAPE divides by the actual/expected value
    instead — confirm this choice is intentional before changing it.
    """
    # Avoid shadowing the builtin `sum` (the original used it as a local).
    total = sum(abs((e - r) / r) for r, e in zip(results, expected))
    return 100 * total / len(results)
class Neuron:
    """
    A single trainable neuron (weight vector + bias + activation function).

    All internal state is written through ``self.__dict__`` because
    ``__setattr__`` below raises for every attribute assignment;
    ``__getattr__`` likewise rejects reads of unknown attributes.
    """
    def __init__(self, weights, activFunc, activFuncDeriv, learnRate=0.1, bias=-0.5):
        self.__dict__['_weights']=np.array(weights)
        self.__dict__['_learnRate']=learnRate
        self.__dict__['_activFunc']=activFunc
        self.__dict__['_activFuncDeriv']=activFuncDeriv
        self.__dict__['_bias']=bias
        # Per-call caches filled by process() / propagateError():
        self.__dict__['_error']=None
        self.__dict__['_inputValues']=None
        self.__dict__['_val']=None
        self.__dict__['_output']=None
    def process(self,inputValues):
        """
        Compute the neuron output: activFunc(weights . inputValues + bias).
        Raises TypeError when the input length does not match the weights.
        """
        if len(inputValues)!=len(self._weights):
            raise TypeError('Wrong values length')
        self.__dict__['_inputValues']=np.array(inputValues)
        self.__dict__['_val']=np.dot(self._weights,self._inputValues)+self._bias
        self.__dict__['_output']=self._activFunc(self._val)
        return self._output
    def propagateError(self,weights,errors):
        """
        Back-propagate the error through this neuron and, when learnRate is
        truthy, update the weights and bias in place.

        weights, errors: equal-length downstream weights and error terms.
        Returns this neuron's error term.
        """
        weights=np.array(weights)
        errors=np.array(errors)
        if len(errors)!=len(weights):
            raise TypeError('Wrong values length')
        # delta = (sum of downstream weighted errors) * f'(pre-activation)
        self.__dict__['_error']=np.dot(weights,errors)*self._activFuncDeriv(self._val)
        if (self._learnRate):
            for i in range(len(self._weights)):
                self._weights[i]+=self._learnRate*self._error*self._inputValues[i]
            self.__dict__['_bias']+=self._learnRate*self._error
        return self._error
    """
    Funkcje dostępowe:
    """
    # --- accessor / protocol methods (the string above is Polish for
    # "accessor functions") ---
    def __setitem__(self,index,value):
        # Only 'learnRate' and 'activFunc' are writable via item access;
        # any other key is silently ignored.
        if index=='learnRate':
            self.__dict__['_learnRate']=value
        elif index=='activFunc':
            self.__dict__['_activFunc']=value
    def __getitem__(self,index):
        # Named keys expose cached state; any other index (int or slice)
        # reads the weight array. NOTE: a slice like p[:] returns a numpy
        # *view*, so later in-place weight updates are visible through it.
        if index=='error':
            return self._error
        elif index=='input':
            return self._inputValues
        elif index=='value':
            return self._val
        elif index=='learnRate':
            return self._learnRate
        return self._weights[index]
    def __getattr__(self,attr):
        raise AttributeError('get: No such attribute: %r'%attr)
    def __setattr__(self,attr,value):
        raise AttributeError('set: No such attribute: %r'%attr)
    def __iter__(self):
        return iter(self._weights)
    def __len__(self):
        return len(self._weights)
    def __repr__(self):
        w='['+','.join('{:8.5f}'.format(x) for x in self._weights)+']'
        return 'Neuron(weights:{0},bias:{1:8.5f},learnRate:{2:.5f},activFunc:{3!s},activFuncDeriv:{4!s})'.format(w,self._bias,self._learnRate,self._activFunc.__name__,self._activFuncDeriv.__name__)
class Layer:
    """
    A layer of identically-configured neurons for the multilayer network.

    inputNumber: inputs per neuron; neuronNumber: neurons in the layer.
    Missing weights / learnRate / bias are filled with random values.
    State is written via ``self.__dict__`` because ``__setattr__`` below
    forbids attribute assignment.
    """
    def __init__(self,inputNumber,neuronNumber,activFunc,activFuncDeriv,weights=None,learnRate=None,bias=None):
        self.__dict__['_inputNumber']=inputNumber
        self.__dict__['_neuronNumber']=neuronNumber
        self.__dict__['_activFunc']=activFunc
        self.__dict__['_activFuncDeriv']=activFuncDeriv
        if weights!=None:
            _weights=list(weights)
            if inputNumber>len(_weights):
                # Pad missing weights randomly. NOTE(review): this branch
                # draws 0.8*ranf() + 0.1*(+/-1) (roughly (-0.1, 0.9)), unlike
                # the symmetric small-magnitude init below — confirm intended.
                _weights.extend([0.8*np.random.ranf()+0.1*np.random.choice([-1.0,1.0]) for _ in range(inputNumber-len(_weights))])
        else:
            _weights=None
        if learnRate!=None:
            self.__dict__['_learnRate']=learnRate
        else:
            self.__dict__['_learnRate']=0.1
        _bias=bias
        # Four construction cases: explicit/random weights x explicit/random
        # bias. Random bias is drawn from (-0.09, -0.01); random weights have
        # magnitude in (0.01, 0.09) with random sign.
        if _weights:
            if _bias!=None:
                self.__dict__['_neurons']=[Neuron(_weights[:inputNumber],activFunc,activFuncDeriv,learnRate=self._learnRate,bias=_bias) for _ in range(neuronNumber)]
            else:
                self.__dict__['_neurons']=[Neuron(_weights[:inputNumber],activFunc,activFuncDeriv,learnRate=self._learnRate,bias=-0.08*np.random.ranf()-0.01) for _ in range(neuronNumber)]
        else:
            if _bias!=None:
                self.__dict__['_neurons']=[Neuron([(0.08*np.random.ranf()+0.01)*np.random.choice([-1.0,1.0]) for _ in range(inputNumber)],activFunc,activFuncDeriv,learnRate=self._learnRate,bias=_bias) for _ in range(neuronNumber)]
            else:
                self.__dict__['_neurons']=[Neuron([(0.08*np.random.ranf()+0.01)*np.random.choice([-1.0,1.0]) for _ in range(inputNumber)],activFunc,activFuncDeriv,learnRate=self._learnRate,bias=-0.08*np.random.ranf()-0.01) for _ in range(neuronNumber)]
    """
    Funkcje dostępowe
    """
    # --- accessor / protocol methods (the string above is Polish for
    # "accessor functions") ---
    def __len__(self):
        return len(self._neurons)
    def __getitem__(self,index):
        # 'learnRate' is a named key; any other index reads the neuron list.
        if index=='learnRate':
            return self._learnRate
        return self._neurons[index]
    def __iter__(self):
        return iter(self._neurons)
    def __setitem__(self,index,value):
        # Setting 'learnRate' or 'activFunc' cascades to every neuron.
        if index=='learnRate':
            self.__dict__['_learnRate']=value
            for x in self._neurons:
                x['learnRate']=value
        elif index=='activFunc':
            self.__dict__['_activFunc']=value
            for x in self._neurons:
                x['activFunc']=value
    def __getattr__(self,attr):
        raise AttributeError('get: No such attribute: %r'%attr)
    def __setattr__(self,attr,value):
        raise AttributeError('set: No such attribute: %r'%attr)
    def __repr__(self):
        result='Layer(inputNumber:{0}, neuronNumber:{1}, activFunc:{2!s}, activFuncDeriv:{3!s}, learnRate:{4:.5f})'\
            .format(self._inputNumber,self._neuronNumber,self._activFunc.__name__,self._activFuncDeriv.__name__,self._learnRate)
        return result
    def __str__(self):
        result=repr(self)+'\n'
        for p in self:
            result+='    '+str(p)+'\n'
        return result
class Multilayer:
    """
    Multilayer neural network in which every layer can be configured
    individually (activation functions, weights, learn rates, biases).
    """
    def __init__(self,layers,activFuncs=None,activFuncDerivs=None,weights=None, learnRates=None, biases=None):
        """
        layers: either a list of ready-made Layer objects, or a list of
        neuron counts (ints) accompanied by per-layer activation functions
        and derivatives; the remaining per-layer sequences are padded with
        None by zip_longest.
        """
        # BUG FIX: the original used mutable default arguments ([]).
        # They were only read (never mutated), so this None-sentinel form
        # is behavior-preserving while removing the anti-pattern.
        weights = [] if weights is None else weights
        learnRates = [] if learnRates is None else learnRates
        biases = [] if biases is None else biases
        if isinstance(layers[0],Layer):
            self._layers=layers
        elif isinstance(layers[0],int):
            if not all([activFuncs,activFuncDerivs]):
                raise TypeError('Missing activation functions or derivatives')
            neuronNumbers=layers
            l=zip_longest(neuronNumbers,activFuncs,activFuncDerivs,weights,learnRates,biases,fillvalue=None)
            prev=next(l)
            # First layer: each neuron has exactly one input.
            layerList=[Layer(1,*prev)]
            for x in l:
                # Each subsequent layer's input count is the previous
                # layer's neuron count.
                layerList.append(Layer(prev[0],*x))
                prev=x
            self._layers=layerList
        # NOTE(review): if layers[0] is neither Layer nor int, _layers is
        # never set and later use fails with AttributeError — kept as-is.
    def process(self,inputValues):
        """
        Propagate an input vector through all layers and return the list
        of outputs of the final layer.
        """
        inputValues=list(inputValues)
        values=[]
        # First layer: each neuron consumes its own slice of the flat input.
        for p in self._layers[0]:
            values.append(p.process(inputValues[:len(p)]))
            inputValues=inputValues[len(p):]
        # Remaining layers: each neuron sees the whole previous output.
        for layer in self._layers[1:]:
            inputValues=values
            values=[]
            for p in layer:
                values.append(p.process(inputValues))
        return values
    def learn(self,inputValues,expectedValues):
        """
        One backpropagation step: run a forward pass, then propagate the
        per-output errors backwards, letting each neuron update its own
        weights with its layer's learn rate.
        """
        results=self.process(inputValues)
        if len(results)!=len(expectedValues):
            raise IndexError('wrong number of expected values')
        results=iter(results)
        lenExpectedValues=len(expectedValues)
        expectedValues=iter(expectedValues)
        errors=[]
        for _ in range(lenExpectedValues):
            errors.append([next(expectedValues)-next(results)])
        # Output layer gets unit weights for its error terms.
        weights=[[1] for _ in range(len(errors))]
        for layer in reversed(self._layers):
            newErrors=[]
            oldWeights=[]
            for p in layer:
                # NOTE(review): p[:] is a numpy *view* of the neuron's
                # weights, so the upstream error uses the already-updated
                # weights rather than the pre-update ones — kept as-is.
                oldWeights.append(p[:])
                newErrors.append(p.propagateError(weights.pop(0),errors.pop(0)))
            weights=list(zip(*oldWeights))
            errors=[newErrors for x in range(len(weights))]
    # --- accessor methods ---
    def multiLearnRates(self,value):
        """Multiply every layer's learn rate by `value`."""
        for l in self._layers:
            l['learnRate']*=value
    def setLearnRates(self,value):
        """Set every layer's learn rate to `value`."""
        for l in self._layers:
            l['learnRate']=value
    def __getitem__(self,index):
        return self._layers[index]
    def __iter__(self):
        return iter(self._layers)
    def __repr__(self):
        result='Multilayer:\n'
        for layer in self._layers:
            result+='    '+repr(layer)
            result+='\n'
        return result
    def __str__(self):
        result='Multilayer:\n'
        for layer in self._layers:
            result+='    '+str(layer)
            result+='\n'
        return result
if __name__=='__main__':
    """
    Kod programu przeprowadzającego uczenie i testowanie neuronu
    Wyjscie jest przekierowywane do pliku results.txt
    """
    # (Polish string above: "Program that trains and tests the neuron;
    # output is redirected to results.txt".)
    # Optional stdout redirection to results.txt, kept disabled:
#    STDOUT=sys.stdout
#    f=open('results.txt','w');
#    sys.stdout=f
    SigmFactory=SignSigm()
    print('Funkcja AND:')
    # Training set for the logical AND function: ((inputs), expected).
    inputData=(
            ((0,0),0),
            ((0,1),0),
            ((1,0),0),
            ((1,1),1)
            )
    for x in inputData:
        print("data: {0}, expected: {1}".format(*x))
    listPerc=[]
    RES_NUMBER=100
    # Train RES_NUMBER independent perceptrons with random initial weights
    # and learn rates, recording how many epochs each needs to converge.
    while(len(listPerc)<RES_NUMBER):
        w=[np.random.ranf()*np.random.choice([-1,1]) for _ in range(2)]
        p=Neuron(w,hardOne,one,learnRate=np.random.ranf()*np.random.ranf()*np.random.ranf(),bias=np.random.ranf()*-1.0)
        i=0
        run=True
        print(p)
        print('iteration;bad')
        while(run):
            # One epoch: present the samples in random order (perceptron
            # delta rule; the "derivative" is stubbed out with one()).
            samples=list(inputData)
            i+=1
            while(run and samples):
                inp=random.sample(samples,1).pop(0)
                samples.remove(inp)
                expected=inp[1]
                result=p.process(inp[0])
                p.propagateError([1],[expected-result])
            # Count misclassified samples; stop when all are correct.
            bad=0
            for data,expected in inputData:
                r=p.process(data)
                if r!=expected:
                    bad+=1
            if bad==0:
                run=False
        print(i,bad,sep=';')
        print(p)
        #w=('initialWeights:['+','.join('{:8.5f}'.format(x) for x in w)+']')
        listPerc.append((w,p[:],p['learnRate'],i))
    # Report results sorted by learn rate (tuple index 2).
    for x in sorted(listPerc,key=itemgetter(2)):
        print(*x,sep=';')
#    sys.stdout=STDOUT
#    f.close()
# f.close() | [
"random.sample",
"numpy.random.choice",
"itertools.zip_longest",
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.random.ranf",
"operator.itemgetter"
] | [((2467, 2484), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (2475, 2484), True, 'import numpy as np\n'), ((3098, 3119), 'numpy.array', 'np.array', (['inputValues'], {}), '(inputValues)\n', (3106, 3119), True, 'import numpy as np\n'), ((3449, 3466), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (3457, 3466), True, 'import numpy as np\n'), ((3483, 3499), 'numpy.array', 'np.array', (['errors'], {}), '(errors)\n', (3491, 3499), True, 'import numpy as np\n'), ((3151, 3191), 'numpy.dot', 'np.dot', (['self._weights', 'self._inputValues'], {}), '(self._weights, self._inputValues)\n', (3157, 3191), True, 'import numpy as np\n'), ((3624, 3647), 'numpy.dot', 'np.dot', (['weights', 'errors'], {}), '(weights, errors)\n', (3630, 3647), True, 'import numpy as np\n'), ((13209, 13222), 'operator.itemgetter', 'itemgetter', (['(2)'], {}), '(2)\n', (13219, 13222), False, 'from operator import itemgetter\n'), ((8893, 8997), 'itertools.zip_longest', 'zip_longest', (['neuronNumbers', 'activFuncs', 'activFuncDerivs', 'weights', 'learnRates', 'biases'], {'fillvalue': 'None'}), '(neuronNumbers, activFuncs, activFuncDerivs, weights, learnRates,\n biases, fillvalue=None)\n', (8904, 8997), False, 'from itertools import zip_longest\n'), ((12125, 12141), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (12139, 12141), True, 'import numpy as np\n'), ((12142, 12167), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (12158, 12167), True, 'import numpy as np\n'), ((943, 960), 'numpy.exp', 'np.exp', (['(-alfa * x)'], {}), '(-alfa * x)\n', (949, 960), True, 'import numpy as np\n'), ((1114, 1131), 'numpy.exp', 'np.exp', (['(-alfa * x)'], {}), '(-alfa * x)\n', (1120, 1131), True, 'import numpy as np\n'), ((1534, 1551), 'numpy.exp', 'np.exp', (['(-alfa * x)'], {}), '(-alfa * x)\n', (1540, 1551), True, 'import numpy as np\n'), ((12262, 12278), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (12276, 12278), True, 'import numpy 
as np\n'), ((12284, 12300), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (12298, 12300), True, 'import numpy as np\n'), ((1136, 1153), 'numpy.exp', 'np.exp', (['(-alfa * x)'], {}), '(-alfa * x)\n', (1142, 1153), True, 'import numpy as np\n'), ((1342, 1359), 'numpy.exp', 'np.exp', (['(-alfa * x)'], {}), '(-alfa * x)\n', (1348, 1359), True, 'import numpy as np\n'), ((1556, 1573), 'numpy.exp', 'np.exp', (['(-alfa * x)'], {}), '(-alfa * x)\n', (1562, 1573), True, 'import numpy as np\n'), ((12228, 12244), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (12242, 12244), True, 'import numpy as np\n'), ((12245, 12261), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (12259, 12261), True, 'import numpy as np\n'), ((12522, 12547), 'random.sample', 'random.sample', (['samples', '(1)'], {}), '(samples, 1)\n', (12535, 12547), False, 'import random\n'), ((5736, 5752), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (5750, 5752), True, 'import numpy as np\n'), ((5757, 5786), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (5773, 5786), True, 'import numpy as np\n'), ((6613, 6642), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (6629, 6642), True, 'import numpy as np\n'), ((6864, 6893), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (6880, 6893), True, 'import numpy as np\n'), ((6435, 6451), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (6449, 6451), True, 'import numpy as np\n'), ((6984, 7000), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (6998, 7000), True, 'import numpy as np\n'), ((6590, 6606), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (6604, 6606), True, 'import numpy as np\n'), ((6841, 6857), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (6855, 6857), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
# from mpl_toolkits.basemap import Basemap
import xarray as xr
import re
from collections import OrderedDict
from datetime import datetime, timedelta
from scipy.spatial import cKDTree, KDTree
from pyproj import Proj
import numpy.ma as ma
import argparse
from glob import glob
import json
import os
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
deg2rad = np.pi / 180
NRANGE = {'LPRO': 90, 'SILL': 60, 'FIST': 60, 'VILA': 60, 'PRIO': 60}
dtypes = {"TIME": 'float64',
"DEPH": 'float32',
"BEAR": 'float32',
"RNGE": 'float32',
"LONGITUDE": 'float32',
"LATITUDE": 'float32',
"XDST": 'int32',
"YDST": 'int32',
"RDVA": 'int16',
"DRVA": 'int32',
"EWCT": 'int16',
"NSCT": 'int16',
"MAXV": 'int16',
"MINV": 'int16',
"ESPC": 'int16',
"ETMP": 'int16',
"ERSC": 'int16',
"ERTC": 'int16',
"SPRC": 'int16',
"NARX": 'int8',
"NATX": 'int8',
"SLTR": 'int32',
"SLNR": 'int32',
"SLTT": 'int16',
"SLNT": 'int16',
"TIME_QC": 'int8',
"POSITION_QC": 'int8',
"DEPH_QC": 'int8',
"QCflag": 'int8',
"OWTR_QC": 'int8',
"MDFL_QC": 'int8',
"VART_QC": 'int8',
"CSPD_QC": 'int8',
"AVRB_QC": 'int8',
"RDCT_QC": 'int8'}
scale_factors = {"XDST": 0.001,
"YDST": 0.001,
"RDVA": 0.001,
"DRVA": 0.001,
"EWCT": 0.001,
"NSCT": 0.001,
"ESPC": 0.001,
"ETMP": 0.001,
"MAXV": 0.001,
"MINV": 0.001,
"ERSC": 1,
"ERTC": 1,
"XDST": 0.001,
"YDST": 0.001,
"SPRC": 1,
"NARX": 1,
"NATX": 1,
"SLTR": 0.001,
"SLNR": 0.001,
"SLTT": 0.001,
"SLNT": 0.001,
"TIME_QC": 1,
"POSITION_QC": 1,
"DEPH_QC": 1,
"QCflag": 1,
"OWTR_QC": 1,
"MDFL_QC": 1,
"VART_QC": 1,
"CSPD_QC": 1,
"AVRB_QC": 1,
"RDCT_QC": 1}
add_offsets = {}
# Cast every scale factor (and a matching zero offset) to the exact dtype
# declared in `dtypes`, presumably for netCDF-style variable encoding via
# xarray — confirm against where these tables are consumed.
for key, value in scale_factors.items():
    if isinstance(value, float):
        scale_factors[key] = np.float32(scale_factors[key])
        add_offsets[key] = np.float32(0)
    else:
        # Build a type converter from the variable's declared dtype:
        conversor = np.dtype(dtypes[key])
        # Use the converter to recode a native python type into a numpy scalar:
        scale_factors[key] = np.int_(scale_factors[key]).astype(conversor)
        add_offsets[key] = np.int_(0).astype(conversor)
_FillValues = {}
# Fill-value convention: one above the dtype's minimum representable value.
for key, value in dtypes.items():
    if 'float' in value:
        _FillValues[key] = np.finfo(dtypes[key]).min + 1
    else:
        _FillValues[key] = np.iinfo(dtypes[key]).min + 1
def rotate_vector(pr, uin, vin, lons, lats, returnxy=False):
    """
    Rotate a vector field (``uin,vin``) on a rectilinear grid
    with longitudes = ``lons`` and latitudes = ``lats`` from
    geographical (lat/lon) into map projection (x/y) coordinates.
    Differs from transform_vector in that no interpolation is done.
    The vector is returned on the same grid, but rotated into
    x,y coordinates.
    The input vector field is defined in spherical coordinates (it
    has eastward and northward components) while the output
    vector field is rotated to map projection coordinates (relative
    to x and y). The magnitude of the vector is preserved.
    .. tabularcolumns:: |l|L|
    ==============   ====================================================
    Arguments        Description
    ==============   ====================================================
    pr               forward projection callable mapping
                     ``pr(lon, lat) -> (x, y)`` (presumably a
                     ``pyproj.Proj``; confirm against callers).
    uin, vin         input vector field on a lat/lon grid.
    lons, lats       Arrays containing longitudes and latitudes
                     (in degrees) of input data in increasing order.
                     For non-cylindrical projections (those other than
                     ``cyl``, ``merc``, ``cyl``, ``gall`` and ``mill``) lons
                     must fit within range -180 to 180.
    ==============   ====================================================
    Returns ``uout, vout`` (rotated vector field).
    If the optional keyword argument
    ``returnxy`` is True (default is False),
    returns ``uout,vout,x,y`` (where ``x,y`` are the map projection
    coordinates of the grid defined by ``lons,lats``).
    """
    # if lons,lats are 1d and uin,vin are 2d, and
    # lats describes 1st dim of uin,vin, and
    # lons describes 2nd dim of uin,vin, make lons,lats 2d
    # with meshgrid.
    if lons.ndim == lats.ndim == 1 and uin.ndim == vin.ndim == 2 and \
            uin.shape[1] == vin.shape[1] == lons.shape[0] and \
            uin.shape[0] == vin.shape[0] == lats.shape[0]:
        lons, lats = np.meshgrid(lons, lats)
    else:
        if not lons.shape == lats.shape == uin.shape == vin.shape:
            raise TypeError("shapes of lons,lats and uin,vin don't match")
    x, y = pr(lons, lats)
    # rotate from geographic to map coordinates.
    if ma.isMaskedArray(uin):
        # Fill masked values with a placeholder; the mask is re-applied
        # to the output below.
        mask = ma.getmaskarray(uin)
        masked = True
        uin = uin.filled(1)
        vin = vin.filled(1)
    else:
        masked = False
    # Map the (lon, lat) vector in the complex plane.
    uvc = uin + 1j * vin
    uvmag = np.abs(uvc)
    theta = np.angle(uvc)
    # Define a displacement (dlon, dlat) that moves all
    # positions (lons, lats) a small distance in the
    # direction of the original vector.
    dc = 1E-5 * np.exp(theta * 1j)
    # Scale the longitude step by cos(lat) to keep the ground distance
    # consistent as meridians converge.
    dlat = dc.imag * np.cos(np.radians(lats))
    dlon = dc.real
    # Deal with displacements that overshoot the North or South Pole.
    farnorth = np.abs(lats + dlat) >= 90.0
    somenorth = farnorth.any()
    if somenorth:
        dlon[farnorth] *= -1.0
        dlat[farnorth] *= -1.0
    # Add displacement to original location and find the native coordinates.
    lon1 = lons + dlon
    lat1 = lats + dlat
    xn, yn = pr(lon1, lat1)
    # Determine the angle of the displacement in the native coordinates.
    vecangle = np.arctan2(yn - y, xn - x)
    if somenorth:
        # Undo the sign flip applied near the pole above.
        vecangle[farnorth] += np.pi
    # Compute the x-y components of the original vector.
    uvcout = uvmag * np.exp(1j * vecangle)
    uout = uvcout.real
    vout = uvcout.imag
    if masked:
        uout = ma.array(uout, mask=mask)
        vout = ma.array(vout, mask=mask)
    if returnxy:
        return uout, vout, x, y
    else:
        return uout, vout
class Radial:
    """
    Reader/processor for CODAR radial-velocity files (.ruv).

    Workflow: construct from a .ruv file, then call ``to_grid`` to map the
    data table onto a polar ``Grid``, ``QC_control`` to compute the quality
    flags, and ``to_netcdf`` to write the result to disk.
    """
    def __init__(self, fichero):
        """
        Parse a .ruv radial file: metadata and data tables.

        Parameters
        ----------
        fichero: path of the .ruv file with the radial velocities.
        """
        # The file has to be opened in binary mode; the '%' markers and the
        # newlines are stripped, and '%%' comment lines are discarded.
        with open(fichero, 'rb') as f:
            contenido = [linea.decode('utf-8').replace('%', '').replace('\n', '')
                         for linea in f.readlines()
                         if '%%' not in str(linea)]
        metadatos = [linea for linea in contenido if 'Table' not in linea]
        metadatos = dict([(linea.split(':')[0], linea.split(':')[1]) for linea in metadatos if ':' in str(linea)])
        # Parse the metadata fields we will need later on:
        self.Origin = np.array(metadatos['Origin'].split(), dtype=float)
        self.RangeEnd = int(metadatos['RangeEnd'])
        self.RangeResolutionKMeters = float(metadatos['RangeResolutionKMeters'])
        self.AntennaBearing = float(metadatos['AntennaBearing'].replace('True', ''))
        self.AngularResolution = float(metadatos['AngularResolution'].replace('Deg', ''))
        self.TimeStamp = datetime.strptime(metadatos['TimeStamp'], ' %Y %m %d %H %M %S')
        # First and last line of every table:
        starts = np.arange(len(contenido))[['TableStart' in linea for linea in contenido]]
        ends = np.arange(len(contenido))[['TableEnd' in linea for linea in contenido]]
        lengths = ends - starts - 1
        # Lines holding the table headers:
        columns = np.arange(len(contenido))[['TableColumnTypes' in linea for linea in contenido]]
        tablas = []
        # Variable renaming is applied here (first table only).
        # Original names: LOND LATD VELU VELV VFLG ESPC ETMP MAXV MINV ERSC ERTC XDST YDST RNGE BEAR VELO HEAD SPRC
        headers = [contenido[indice].split(':')[1].split() for indice in columns]
        headers[0] = ['LOND', 'LATD', 'EWCT', 'NSCT', 'OWTR_QC', 'ESPC', 'ETMP', 'MAXV', 'MINV', 'ERSC', 'ERTC', 'XDST',
                      'YDST', 'RNGE', 'BEAR', 'RDVA', 'DRVA', 'SPRC']
        for i in range(3):
            if lengths[i] != 0:
                start = starts[i] + 1
                end = ends[i]
                tablas.append(pd.DataFrame(np.array([linea.split() for linea in contenido[start:end]], dtype=float),
                                           columns=headers[i]))
        # Occasionally bogus 999.00 sentinel values show up in these variables
        # (use .loc to avoid pandas chained-assignment issues):
        tablas[0].loc[tablas[0].ESPC == 999.00, 'ESPC'] = np.nan
        tablas[0].loc[tablas[0].ETMP == 999.00, 'ETMP'] = np.nan
        # Apply the required unit-conversion factors:
        tablas[0].EWCT /= 100.
        tablas[0].NSCT /= 100.
        tablas[0].RDVA /= -100.
        tablas[0].MINV /= -100.
        tablas[0].MAXV /= -100.
        tablas[0].ESPC /= 100.
        tablas[0].ETMP /= 100.
        tablas[0].ERSC /= 1.
        tablas[0].ERTC /= 1.
        tablas[0].SPRC /= 1.
        self.metadatos = metadatos
        self.tablas = tablas
    def to_grid(self, grid):
        """
        Map the radial table onto *grid*, storing every variable as an
        xr.DataArray in ``self.variables``.

        Parameters
        ----------
        grid: Grid object defining the target polar mesh.
        """
        # Nearest-neighbour lookup via a cKDTree built on the grid nodes:
        nearest = cKDTree(np.column_stack([grid.longitud.values.flatten(), grid.latitud.values.flatten()]))
        puntos = np.column_stack([self.tablas[0].LOND.values, self.tablas[0].LATD.values])
        distancias, vecinos = nearest.query(puntos)
        variables = ['EWCT', 'NSCT', 'OWTR_QC', 'MINV', 'MAXV', 'RDVA', 'DRVA', 'ESPC', 'ETMP', 'ERSC', 'ERTC', 'SPRC']
        self.variables = OrderedDict()
        # Coordinates; TIME is expressed as days since 1950-01-01:
        delta = self.TimeStamp - datetime(1950, 1, 1)
        self.variables['TIME'] = xr.DataArray([delta.days + delta.seconds / 86400], dims={'TIME': 1})
        self.variables['DEPH'] = xr.DataArray([0.], dims={'DEPTH': 1})
        for variable in variables:
            # Create the matrix to be filled with data:
            tmp = np.ones_like(grid.longitud.values.flatten()) * np.nan
            # Set nearest neighbours:
            tmp[vecinos] = self.tablas[0][variable]
            # Back to original shape:
            tmp = tmp.reshape(grid.longitud.shape)
            # Build the DataArray (the membership test currently matches the
            # whole `variables` list, so every variable takes this path):
            if variable in ['EWCT', 'NSCT', 'OWTR_QC', 'MINV', 'MAXV', 'RDVA', 'DRVA', 'ESPC', 'ETMP', 'ERSC', 'ERTC',
                            'SPRC']:
                # Grow along DEPTH:
                tmp = np.expand_dims(tmp, axis=0)
                # Grow along TIME:
                tmp = np.expand_dims(tmp, axis=0)
                self.variables[variable] = xr.DataArray(tmp,
                                                        dims={'TIME': 1, 'DEPTH': 1, 'BEAR': grid.nBEAR,
                                                              'RNGE': grid.nRNGE},
                                                        coords={'TIME': self.variables['TIME'],
                                                                'DEPH': self.variables['DEPH'], 'BEAR': grid.BEAR,
                                                                'RNGE': grid.RNGE, 'LONGITUDE': grid.longitud,
                                                                'LATITUDE': grid.latitud,
                                                                'XDST': grid.X, 'YDST': grid.Y})
            # Encoding of the variable in the output file:
            self.variables[variable].encoding["scale_factor"] = scale_factors[variable]
            self.variables[variable].encoding["add_offset"] = add_offsets[variable]
            self.variables[variable].encoding["dtype"] = dtypes[variable]
            self.variables[variable].encoding["_FillValue"] = _FillValues[variable]
    def QC_control(self):
        """
        Quality control of the gridded data.

        Uses the object's variable list (``to_grid`` must have been called)
        and stores the resulting flag variables back into ``self.variables``.
        """
        # Aliases so the formulas below stay short:
        bear = self.variables['RDVA'].BEAR * deg2rad
        X = self.variables['RDVA'].XDST
        Y = self.variables['RDVA'].YDST
        etmp = self.variables['ETMP']
        head = self.variables['DRVA']
        radVel = self.variables['RDVA']
        owtr = self.variables['OWTR_QC']
        # Time quality flag:
        sdnTime_QCflag = 1
        self.variables['TIME_QC'] = xr.DataArray(sdnTime_QCflag, dims={'TIME': 1},
                                                 coords={'TIME': self.variables['TIME']})
        # Position quality flag:
        sdnPosition_QCflag = radVel.copy() * 0 + 1
        self.variables['POSITION_QC'] = sdnPosition_QCflag
        # Depth quality flag (the original passed sdnTime_QCflag here; both
        # are 1, using the depth flag is just clearer):
        sdnDepth_QCflag = 1
        self.variables['DEPH_QC'] = xr.DataArray(sdnDepth_QCflag, dims={'TIME': 1},
                                                 coords={'TIME': self.variables['TIME']})
        # Variance Threshold QC test:
        varVec = etmp ** 2
        # Average Radial Bearing QC test:
        avgBear = bear.values[~np.isnan(bear).values].mean()
        # Radial Count QC test:
        radVectors = int((~np.isnan(radVel)).sum())
        # Over Water quality flags (re-coded: 0 -> 1 good, 128 -> 4 bad):
        self.variables['OWTR_QC'].values = np.select([owtr.values == 0, owtr.values == 128, np.isnan(owtr)],
                                                     [1, 4, np.nan])
        # Velocity Threshold quality flags:
        maxspd_R = Radial_QC_params.VelThr
        velThr = radVel.copy()
        velThr.values = np.select([np.abs(radVel) <= maxspd_R, np.abs(radVel) > maxspd_R, np.isnan(radVel)],
                                  [1, 4, np.nan])
        self.variables['CSPD_QC'] = velThr
        # Variance Threshold quality flags.
        # NOTE(review): varThr is computed but never stored in self.variables;
        # confirm whether it should be exported as a flag variable.
        varThr = varVec.copy()
        varThr.values = np.select(
            [varVec <= Radial_QC_params.VarThr, varVec > Radial_QC_params.VarThr, np.isnan(varVec)], [1, 4, np.nan])
        # Temporal-derivative flag for the current hour is set to 0 (no QC
        # performed); VART_QC() fills it in once the neighbouring hours exist.
        tempDer = radVel.copy()
        tempDer *= 0
        self.variables['VART_QC'] = tempDer
        # Median Filter QC test:
        radVelMedianFiltered = radVel.copy()
        medFilt = radVel.copy()
        condicion = ~np.isnan(radVel[0, 0])
        nt, nd, nBear, nRange = radVel.shape
        # 'bear' is re-bound here to the 2-D bearing grid from the BEAR
        # coordinate (degrees after the modulo in Grid).
        # NOTE(review): the wrap-around below compares it against np.pi and
        # MedFilt[1] (radians) — confirm the intended units.
        tmp, bear = np.meshgrid(radVel.RNGE, radVel.BEAR)
        for i in range(nRange):
            for j in range(nBear):
                if condicion[j, i]:
                    refBear = bear[j, i]
                    # Points within the spatial search radius:
                    ventana = np.sqrt((X - X[j, i]) ** 2 + (Y - Y[j, i]) ** 2) <= Radial_QC_params.MedFilt[0]
                    # Angular-distance condition (with wrap-around handling):
                    dif = bear - refBear
                    dif[dif >= np.pi] -= 2 * np.pi
                    dif[dif < -np.pi] += 2 * np.pi
                    ventana &= np.abs(dif) <= Radial_QC_params.MedFilt[1]
                    # The data must not be NaN:
                    ventana &= condicion
                    radVelMedianFiltered[0, 0, j, i] = np.median(radVel[0, 0].values[ventana])
        medFilt[:] = np.select([np.abs(radVelMedianFiltered - radVel) <= Radial_QC_params.MedFilt[2],
                                np.abs(radVelMedianFiltered - radVel) > Radial_QC_params.MedFilt[2],
                                np.isnan(radVelMedianFiltered)], [1, 4, np.nan])
        self.variables['MDFL_QC'] = medFilt
        # Average Radial Bearing quality flag:
        if ((avgBear >= Radial_QC_params.AvgRadBear[0]) & (avgBear <= Radial_QC_params.AvgRadBear[1])):
            avgRadBear = 1
        else:
            avgRadBear = 4
        self.variables['AVRB_QC'] = xr.DataArray(avgRadBear, dims={'TIME': 1}, coords={'TIME': self.variables['TIME']})
        # Radial Count quality flag:
        if (radVectors > Radial_QC_params.RadCnt):
            radCount = 1
        else:
            radCount = 4
        # BUG FIX: the original stored avgRadBear here, so RDCT_QC duplicated
        # AVRB_QC instead of reflecting the radial-count test.
        self.variables['RDCT_QC'] = xr.DataArray(radCount, dims={'TIME': 1}, coords={'TIME': self.variables['TIME']})
        # Populate the overall quality variable:
        condicion = (self.variables['CSPD_QC'] == 1) & \
                    (self.variables['OWTR_QC'] == 1) & \
                    (self.variables['MDFL_QC'] == 1) & \
                    (self.variables['AVRB_QC'] == 1) & \
                    (self.variables['RDCT_QC'] == 1)
        isNan = np.isnan(self.variables['CSPD_QC'])
        self.variables['QCflag'] = self.variables['CSPD_QC'].copy()
        self.variables['QCflag'].values = np.select([condicion & ~isNan, ~condicion & ~isNan, isNan], [1, 4, np.nan])
        # Finally set the output encoding of every flag variable:
        for variable in ['TIME_QC', 'POSITION_QC', 'DEPH_QC', 'QCflag', 'OWTR_QC', 'MDFL_QC', 'VART_QC', 'CSPD_QC',
                         'AVRB_QC', 'RDCT_QC']:
            self.variables[variable].encoding["scale_factor"] = scale_factors[variable]
            self.variables[variable].encoding["add_offset"] = add_offsets[variable]
            self.variables[variable].encoding["dtype"] = dtypes[variable]
            self.variables[variable].encoding["_FillValue"] = _FillValues[variable]
    def to_netcdf(self, path_out, fichero):
        """
        Write the gridded, QC'd radial to a netCDF file in *path_out*.

        Parameters
        ----------
        path_out: output directory.
        fichero: original .ruv file name; the radar code and timestamp are
            parsed from it.
        """
        radar = re.findall(r"[A-Z]{4}", fichero.split('/')[-1])[0]
        fecha = datetime.strptime('%s%s%s%s' % tuple(re.findall(r"\d+", fichero.split('/')[-1])), '%Y%m%d%H%M')
        logging.info('Fichero: %s Radar: %s' % (fichero, radar))
        # Projection info:
        self.variables['crs'] = xr.DataArray(np.int16(0), )
        # SeaDataNet metadata:
        SDN_EDMO_CODEs = {'PRIO': 4841, 'SILL': 2751, 'VILA': 4841, 'FIST': 2751, 'LPRO': 590}
        self.variables['SDN_EDMO_CODE'] = xr.DataArray(np.int16([[SDN_EDMO_CODEs[radar]]]),
                                                       dims={'TIME': 1, 'MAXINST': 1})
        cadena = b'HFR-Galicia'
        self.variables['SDN_CRUISE'] = xr.DataArray(np.array([cadena]), dims={'TIME': 1})
        cadena = ('HFR-Galicia-%s' % radar).encode()
        self.variables['SDN_STATION'] = xr.DataArray(np.array([cadena]), dims={'TIME': 1})
        cadena = ('HFR-Galicia-%s_%sZ' % (radar, self.TimeStamp.isoformat())).encode()
        self.variables['SDN_LOCAL_CDI_ID'] = xr.DataArray(np.array([cadena]), dims={'TIME': 1})
        cadena = b'http://opendap.intecmar.gal/thredds/catalog/data/nc/RADAR_HF/Galicia/catalog.html'
        self.variables['SDN_REFERENCES'] = xr.DataArray(np.array([cadena]), dims={'TIME': 1})
        cadena = b"<sdn_reference xlink:href=\"http://opendap.intecmar.gal/thredds/catalog/data/nc/RADAR_HF/Galicia/catalog.html\" xlink:role=\"\" xlink:type=\"URL\"/>"
        self.variables['SDN_XLINK'] = xr.DataArray(np.array([[cadena]]), dims={'TIME': 1, 'REFMAX': 1})
        # Site coordinates (single site, so receive == transmit):
        siteLat, siteLon = self.Origin
        self.variables['SLTR'] = xr.DataArray([[siteLat]], dims={'TIME': 1, 'MAXSITE': 1})
        self.variables['SLNR'] = xr.DataArray([[siteLon]], dims={'TIME': 1, 'MAXSITE': 1})
        self.variables['SLTT'] = xr.DataArray([[siteLat]], dims={'TIME': 1, 'MAXSITE': 1})
        self.variables['SLNT'] = xr.DataArray([[siteLon]], dims={'TIME': 1, 'MAXSITE': 1})
        cadena = ('%s' % radar).encode()
        self.variables['SCDR'] = xr.DataArray(np.array([[cadena]]), dims={'TIME': 1, 'MAXSITE': 1})
        self.variables['SCDT'] = xr.DataArray(np.array([[cadena]]), dims={'TIME': 1, 'MAXSITE': 1})
        numSites = 1
        self.variables['NARX'] = xr.DataArray([numSites], dims={'TIME': 1})
        self.variables['NATX'] = xr.DataArray([numSites], dims={'TIME': 1})
        for variable in ['SLTT', 'SLNT', 'SLTR', 'SLNR', 'NARX', 'NATX']:
            # Encoding of the site variables in the output file:
            self.variables[variable].encoding["scale_factor"] = scale_factors[variable]
            self.variables[variable].encoding["add_offset"] = add_offsets[variable]
            self.variables[variable].encoding["dtype"] = dtypes[variable]
            self.variables[variable].encoding["_FillValue"] = _FillValues[variable]
        # Build the xarray.Dataset; self.variables holds the needed DataArrays:
        dataset = xr.Dataset(self.variables)
        # Global attributes:
        ## Radar-specific attributes come from a per-radar JSON file:
        with open('%s.json' % radar) as f:
            atributos = json.loads(f.read())
        ## Attributes that will be overwritten with data parsed from the radial file:
        atributos_fichero = ['AngularResolution', 'AntennaBearing', 'BraggHasSecondOrder', 'BraggSmoothingPoints',
                             'DopplerResolutionHzPerBin', 'FirstOrderCalc', 'FirstOrderMethod', 'MergeMethod',
                             'MergedCount',
                             'PatternAmplitudeCalculations', 'PatternAmplitudeCorrections', 'PatternMethod',
                             'PatternPhaseCalculations',
                             'PatternPhaseCorrections', 'PatternResolution', 'RadialBraggNoiseThreshold',
                             'RadialBraggPeakDropOff', 'RadialBraggPeakNull',
                             'RadialMinimumMergePoints', 'RadialMusicParameters', 'RangeEnd', 'RangeResolutionKMeters',
                             'RangeStart',
                             'ReferenceBearing', 'SpatialResolution', 'SpectraDopplerCells', 'SpectraRangeCells',
                             'TransmitBandwidthKHz',
                             'TransmitCenterFreqMHz', 'TransmitSweepRateHz', 'UUID']
        ## Derived attributes:
        atributos['id'] = 'HFR-Galicia-%s_%sZ' % (radar, self.TimeStamp.isoformat())
        atributos['time_coverage_start'] = '%sZ' % (self.TimeStamp - timedelta(minutes=30)).isoformat()
        atributos['time_coverage_end'] = '%sZ' % (self.TimeStamp + timedelta(minutes=30)).isoformat()
        ahora = datetime(*datetime.now().timetuple()[0:6]).isoformat()
        atributos['date_created'] = '%sZ' % ahora
        atributos['metadata_date_stamp'] = '%sZ' % ahora
        atributos['date_modified'] = '%sZ' % ahora
        atributos['date_issued'] = '%sZ' % ahora
        atributos['history'] = '%s data collected. %s netCDF file created and sent to European HFR Node' % (
            self.TimeStamp.isoformat(), ahora)
        for atributo_fichero in atributos_fichero:
            try:
                atributos[atributo_fichero] = self.metadatos[atributo_fichero]
            except KeyError:
                # Narrowed from a bare except: only a missing metadata key is expected here.
                logging.info('No puedo cargar el atributo --> %s del fichero radial' % atributo_fichero)
        ## ... and insert them:
        dataset.attrs = atributos
        # Per-variable attributes:
        with open('variables.json') as f:
            atributos = json.loads(f.read())
        # The valid_min/max attribute types are deserialized incorrectly;
        # recode them using the dtype declared for each variable:
        for var in dataset:
            for key in atributos[var]:
                if isinstance(atributos[var][key], int):
                    conversor = np.dtype(dtypes[var])
                    atributos[var][key] = np.int_(atributos[var][key]).astype(conversor)
                elif isinstance(atributos[var][key], list):
                    conversor = np.dtype(dtypes[var])
                    atributos[var][key] = np.array(atributos[var][key]).astype(conversor)
        for var in dataset:
            dataset[var].attrs = atributos[var]
        # Coordinates/dimensions that xArray fills in automatically once the
        # xr.Dataset is created from the variables dict:
        for var in ['TIME', 'DEPH', 'BEAR', 'RNGE']:
            dataset[var].encoding["dtype"] = dtypes[var]
            dataset[var].encoding["_FillValue"] = None
            dataset[var].attrs = atributos[var]
        for var in ['LONGITUDE', 'LATITUDE']:
            dataset[var].encoding["dtype"] = dtypes[var]
            dataset[var].encoding["_FillValue"] = _FillValues[var]
            dataset[var].attrs = atributos[var]
        for var in ['XDST', 'YDST']:
            dataset[var].encoding["scale_factor"] = scale_factors[var]
            dataset[var].encoding["add_offset"] = add_offsets[var]
            dataset[var].encoding["dtype"] = dtypes[var]
            dataset[var].encoding["_FillValue"] = _FillValues[var]
            dataset[var].attrs = atributos[var]
        for var in ['DEPH', 'BEAR', 'RNGE', 'LONGITUDE', 'LATITUDE', 'XDST', 'YDST']:
            # Recode valid_min/max to the variable's numpy dtype:
            conversor = np.dtype(dtypes[var])
            dataset[var].attrs['valid_min'] = np.float_(atributos[var]['valid_min']).astype(conversor)
            dataset[var].attrs['valid_max'] = np.float_(atributos[var]['valid_max']).astype(conversor)
        # Write the netCDF file:
        file_out = os.path.join(path_out, 'HFR-Galicia-%s_%s.nc')
        dataset.reset_coords(drop=False).to_netcdf(file_out % (radar, fecha.strftime('%Y_%m_%d_%H%M')))
    def __repr__(self):
        return '<Radial class>'
class Radial_QC_params():
    """
    Static container for the quality-control thresholds.
    """
    VelThr = 1.2  # velocity threshold (m/s)
    VarThr = 1.  # variance threshold (m2/s2?)
    # NOTE(review): VART_QC() hardcodes its own threshold of 1 instead of
    # reading this attribute — confirm which value is intended.
    tempDer_Thr = 0
    AvgRadBear = [0., 70.]  # accepted [min, max] for the average radial bearing
    RadCnt = 100  # minimum radial-vector count for a good RDCT flag
    MedFilt = [5000, 30 * np.pi / 180, 1]  # median filter: 5 km, 30 degrees and 1 m/s
class Grid:
    """
    Polar mesh onto which the radial data is mapped.

    Attributes
    ----------
    longitud, latitud: 2-D arrays with the reconstructed node coordinates.
    X, Y: projected node coordinates (km, relative to the radar origin).
    RNGE, BEAR: polar coordinates of the mesh (range in km, bearing).
    """
    def __init__(self, radial, nBEAR=72, nRNGE=42):
        """
        Build the mesh for *radial*.

        Parameters
        ----------
        radial: Radial object (supplies origin, range/angle resolution).
        nBEAR: number of bearing directions (default 72).
        nRNGE: number of range cells (default 42).
        """
        self.nBEAR, self.nRNGE = nBEAR, nRNGE
        lat0, lon0 = radial.Origin
        # Work on a plane: a transverse-Mercator projection over the area.
        pr = Proj(
            '+proj=tmerc +bR_a=6370997.0 +units=m +lat_0=42.0 +lon_0=-8.0 +x_0=249341.9581021159 +y_0=17861.19187674373')
        # Projected coordinates of the radar origin:
        x0, y0 = pr(lon0, lat0)
        # Polar coordinates of the nodes — ranges in metres:
        RNGE = np.arange(nRNGE) * radial.RangeResolutionKMeters * 1000
        # ... and bearings, wrapped to [0, 360) and sorted:
        BEAR = np.sort((np.arange(nBEAR) * radial.AngularResolution + radial.AntennaBearing) % 360)
        # Unit vectors along every bearing, rotated into map coordinates:
        u, v = rotate_vector(pr, np.sin(BEAR * deg2rad), np.cos(BEAR * deg2rad),
                              np.repeat(lon0, len(BEAR)), np.repeat(lat0, len(BEAR)))
        X = np.array([RNGE * ux + x0 for ux in u])
        Y = np.array([RNGE * vy + y0 for vy in v])
        # ... and the reconstructed spherical coordinates:
        longitud, latitud = pr(X, Y, inverse=True)
        # Store km relative to the origin instead of projected metres:
        X = (X - x0) / 1000
        Y = (Y - y0) / 1000
        RNGE = RNGE / 1000
        # Projected coordinates, kept as xr.DataArrays for reuse when the
        # output variables are defined:
        dims = {'BEAR': nBEAR, 'RNGE': nRNGE}
        coords = {'BEAR': BEAR, 'RNGE': RNGE}
        self.X = xr.DataArray(X, dims=dims, coords=coords)
        self.Y = xr.DataArray(Y, dims=dims, coords=coords)
        self.longitud = xr.DataArray(longitud, dims=dims, coords=coords)
        self.latitud = xr.DataArray(latitud, dims=dims, coords=coords)
        # ... plus the polar coordinates of the nodes:
        self.RNGE, self.BEAR = RNGE, BEAR
    def __repr__(self):
        return '<Grid class -> nBEAR: %i, nRNGE: %i>' % (self.nBEAR, self.nRNGE)
def VART_QC(ficheros):
    """
    Temporal-derivative QC test over three consecutive hourly netCDF files.

    Parameters
    ----------
    ficheros: list of three file paths ordered [t-2h, t-1h, t]; the middle
        file is re-flagged and written out as '<name>_new.nc'.
    """
    dsets = [xr.open_dataset(ruta) for ruta in ficheros]
    vel_2h, vel_1h, vel_now = (ds.RDVA[0, 0].values for ds in dsets)
    umbral = 1
    # Default flag is 4 (bad); mark 1 (good) where both hourly differences
    # stay below the threshold:
    flags = np.full_like(vel_1h, 4)
    estable = (np.abs(vel_now - vel_1h) < umbral) & (np.abs(vel_2h - vel_1h) < umbral)
    flags[estable] = 1
    # 0 (not evaluated) where a neighbouring hour is missing; NaN where the
    # central hour itself has no data:
    flags[np.isnan(vel_now) | np.isnan(vel_2h)] = 0
    flags[np.isnan(vel_1h)] = np.nan
    dsets[1].VART_QC.values[0, 0, :] = flags[:]
    # Recompute the overall quality variable so it includes the new VART_QC:
    bueno = (dsets[1].variables['VART_QC'] == 1)
    for nombre in ['CSPD_QC', 'OWTR_QC', 'MDFL_QC', 'AVRB_QC', 'RDCT_QC']:
        bueno = bueno & (dsets[1].variables[nombre] == 1)
    sin_dato = np.isnan(dsets[1].variables['CSPD_QC'])
    dsets[1].variables['QCflag'].values = np.select([bueno & ~sin_dato, ~bueno & ~sin_dato, sin_dato],
                                                    [1, 4, np.nan])
    dsets[1].to_netcdf('%s_new.nc' % ficheros[1].split('.')[0])
def ruv2nc(path_in, path_out, fichero, station):
    """
    Convert one .ruv radial file into a quality-controlled netCDF file.

    Parameters
    ----------
    path_in: directory holding the input .ruv file.
    path_out: directory where the netCDF output is written.
    fichero: name of the .ruv file; the radar code ([A-Z]{4}) and the
        timestamp (digit groups) are parsed from it.
    station: station key used to look up the number of ranges in NRANGE.
    """
    # os.path.join instead of manual '/'-concatenation:
    file_in = os.path.join(path_in, fichero)
    # Raw strings for the regex patterns (avoids invalid-escape warnings):
    radar = re.findall(r"[A-Z]{4}", fichero.split('/')[-1])[0]
    fecha = datetime.strptime('%s%s%s%s' % tuple(re.findall(r"\d+", fichero.split('/')[-1])), '%Y%m%d%H%M')
    # Radial object reads and parses the file:
    radial = Radial(file_in)
    # Polar grid the table is mapped onto:
    grd = Grid(radial, nRNGE=NRANGE[station])
    # Map the table onto the grid:
    radial.to_grid(grd)
    # Compute the quality-control variables:
    radial.QC_control()
    # Write the netCDF file:
    radial.to_netcdf(path_out, fichero)
    # The temporal-derivative test needs the two previous hourly files:
    file_out = os.path.join(path_out, 'HFR-Galicia-%s_%s.nc')
    ficheros = [file_out % (radar, (fecha + timedelta(hours=-i)).strftime('%Y_%m_%d_%H%M')) for i in
                range(3)]
    condiciones = [os.path.isfile(fichero) for fichero in ficheros]
    if np.all(condiciones):
        logging.info('Procesando VART_QC en %s' % ficheros[1])
        VART_QC(ficheros)
    else:
        logging.info('No VART_QC')
# Script entry point: convert one sample SILL radial file to netCDF.
if __name__ == '__main__':
    file = r'RDLm_SILL_2021_12_20_1000.ruv'
    path_in = r'../datos/radarhf_tmp/ruv/SILL'
    path_out = r'../datos/radarhf_tmp/nc/radial'
    ruv2nc(path_in, path_out, file, 'SILL')
# NOTE(review): the triple-quoted string below is dead plotting/debugging
# code kept for reference only; it is never executed.
'''
plt.pcolormesh(grd.longitud,grd.latitud,radial.variables['RNGE'])
plt.grid()
plt.colorbar()
plt.show()
plt.pcolormesh(grd.longitud,grd.latitud,radial.variables['BEAR'])
plt.grid()
plt.colorbar()
plt.show()
plt.plot(grd.longitud,grd.latitud,'k.')
plt.plot(radial.tablas[0].LOND, radial.tablas[0].LATD,'r.')
plt.grid()
plt.show()
ax = plt.subplot(111, projection='polar')
ax.pcolormesh(grd.theta, grd.r,radial.variables['VELO'])
ax.grid()
plt.show()
test = './datos/netcdf/HFR-Galicia-PRIO_2020_08_01_1200.nc'
datos_test = xr.open_dataset(test)
# Representación con CartoPy:
land_50m = cfeature.NaturalEarthFeature('physical', 'land', '50m',
                                        edgecolor='k',
                                        facecolor=cfeature.COLORS['land'])
land = cfeature.GSHHSFeature(scale='auto')
proyeccion = ccrs.LambertConformal(central_longitude=-15.0, central_latitude=40)
fig, ax = plt.subplots(1, 1, figsize=(9,6), subplot_kw=dict(projection=proyeccion))
ax.set_extent([-11, -5.8, 41.7, 45.4], crs=ccrs.PlateCarree())
#ax.stock_img()
ax.add_feature(land)
ax.add_feature(cfeature.BORDERS, edgecolor='gray')
# Solo soportado para PlateCarree y Mercator:
# gl = ax.gridlines(draw_labels=True, linewidth=1, color='black', alpha=0.5, linestyle='--')
gl = ax.gridlines(linewidth=1, color='black', alpha=0.5, linestyle='--')
gl.xlines = True
gl.ylines = True
gl.xlocator = mticker.FixedLocator(np.arange(-11,-4,1))
gl.ylocator = mticker.FixedLocator(np.arange(41,46,1))
cset = ax.pcolormesh(grd.longitud, grd.latitud, radial.variables['VELO'][0,0,:], transform=ccrs.PlateCarree())
#plt.show()
fig, ax = plt.subplots(1, 1, figsize=(9,6), subplot_kw=dict(projection=proyeccion))
ax.set_extent([-11, -5.8, 41.7, 45.4], crs=ccrs.PlateCarree())
#ax.stock_img()
ax.add_feature(land)
ax.add_feature(cfeature.BORDERS, edgecolor='gray')
# Solo soportado para PlateCarree y Mercator:
# gl = ax.gridlines(draw_labels=True, linewidth=1, color='black', alpha=0.5, linestyle='--')
gl = ax.gridlines(linewidth=1, color='black', alpha=0.5, linestyle='--')
gl.xlines = True
gl.ylines = True
gl.xlocator = mticker.FixedLocator(np.arange(-11,-4,1))
gl.ylocator = mticker.FixedLocator(np.arange(41,46,1))
cset = ax.pcolormesh(datos_test.LONGITUDE, datos_test.LATITUDE, -100*datos_test.RDVA[0,0,:], transform=ccrs.PlateCarree())
plt.show()
'''
| [
"numpy.radians",
"numpy.ma.getmaskarray",
"numpy.sqrt",
"numpy.column_stack",
"numpy.iinfo",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"datetime.timedelta",
"numpy.ma.isMaskedArray",
"logging.info",
"numpy.arange",
"datetime.datetime",
"numpy.select",
"numpy.full_like",
"numpy.sort"... | [((446, 541), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n", (465, 541), False, 'import logging\n'), ((5604, 5625), 'numpy.ma.isMaskedArray', 'ma.isMaskedArray', (['uin'], {}), '(uin)\n', (5620, 5625), True, 'import numpy.ma as ma\n'), ((5866, 5877), 'numpy.abs', 'np.abs', (['uvc'], {}), '(uvc)\n', (5872, 5877), True, 'import numpy as np\n'), ((5890, 5903), 'numpy.angle', 'np.angle', (['uvc'], {}), '(uvc)\n', (5898, 5903), True, 'import numpy as np\n'), ((6620, 6646), 'numpy.arctan2', 'np.arctan2', (['(yn - y)', '(xn - x)'], {}), '(yn - y, xn - x)\n', (6630, 6646), True, 'import numpy as np\n'), ((32296, 32321), 'numpy.full_like', 'np.full_like', (['radVel1h', '(4)'], {}), '(radVel1h, 4)\n', (32308, 32321), True, 'import numpy as np\n'), ((33134, 33176), 'numpy.isnan', 'np.isnan', (["datasets[1].variables['CSPD_QC']"], {}), "(datasets[1].variables['CSPD_QC'])\n", (33142, 33176), True, 'import numpy as np\n'), ((33223, 33298), 'numpy.select', 'np.select', (['[condicion & ~isNan, ~condicion & ~isNan, isNan]', '[1, 4, np.nan]'], {}), '([condicion & ~isNan, ~condicion & ~isNan, isNan], [1, 4, np.nan])\n', (33232, 33298), True, 'import numpy as np\n'), ((34044, 34090), 'os.path.join', 'os.path.join', (['path_out', '"""HFR-Galicia-%s_%s.nc"""'], {}), "(path_out, 'HFR-Galicia-%s_%s.nc')\n", (34056, 34090), False, 'import os\n'), ((34295, 34314), 'numpy.all', 'np.all', (['condiciones'], {}), '(condiciones)\n', (34301, 34314), True, 'import numpy as np\n'), ((2709, 2739), 'numpy.float32', 'np.float32', (['scale_factors[key]'], {}), '(scale_factors[key])\n', (2719, 2739), True, 'import numpy as np\n'), ((2767, 2780), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (2777, 2780), True, 'import numpy as np\n'), ((2888, 2909), 'numpy.dtype', 'np.dtype', (['dtypes[key]'], {}), 
'(dtypes[key])\n', (2896, 2909), True, 'import numpy as np\n'), ((5345, 5368), 'numpy.meshgrid', 'np.meshgrid', (['lons', 'lats'], {}), '(lons, lats)\n', (5356, 5368), True, 'import numpy as np\n'), ((5642, 5662), 'numpy.ma.getmaskarray', 'ma.getmaskarray', (['uin'], {}), '(uin)\n', (5657, 5662), True, 'import numpy.ma as ma\n'), ((6070, 6090), 'numpy.exp', 'np.exp', (['(theta * 1.0j)'], {}), '(theta * 1.0j)\n', (6076, 6090), True, 'import numpy as np\n'), ((6240, 6259), 'numpy.abs', 'np.abs', (['(lats + dlat)'], {}), '(lats + dlat)\n', (6246, 6259), True, 'import numpy as np\n'), ((6780, 6803), 'numpy.exp', 'np.exp', (['(1.0j * vecangle)'], {}), '(1.0j * vecangle)\n', (6786, 6803), True, 'import numpy as np\n'), ((6879, 6904), 'numpy.ma.array', 'ma.array', (['uout'], {'mask': 'mask'}), '(uout, mask=mask)\n', (6887, 6904), True, 'import numpy.ma as ma\n'), ((6920, 6945), 'numpy.ma.array', 'ma.array', (['vout'], {'mask': 'mask'}), '(vout, mask=mask)\n', (6928, 6945), True, 'import numpy.ma as ma\n'), ((8282, 8345), 'datetime.datetime.strptime', 'datetime.strptime', (["metadatos['TimeStamp']", '""" %Y %m %d %H %M %S"""'], {}), "(metadatos['TimeStamp'], ' %Y %m %d %H %M %S')\n", (8299, 8345), False, 'from datetime import datetime, timedelta\n'), ((10412, 10485), 'numpy.column_stack', 'np.column_stack', (['[self.tablas[0].LOND.values, self.tablas[0].LATD.values]'], {}), '([self.tablas[0].LOND.values, self.tablas[0].LATD.values])\n', (10427, 10485), True, 'import numpy as np\n'), ((10685, 10698), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10696, 10698), False, 'from collections import OrderedDict\n'), ((10827, 10895), 'xarray.DataArray', 'xr.DataArray', (['[delta.days + delta.seconds / 86400]'], {'dims': "{'TIME': 1}"}), "([delta.days + delta.seconds / 86400], dims={'TIME': 1})\n", (10839, 10895), True, 'import xarray as xr\n'), ((10929, 10967), 'xarray.DataArray', 'xr.DataArray', (['[0.0]'], {'dims': "{'DEPTH': 1}"}), "([0.0], dims={'DEPTH': 1})\n", 
(10941, 10967), True, 'import xarray as xr\n'), ((13676, 13768), 'xarray.DataArray', 'xr.DataArray', (['sdnTime_QCflag'], {'dims': "{'TIME': 1}", 'coords': "{'TIME': self.variables['TIME']}"}), "(sdnTime_QCflag, dims={'TIME': 1}, coords={'TIME': self.\n variables['TIME']})\n", (13688, 13768), True, 'import xarray as xr\n'), ((14237, 14329), 'xarray.DataArray', 'xr.DataArray', (['sdnTime_QCflag'], {'dims': "{'TIME': 1}", 'coords': "{'TIME': self.variables['TIME']}"}), "(sdnTime_QCflag, dims={'TIME': 1}, coords={'TIME': self.\n variables['TIME']})\n", (14249, 14329), True, 'import xarray as xr\n'), ((16502, 16539), 'numpy.meshgrid', 'np.meshgrid', (['radVel.RNGE', 'radVel.BEAR'], {}), '(radVel.RNGE, radVel.BEAR)\n', (16513, 16539), True, 'import numpy as np\n'), ((18123, 18211), 'xarray.DataArray', 'xr.DataArray', (['avgRadBear'], {'dims': "{'TIME': 1}", 'coords': "{'TIME': self.variables['TIME']}"}), "(avgRadBear, dims={'TIME': 1}, coords={'TIME': self.variables[\n 'TIME']})\n", (18135, 18211), True, 'import xarray as xr\n'), ((18397, 18485), 'xarray.DataArray', 'xr.DataArray', (['avgRadBear'], {'dims': "{'TIME': 1}", 'coords': "{'TIME': self.variables['TIME']}"}), "(avgRadBear, dims={'TIME': 1}, coords={'TIME': self.variables[\n 'TIME']})\n", (18409, 18485), True, 'import xarray as xr\n'), ((18829, 18864), 'numpy.isnan', 'np.isnan', (["self.variables['CSPD_QC']"], {}), "(self.variables['CSPD_QC'])\n", (18837, 18864), True, 'import numpy as np\n'), ((18976, 19051), 'numpy.select', 'np.select', (['[condicion & ~isNan, ~condicion & ~isNan, isNan]', '[1, 4, np.nan]'], {}), '([condicion & ~isNan, ~condicion & ~isNan, isNan], [1, 4, np.nan])\n', (18985, 19051), True, 'import numpy as np\n'), ((20218, 20274), 'logging.info', 'logging.info', (["('Fichero: %s Radar: %s' % (fichero, radar))"], {}), "('Fichero: %s Radar: %s' % (fichero, radar))\n", (20230, 20274), False, 'import logging\n'), ((21799, 21856), 'xarray.DataArray', 'xr.DataArray', (['[[siteLat]]'], {'dims': 
"{'TIME': 1, 'MAXSITE': 1}"}), "([[siteLat]], dims={'TIME': 1, 'MAXSITE': 1})\n", (21811, 21856), True, 'import xarray as xr\n'), ((21890, 21947), 'xarray.DataArray', 'xr.DataArray', (['[[siteLon]]'], {'dims': "{'TIME': 1, 'MAXSITE': 1}"}), "([[siteLon]], dims={'TIME': 1, 'MAXSITE': 1})\n", (21902, 21947), True, 'import xarray as xr\n'), ((21981, 22038), 'xarray.DataArray', 'xr.DataArray', (['[[siteLat]]'], {'dims': "{'TIME': 1, 'MAXSITE': 1}"}), "([[siteLat]], dims={'TIME': 1, 'MAXSITE': 1})\n", (21993, 22038), True, 'import xarray as xr\n'), ((22072, 22129), 'xarray.DataArray', 'xr.DataArray', (['[[siteLon]]'], {'dims': "{'TIME': 1, 'MAXSITE': 1}"}), "([[siteLon]], dims={'TIME': 1, 'MAXSITE': 1})\n", (22084, 22129), True, 'import xarray as xr\n'), ((22451, 22493), 'xarray.DataArray', 'xr.DataArray', (['[numSites]'], {'dims': "{'TIME': 1}"}), "([numSites], dims={'TIME': 1})\n", (22463, 22493), True, 'import xarray as xr\n'), ((22527, 22569), 'xarray.DataArray', 'xr.DataArray', (['[numSites]'], {'dims': "{'TIME': 1}"}), "([numSites], dims={'TIME': 1})\n", (22539, 22569), True, 'import xarray as xr\n'), ((23143, 23169), 'xarray.Dataset', 'xr.Dataset', (['self.variables'], {}), '(self.variables)\n', (23153, 23169), True, 'import xarray as xr\n'), ((28296, 28342), 'os.path.join', 'os.path.join', (['path_out', '"""HFR-Galicia-%s_%s.nc"""'], {}), "(path_out, 'HFR-Galicia-%s_%s.nc')\n", (28308, 28342), False, 'import os\n'), ((29819, 29943), 'pyproj.Proj', 'Proj', (['"""+proj=tmerc +bR_a=6370997.0 +units=m +lat_0=42.0 +lon_0=-8.0 +x_0=249341.9581021159 +y_0=17861.19187674373"""'], {}), "(\n '+proj=tmerc +bR_a=6370997.0 +units=m +lat_0=42.0 +lon_0=-8.0 +x_0=249341.9581021159 +y_0=17861.19187674373'\n )\n", (29823, 29943), False, 'from pyproj import Proj\n'), ((30507, 30526), 'numpy.sort', 'np.sort', (['(BEAR % 360)'], {}), '(BEAR % 360)\n', (30514, 30526), True, 'import numpy as np\n'), ((30790, 30834), 'numpy.array', 'np.array', (['[(RNGE * x + origen_x) for x in X]'], 
{}), '([(RNGE * x + origen_x) for x in X])\n', (30798, 30834), True, 'import numpy as np\n'), ((30845, 30889), 'numpy.array', 'np.array', (['[(RNGE * y + origen_y) for y in Y]'], {}), '([(RNGE * y + origen_y) for y in Y])\n', (30853, 30889), True, 'import numpy as np\n'), ((31307, 31400), 'xarray.DataArray', 'xr.DataArray', (['X'], {'dims': "{'BEAR': nBEAR, 'RNGE': nRNGE}", 'coords': "{'BEAR': BEAR, 'RNGE': RNGE}"}), "(X, dims={'BEAR': nBEAR, 'RNGE': nRNGE}, coords={'BEAR': BEAR,\n 'RNGE': RNGE})\n", (31319, 31400), True, 'import xarray as xr\n'), ((31414, 31507), 'xarray.DataArray', 'xr.DataArray', (['Y'], {'dims': "{'BEAR': nBEAR, 'RNGE': nRNGE}", 'coords': "{'BEAR': BEAR, 'RNGE': RNGE}"}), "(Y, dims={'BEAR': nBEAR, 'RNGE': nRNGE}, coords={'BEAR': BEAR,\n 'RNGE': RNGE})\n", (31426, 31507), True, 'import xarray as xr\n'), ((31644, 31744), 'xarray.DataArray', 'xr.DataArray', (['longitud'], {'dims': "{'BEAR': nBEAR, 'RNGE': nRNGE}", 'coords': "{'BEAR': BEAR, 'RNGE': RNGE}"}), "(longitud, dims={'BEAR': nBEAR, 'RNGE': nRNGE}, coords={'BEAR':\n BEAR, 'RNGE': RNGE})\n", (31656, 31744), True, 'import xarray as xr\n'), ((31778, 31877), 'xarray.DataArray', 'xr.DataArray', (['latitud'], {'dims': "{'BEAR': nBEAR, 'RNGE': nRNGE}", 'coords': "{'BEAR': BEAR, 'RNGE': RNGE}"}), "(latitud, dims={'BEAR': nBEAR, 'RNGE': nRNGE}, coords={'BEAR':\n BEAR, 'RNGE': RNGE})\n", (31790, 31877), True, 'import xarray as xr\n'), ((32119, 32143), 'xarray.open_dataset', 'xr.open_dataset', (['fichero'], {}), '(fichero)\n', (32134, 32143), True, 'import xarray as xr\n'), ((32360, 32385), 'numpy.abs', 'np.abs', (['(radVel - radVel1h)'], {}), '(radVel - radVel1h)\n', (32366, 32385), True, 'import numpy as np\n'), ((32417, 32444), 'numpy.abs', 'np.abs', (['(radVel2h - radVel1h)'], {}), '(radVel2h - radVel1h)\n', (32423, 32444), True, 'import numpy as np\n'), ((32506, 32522), 'numpy.isnan', 'np.isnan', (['radVel'], {}), '(radVel)\n', (32514, 32522), True, 'import numpy as np\n'), ((32525, 32543), 
'numpy.isnan', 'np.isnan', (['radVel2h'], {}), '(radVel2h)\n', (32533, 32543), True, 'import numpy as np\n'), ((32589, 32607), 'numpy.isnan', 'np.isnan', (['radVel1h'], {}), '(radVel1h)\n', (32597, 32607), True, 'import numpy as np\n'), ((34238, 34261), 'os.path.isfile', 'os.path.isfile', (['fichero'], {}), '(fichero)\n', (34252, 34261), False, 'import os\n'), ((34324, 34378), 'logging.info', 'logging.info', (["('Procesando VART_QC en %s' % ficheros[1])"], {}), "('Procesando VART_QC en %s' % ficheros[1])\n", (34336, 34378), False, 'import logging\n'), ((34423, 34449), 'logging.info', 'logging.info', (['"""No VART_QC"""'], {}), "('No VART_QC')\n", (34435, 34449), False, 'import logging\n'), ((6117, 6133), 'numpy.radians', 'np.radians', (['lats'], {}), '(lats)\n', (6127, 6133), True, 'import numpy as np\n'), ((10773, 10793), 'datetime.datetime', 'datetime', (['(1950)', '(1)', '(1)'], {}), '(1950, 1, 1)\n', (10781, 10793), False, 'from datetime import datetime, timedelta\n'), ((16412, 16434), 'numpy.isnan', 'np.isnan', (['radVel[0, 0]'], {}), '(radVel[0, 0])\n', (16420, 16434), True, 'import numpy as np\n'), ((20354, 20365), 'numpy.int16', 'np.int16', (['(0)'], {}), '(0)\n', (20362, 20365), True, 'import numpy as np\n'), ((20542, 20577), 'numpy.int16', 'np.int16', (['[[SDN_EDMO_CODEs[radar]]]'], {}), '([[SDN_EDMO_CODEs[radar]]])\n', (20550, 20577), True, 'import numpy as np\n'), ((20775, 20793), 'numpy.array', 'np.array', (['[cadena]'], {}), '([cadena])\n', (20783, 20793), True, 'import numpy as np\n'), ((20944, 20962), 'numpy.array', 'np.array', (['[cadena]'], {}), '([cadena])\n', (20952, 20962), True, 'import numpy as np\n'), ((21152, 21170), 'numpy.array', 'np.array', (['[cadena]'], {}), '([cadena])\n', (21160, 21170), True, 'import numpy as np\n'), ((21373, 21391), 'numpy.array', 'np.array', (['[cadena]'], {}), '([cadena])\n', (21381, 21391), True, 'import numpy as np\n'), ((21656, 21676), 'numpy.array', 'np.array', (['[[cadena]]'], {}), '([[cadena]])\n', (21664, 
21676), True, 'import numpy as np\n'), ((22242, 22262), 'numpy.array', 'np.array', (['[[cadena]]'], {}), '([[cadena]])\n', (22250, 22262), True, 'import numpy as np\n'), ((22342, 22362), 'numpy.array', 'np.array', (['[[cadena]]'], {}), '([[cadena]])\n', (22350, 22362), True, 'import numpy as np\n'), ((27910, 27931), 'numpy.dtype', 'np.dtype', (['dtypes[var]'], {}), '(dtypes[var])\n', (27918, 27931), True, 'import numpy as np\n'), ((30632, 30654), 'numpy.sin', 'np.sin', (['(BEAR * deg2rad)'], {}), '(BEAR * deg2rad)\n', (30638, 30654), True, 'import numpy as np\n'), ((30656, 30678), 'numpy.cos', 'np.cos', (['(BEAR * deg2rad)'], {}), '(BEAR * deg2rad)\n', (30662, 30678), True, 'import numpy as np\n'), ((3041, 3068), 'numpy.int_', 'np.int_', (['scale_factors[key]'], {}), '(scale_factors[key])\n', (3048, 3068), True, 'import numpy as np\n'), ((3114, 3124), 'numpy.int_', 'np.int_', (['(0)'], {}), '(0)\n', (3121, 3124), True, 'import numpy as np\n'), ((3248, 3269), 'numpy.finfo', 'np.finfo', (['dtypes[key]'], {}), '(dtypes[key])\n', (3256, 3269), True, 'import numpy as np\n'), ((3315, 3336), 'numpy.iinfo', 'np.iinfo', (['dtypes[key]'], {}), '(dtypes[key])\n', (3323, 3336), True, 'import numpy as np\n'), ((11565, 11592), 'numpy.expand_dims', 'np.expand_dims', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (11579, 11592), True, 'import numpy as np\n'), ((11652, 11679), 'numpy.expand_dims', 'np.expand_dims', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (11666, 11679), True, 'import numpy as np\n'), ((11724, 12021), 'xarray.DataArray', 'xr.DataArray', (['tmp'], {'dims': "{'TIME': 1, 'DEPTH': 1, 'BEAR': grid.nBEAR, 'RNGE': grid.nRNGE}", 'coords': "{'TIME': self.variables['TIME'], 'DEPH': self.variables['DEPH'], 'BEAR':\n grid.BEAR, 'RNGE': grid.RNGE, 'LONGITUDE': grid.longitud, 'LATITUDE':\n grid.latitud, 'XDST': grid.X, 'YDST': grid.Y}"}), "(tmp, dims={'TIME': 1, 'DEPTH': 1, 'BEAR': grid.nBEAR, 'RNGE':\n grid.nRNGE}, coords={'TIME': self.variables['TIME'], 'DEPH': self.\n 
variables['DEPH'], 'BEAR': grid.BEAR, 'RNGE': grid.RNGE, 'LONGITUDE':\n grid.longitud, 'LATITUDE': grid.latitud, 'XDST': grid.X, 'YDST': grid.Y})\n", (11736, 12021), True, 'import xarray as xr\n'), ((14992, 15006), 'numpy.isnan', 'np.isnan', (['owtr'], {}), '(owtr)\n', (15000, 15006), True, 'import numpy as np\n'), ((15516, 15532), 'numpy.isnan', 'np.isnan', (['radVel'], {}), '(radVel)\n', (15524, 15532), True, 'import numpy as np\n'), ((15938, 15954), 'numpy.isnan', 'np.isnan', (['varVec'], {}), '(varVec)\n', (15946, 15954), True, 'import numpy as np\n'), ((17772, 17802), 'numpy.isnan', 'np.isnan', (['radVelMedianFiltered'], {}), '(radVelMedianFiltered)\n', (17780, 17802), True, 'import numpy as np\n'), ((30311, 30327), 'numpy.arange', 'np.arange', (['nRNGE'], {}), '(nRNGE)\n', (30320, 30327), True, 'import numpy as np\n'), ((30395, 30411), 'numpy.arange', 'np.arange', (['nBEAR'], {}), '(nBEAR)\n', (30404, 30411), True, 'import numpy as np\n'), ((15461, 15475), 'numpy.abs', 'np.abs', (['radVel'], {}), '(radVel)\n', (15467, 15475), True, 'import numpy as np\n'), ((15489, 15503), 'numpy.abs', 'np.abs', (['radVel'], {}), '(radVel)\n', (15495, 15503), True, 'import numpy as np\n'), ((17342, 17381), 'numpy.median', 'np.median', (['radVel[0, 0].values[ventana]'], {}), '(radVel[0, 0].values[ventana])\n', (17351, 17381), True, 'import numpy as np\n'), ((17569, 17606), 'numpy.abs', 'np.abs', (['(radVelMedianFiltered - radVel)'], {}), '(radVelMedianFiltered - radVel)\n', (17575, 17606), True, 'import numpy as np\n'), ((17671, 17708), 'numpy.abs', 'np.abs', (['(radVelMedianFiltered - radVel)'], {}), '(radVelMedianFiltered - radVel)\n', (17677, 17708), True, 'import numpy as np\n'), ((25436, 25528), 'logging.info', 'logging.info', (["('No puedo cargar el atributo --> %s del fichero radial' % atributo_fichero)"], {}), "('No puedo cargar el atributo --> %s del fichero radial' %\n atributo_fichero)\n", (25448, 25528), False, 'import logging\n'), ((26072, 26093), 'numpy.dtype', 
'np.dtype', (['dtypes[var]'], {}), '(dtypes[var])\n', (26080, 26093), True, 'import numpy as np\n'), ((28084, 28122), 'numpy.float_', 'np.float_', (["atributos[var]['valid_min']"], {}), "(atributos[var]['valid_min'])\n", (28093, 28122), True, 'import numpy as np\n'), ((28187, 28225), 'numpy.float_', 'np.float_', (["atributos[var]['valid_max']"], {}), "(atributos[var]['valid_max'])\n", (28196, 28225), True, 'import numpy as np\n'), ((14753, 14769), 'numpy.isnan', 'np.isnan', (['radVel'], {}), '(radVel)\n', (14761, 14769), True, 'import numpy as np\n'), ((16800, 16848), 'numpy.sqrt', 'np.sqrt', (['((X - X[j, i]) ** 2 + (Y - Y[j, i]) ** 2)'], {}), '((X - X[j, i]) ** 2 + (Y - Y[j, i]) ** 2)\n', (16807, 16848), True, 'import numpy as np\n'), ((17150, 17161), 'numpy.abs', 'np.abs', (['dif'], {}), '(dif)\n', (17156, 17161), True, 'import numpy as np\n'), ((24683, 24704), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (24692, 24704), False, 'from datetime import datetime, timedelta\n'), ((24785, 24806), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (24794, 24806), False, 'from datetime import datetime, timedelta\n'), ((26478, 26499), 'numpy.dtype', 'np.dtype', (['dtypes[var]'], {}), '(dtypes[var])\n', (26486, 26499), True, 'import numpy as np\n'), ((14624, 14638), 'numpy.isnan', 'np.isnan', (['bear'], {}), '(bear)\n', (14632, 14638), True, 'import numpy as np\n'), ((26250, 26278), 'numpy.int_', 'np.int_', (['atributos[var][key]'], {}), '(atributos[var][key])\n', (26257, 26278), True, 'import numpy as np\n'), ((34136, 34155), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-i)'}), '(hours=-i)\n', (34145, 34155), False, 'from datetime import datetime, timedelta\n'), ((26656, 26685), 'numpy.array', 'np.array', (['atributos[var][key]'], {}), '(atributos[var][key])\n', (26664, 26685), True, 'import numpy as np\n'), ((24847, 24861), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (24859, 24861), 
False, 'from datetime import datetime, timedelta\n')] |
#!/usr/bin/env python3
"""Example linear programs for the simplex solver.

Use jupyter notebook or qtconsole to see formatted results, e.g.:
    > jupyter qtconsole
then in the console:
    > %load usage.py
and press ENTER twice.
"""
import numpy as np
from main import LP, sympy_simplex

# Blatt 2, Aufgabe 1
aufgabe1 = LP(
    np.matrix([[2, 0, 6], [-2, 8, 4], [3, 6, 5]]),   # A
    np.matrix([[10], [12], [20]]),                   # b
    np.matrix([[2], [1], [3], [0], [0], [0]]),       # c
    [4, 5, 6],                                       # initial basis
)
# sympy_simplex(aufgabe1)

# Book, page 31 ("Kreise" example)
kreise_example = LP(
    np.matrix([[-0.5, -5.5, -2.5, 9], [0.5, -1.5, -0.5, 1], [1, 0, 0, 0]]),  # A
    np.matrix([[0], [0], [1]]),                                              # b
    np.matrix([[10], [-57], [-9], [-24], [0], [0], [0]]),                    # c
    [5, 6, 7],
)
# sympy_simplex(kreise_example)

# Blatt 5, Aufgabe 1
blatt5_aufgabe1 = LP(
    np.matrix([[2, 3], [4, 1], [1, 1], [2, 1]]),     # A
    np.matrix([[12000], [16000], [4300], [8200]]),   # b
    np.matrix([[5], [4], [0], [0], [0], [0]]),       # c
    [3, 4, 5, 6],
)
sympy_simplex(blatt5_aufgabe1)
| [
"numpy.matrix",
"main.sympy_simplex"
] | [((810, 840), 'main.sympy_simplex', 'sympy_simplex', (['blatt5_aufgabe1'], {}), '(blatt5_aufgabe1)\n', (823, 840), False, 'from main import sympy_simplex, LP\n'), ((267, 300), 'numpy.matrix', 'np.matrix', (['"""2 0 6; -2 8 4; 3 6 5"""'], {}), "('2 0 6; -2 8 4; 3 6 5')\n", (276, 300), True, 'import numpy as np\n'), ((306, 329), 'numpy.matrix', 'np.matrix', (['"""10; 12; 20"""'], {}), "('10; 12; 20')\n", (315, 329), True, 'import numpy as np\n'), ((335, 364), 'numpy.matrix', 'np.matrix', (['"""2; 1; 3; 0; 0; 0"""'], {}), "('2; 1; 3; 0; 0; 0')\n", (344, 364), True, 'import numpy as np\n'), ((449, 504), 'numpy.matrix', 'np.matrix', (['"""-0.5 -5.5 -2.5 9; 0.5 -1.5 -0.5 1; 1 0 0 0"""'], {}), "('-0.5 -5.5 -2.5 9; 0.5 -1.5 -0.5 1; 1 0 0 0')\n", (458, 504), True, 'import numpy as np\n'), ((515, 535), 'numpy.matrix', 'np.matrix', (['"""0; 0; 1"""'], {}), "('0; 0; 1')\n", (524, 535), True, 'import numpy as np\n'), ((546, 584), 'numpy.matrix', 'np.matrix', (['"""10; -57; -9; -24; 0; 0; 0"""'], {}), "('10; -57; -9; -24; 0; 0; 0')\n", (555, 584), True, 'import numpy as np\n'), ((667, 698), 'numpy.matrix', 'np.matrix', (['"""2 3; 4 1; 1 1; 2 1"""'], {}), "('2 3; 4 1; 1 1; 2 1')\n", (676, 698), True, 'import numpy as np\n'), ((708, 745), 'numpy.matrix', 'np.matrix', (['"""12000; 16000; 4300; 8200"""'], {}), "('12000; 16000; 4300; 8200')\n", (717, 745), True, 'import numpy as np\n'), ((755, 784), 'numpy.matrix', 'np.matrix', (['"""5; 4; 0; 0; 0; 0"""'], {}), "('5; 4; 0; 0; 0; 0')\n", (764, 784), True, 'import numpy as np\n')] |
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
import copy
import numpy as np
from src.common.node import Node
from src.common.route import Map
from src.conf.configs import Configs
from src.utils.input_utils import get_factory_info, get_route_map
from src.utils.json_tools import convert_nodes_to_json
from src.utils.json_tools import get_vehicle_instance_dict, get_order_item_dict
from src.utils.json_tools import read_json_from_file, write_json_to_file
from src.utils.logging_engine import logger
from algorithm.wolrd import World
# Precomputed routing data: the ordered list of factory ids, the all-pairs
# shortest-path successor matrix, an id -> matrix-index lookup, and the
# World topology built from the raw CSV inputs.
ids = np.load("algorithm/factory2id.npy")
paths = np.load("algorithm/shortest_path.npy")
factory2id = {factory_id: index for index, factory_id in enumerate(ids)}
world = World("algorithm/route_info.csv", "algorithm/factory_info.csv")
# NOTE: currently unused — the simulator does not yet support redesigning a
# vehicle's route, so the precomputed shortest paths cannot be applied.
def get_shortest_path(start, end):
    """Reconstruct the shortest path from factory ``start`` to factory ``end``.

    Walks the precomputed successor matrix ``paths`` (indexed through
    ``factory2id``) and accumulates the travelled distance and time from the
    module-level ``world`` topology.

    :param start: id of the origin factory
    :param end: id of the destination factory
    :return: (list of factory ids along the path, total distance, total time)
    """
    path = paths[factory2id[start]][factory2id[end]]
    shortest_path = []
    distance = 0
    time = 0
    i = 0
    # Follow successor indices until a factory would repeat (cycle guard).
    while ids[int(path[i])] not in shortest_path:
        # Two consecutive zero entries (when index 0 is not the target) appear
        # to act as end-of-path padding in the stored matrix — TODO confirm.
        if int(path[i]) == 0 and ids[int(path[i])] != end and int(path[i + 1]) == 0:
            break
        shortest_path.append(ids[int(path[i])])
        if len(shortest_path) == 1:
            i += 1
            continue
        # Accumulate the edge cost between the last two factories on the path.
        distance += world.id2factory[shortest_path[-1]].origins[shortest_path[-2]].distance
        time += world.id2factory[shortest_path[-1]].origins[shortest_path[-2]].time
        i += 1
    return shortest_path, distance, time
# naive dispatching method
def dispatch_orders_to_vehicles(id_to_unallocated_order_item: dict, id_to_vehicle: dict, id_to_factory: dict):
"""
:param id_to_unallocated_order_item: item_id ——> OrderItem object(state: "GENERATED")
:param id_to_vehicle: vehicle_id ——> Vehicle object
:param id_to_factory: factory_id ——> factory object
"""
vehicle_id_to_destination = {}
vehicle_id_to_planned_route = {}
# dealing with the carrying items of vehicles (处理车辆身上已经装载的货物)
for vehicle_id, vehicle in id_to_vehicle.items():
unloading_sequence_of_items = vehicle.get_unloading_sequence()
vehicle_id_to_planned_route[vehicle_id] = []
if len(unloading_sequence_of_items) > 0:
delivery_item_list = []
factory_id = unloading_sequence_of_items[0].delivery_factory_id
for item in unloading_sequence_of_items:
if item.delivery_factory_id == factory_id:
delivery_item_list.append(item)
else:
factory = id_to_factory.get(factory_id)
# if not vehicle_id_to_planned_route[vehicle_id]:
# path = get_shortest_path(vehicle.destination.id, factory_id)[0]
# for i in range(len(path) - 1):
# node = Node(path[i], world.id2factory[path[i]].lon, world.id2factory[path[i]].lat, [], [])
# vehicle_id_to_planned_route[vehicle_id].append(node)
node = Node(factory_id, factory.lng, factory.lat, [], copy.copy(delivery_item_list))
vehicle_id_to_planned_route[vehicle_id].append(node)
delivery_item_list = [item]
factory_id = item.delivery_factory_id
if len(delivery_item_list) > 0:
factory = id_to_factory.get(factory_id)
# if not vehicle_id_to_planned_route[vehicle_id]:
# path = get_shortest_path(vehicle.destination.id, factory_id)[0]
# for i in range(len(path) - 1):
# node = Node(path[i], world.id2factory[path[i]].lon, world.id2factory[path[i]].lat, [], [])
# vehicle_id_to_planned_route[vehicle_id].append(node)
node = Node(factory_id, factory.lng, factory.lat, [], copy.copy(delivery_item_list))
vehicle_id_to_planned_route[vehicle_id].append(node)
# for the empty vehicle, it has been allocated to the order, but have not yet arrived at the pickup factory
pre_matching_item_ids = []
for vehicle_id, vehicle in id_to_vehicle.items():
if vehicle.carrying_items.is_empty() and vehicle.destination is not None:
pickup_items = vehicle.destination.pickup_items
# pickup_node, delivery_node = __create_pickup_and_delivery_nodes_of_items(pickup_items, id_to_factory)
# vehicle_id_to_planned_route[vehicle_id].append(pickup_node)
# vehicle_id_to_planned_route[vehicle_id].append(delivery_node)
nodelist = __create_pickup_and_delivery_nodes_of_items(pickup_items, id_to_factory)
pickup_node = nodelist[0]
delivery_node = nodelist[1]
if len(pickup_node.pickup_items) > 0:
check = 0
if len(vehicle_id_to_planned_route[vehicle.id]) > 0:
n = vehicle_id_to_planned_route[vehicle.id][-1]
if n.id == pickup_node.id:
n.pickup_items += pickup_node.pickup_items
vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
check = 1
if not check:
vehicle_id_to_planned_route[vehicle.id].append(pickup_node)
vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
pre_matching_item_ids.extend([item.id for item in pickup_items])
# dispatch unallocated orders to vehicles
capacity = __get_capacity_of_vehicle(id_to_vehicle)
order_id_to_items = {}
for item_id, item in id_to_unallocated_order_item.items():
if item_id in pre_matching_item_ids:
continue
order_id = item.order_id
if order_id not in order_id_to_items:
order_id_to_items[order_id] = []
order_id_to_items[order_id].append(item)
# vehicle_index = 0
vehicles = [vehicle for vehicle in id_to_vehicle.values()]
for order_id, items in order_id_to_items.items():
demand = __calculate_demand(items)
if demand > capacity:
cur_demand = 0
tmp_items = []
for item in items:
if cur_demand + item.demand > capacity:
# pickup_node, delivery_node = __create_pickup_and_delivery_nodes_of_items(tmp_items, id_to_factory)
# if pickup_node is None or delivery_node is None:
# continue
nodelist = __create_pickup_and_delivery_nodes_of_items(tmp_items, id_to_factory)
if len(nodelist) < 2:
continue
vehicle = select_vehicle_for_orders(vehicles, tmp_items, vehicle_id_to_planned_route)
pickup_node = nodelist[0]
delivery_node = nodelist[1]
if len(pickup_node.pickup_items) > 0:
check = 0
if len(vehicle_id_to_planned_route[vehicle.id]) > 0:
n = vehicle_id_to_planned_route[vehicle.id][-1]
if n.id == pickup_node.id:
n.pickup_items += pickup_node.pickup_items
vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
check = 1
if not check:
vehicle_id_to_planned_route[vehicle.id].append(pickup_node)
vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
# vehicle_id_to_planned_route[vehicle.id].append(pickup_node)
# vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
# vehicle_index = (vehicle_index + 1) % len(vehicles)
tmp_items = []
cur_demand = 0
tmp_items.append(item)
cur_demand += item.demand
if len(tmp_items) > 0:
# pickup_node, delivery_node = __create_pickup_and_delivery_nodes_of_items(tmp_items, id_to_factory)
# if pickup_node is None or delivery_node is None:
# continue
nodelist = __create_pickup_and_delivery_nodes_of_items(tmp_items, id_to_factory)
if len(nodelist) < 2:
continue
vehicle = select_vehicle_for_orders(vehicles, tmp_items, vehicle_id_to_planned_route)
pickup_node = nodelist[0]
delivery_node = nodelist[1]
if len(pickup_node.pickup_items) > 0:
check = 0
if len(vehicle_id_to_planned_route[vehicle.id]) > 0:
n = vehicle_id_to_planned_route[vehicle.id][-1]
if n.id == pickup_node.id:
n.pickup_items += pickup_node.pickup_items
vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
check = 1
if not check:
vehicle_id_to_planned_route[vehicle.id].append(pickup_node)
vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
# vehicle_id_to_planned_route[vehicle.id].append(pickup_node)
# vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
else:
# pickup_node, delivery_node = __create_pickup_and_delivery_nodes_of_items(items, id_to_factory)
nodelist = __create_pickup_and_delivery_nodes_of_items(items, id_to_factory)
if len(nodelist) < 2:
continue
vehicle = select_vehicle_for_orders(vehicles, items, vehicle_id_to_planned_route)
pickup_node = nodelist[0]
delivery_node = nodelist[1]
if len(pickup_node.pickup_items) > 0:
check = 0
if len(vehicle_id_to_planned_route[vehicle.id]) > 0:
n = vehicle_id_to_planned_route[vehicle.id][-1]
if n.id == pickup_node.id:
n.pickup_items += pickup_node.pickup_items
vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
check = 1
if not check:
vehicle_id_to_planned_route[vehicle.id].append(pickup_node)
vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
# vehicle_id_to_planned_route[vehicle.id].append(pickup_node)
# vehicle_id_to_planned_route[vehicle.id].append(delivery_node)
# vehicle_index = (vehicle_index + 1) % len(vehicles)
# print(route_cost(vehicles[0], vehicle_id_to_planned_route[vehicles[0].id]))
# create the output of the algorithm
for vehicle_id, vehicle in id_to_vehicle.items():
origin_planned_route = vehicle_id_to_planned_route.get(vehicle_id)
destination = None
planned_route = []
# determine the destination
if vehicle.destination is not None:
if len(origin_planned_route) == 0:
logger.error(f"Planned route of vehicle {vehicle_id} is wrong")
else:
destination = origin_planned_route[0]
destination.arrive_time = vehicle.destination.arrive_time
planned_route = [origin_planned_route[i] for i in range(1, len(origin_planned_route))]
elif len(origin_planned_route) > 0:
destination = origin_planned_route[0]
planned_route = [origin_planned_route[i] for i in range(1, len(origin_planned_route))]
vehicle_id_to_destination[vehicle_id] = destination
vehicle_id_to_planned_route[vehicle_id] = planned_route
return vehicle_id_to_destination, vehicle_id_to_planned_route
def route_cost(vehicle, route):
    """Estimate the lateness cost of serving ``route`` with ``vehicle``.

    Simulates the route node by node, adding travel, unload and load times,
    and sums how far each delivery finishes past its committed completion
    time. Returns -1 as soon as the load exceeds the vehicle capacity.
    """
    # todo : more accurate starting time
    clock = vehicle.gps_update_time
    current_factory = route[0].id
    capacity_limit = vehicle.board_capacity
    load = sum(item.demand for item in vehicle.carrying_items.items)
    cost = 0
    for node in route:
        clock += world.id2factory[current_factory].destinations[node.id].time
        for item in node.delivery_items:
            clock += item.unload_time
            # todo : cost is per item or per order?
            cost += max(0, clock - item.committed_completion_time)
            load -= item.demand
        for item in node.pickup_items:
            clock += item.load_time
            load += item.demand
        if load > capacity_limit:
            return -1
        current_factory = node.id
    return cost
def select_vehicle_for_orders(vehicles, items, vehicle_id_to_planned_route):
    """Pick the vehicle that can reach the items' pickup factory soonest.

    For every vehicle (scanned in random order to break ties) estimate the
    time to work through its current plan and then drive to the pickup
    factory of ``items``; return the vehicle with the smallest estimate.
    """
    best_time = 2147483648
    best_vehicle = None
    scan_order = np.random.permutation(len(vehicles))
    for idx in scan_order:
        candidate = vehicles[idx]
        eta = 0
        start_loc = ""
        # An idle, unassigned vehicle starts from wherever it is parked.
        if len(candidate.carrying_items.items) == 0 and not candidate.destination:
            start_loc = candidate.cur_factory_id
        if start_loc == "":
            if not candidate.destination:
                # No destination recorded: treat it as already at the pickup factory.
                start_loc = items[0].pickup_factory_id
            else:
                # Busy vehicle: wait until it leaves its current destination.
                eta += candidate.destination.leave_time - candidate.gps_update_time
                start_loc = candidate.destination.id
        # Walk the remaining planned route, accumulating travel and handling time.
        cursor = start_loc
        for node in vehicle_id_to_planned_route[candidate.id]:
            if node.id == cursor:
                continue
            eta += world.id2factory[cursor].destinations[node.id].time
            cursor = node.id
            if len(node.pickup_items) > 0:
                eta += node.pickup_items[0].load_time
            if len(node.delivery_items) > 0:
                eta += node.delivery_items[0].unload_time
        # Final hop is taken from the plan's start location (as in the
        # original heuristic), not from the last planned stop.
        eta += world.id2factory[start_loc].destinations[items[0].pickup_factory_id].time
        if eta < best_time:
            best_time = eta
            best_vehicle = candidate
    return best_vehicle
def __calculate_demand(item_list: list):
    """Return the total demand over all items in ``item_list`` (0 if empty)."""
    return sum(item.demand for item in item_list)
def __get_capacity_of_vehicle(id_to_vehicle: dict):
    """Return the board capacity of the first vehicle in ``id_to_vehicle``.

    Presumably the fleet is homogeneous so any vehicle's capacity is
    representative — TODO confirm. Returns None when the dict is empty.
    """
    # Only the vehicle objects are needed, so iterate values (not items).
    for vehicle in id_to_vehicle.values():
        return vehicle.board_capacity
    return None
def __create_pickup_and_delivery_nodes_of_items(items: list, id_to_factory: dict):
    """Build the pickup node and matching delivery node for ``items``.

    All items must share one pickup factory and one delivery factory.

    :param items: order items to be picked up together
    :param id_to_factory: factory_id -> Factory object
    :return: [pickup_node, delivery_node], or an empty list when either
             factory id cannot be determined (callers test ``len(...) < 2``)
    """
    pickup_factory_id = __get_pickup_factory_id(items)
    delivery_factory_id = __get_delivery_factory_id(items)
    if len(pickup_factory_id) == 0 or len(delivery_factory_id) == 0:
        # Bug fix: returning (None, None) has length 2 and slipped past the
        # callers' ``len(nodelist) < 2`` guard, crashing later on None nodes.
        return []
    pickup_factory = id_to_factory.get(pickup_factory_id)
    delivery_factory = id_to_factory.get(delivery_factory_id)
    pickup_node = Node(pickup_factory.id, pickup_factory.lng, pickup_factory.lat, copy.copy(items), [])
    # The delivery list is the pickup list reversed (preserves the original
    # unloading order of the naive planner).
    delivery_items = list(reversed(items))
    delivery_node = Node(delivery_factory.id, delivery_factory.lng, delivery_factory.lat, [], delivery_items)
    return [pickup_node, delivery_node]
def __get_pickup_factory_id(items):
    """Return the pickup factory id shared by all ``items``.

    Logs an error and returns an empty string when the list is empty or the
    items do not all share the same pickup factory.
    """
    if not items:
        logger.error("Length of items is 0")
        return ""
    factory_id = items[0].pickup_factory_id
    if any(item.pickup_factory_id != factory_id for item in items):
        logger.error("The pickup factory of these items is not the same")
        return ""
    return factory_id
def __get_delivery_factory_id(items):
    """Return the delivery factory id shared by all ``items``.

    Logs an error and returns an empty string when the list is empty or the
    items do not all share the same delivery factory.
    """
    if not items:
        logger.error("Length of items is 0")
        return ""
    expected = items[0].delivery_factory_id
    for item in items:
        if item.delivery_factory_id != expected:
            logger.error("The delivery factory of these items is not the same")
            return ""
    return expected
"""
Main body
# Note
# This is the demo to show the main flowchart of the algorithm
"""
def scheduling():
    """Entry point: read the inputs, dispatch orders, write the result."""
    # Read the input json; you can design your own classes.
    (id_to_factory, id_to_unallocated_order_item,
     id_to_ongoing_order_item, id_to_vehicle) = __read_input_json()

    # Run the dispatching algorithm.
    vehicle_id_to_destination, vehicle_id_to_planned_route = dispatch_orders_to_vehicles(
        id_to_unallocated_order_item, id_to_vehicle, id_to_factory)

    # Output the dispatch result.
    __output_json(vehicle_id_to_destination, vehicle_id_to_planned_route)
def __read_input_json():
    """Load factories, order items and vehicles from the algorithm inputs."""
    # Factory catalogue.
    id_to_factory = get_factory_info(Configs.factory_info_file_path)

    # Order items: not yet assigned, plus those already in progress.
    id_to_unallocated_order_item = get_order_item_dict(
        read_json_from_file(Configs.algorithm_unallocated_order_items_input_path), 'OrderItem')
    id_to_ongoing_order_item = get_order_item_dict(
        read_json_from_file(Configs.algorithm_ongoing_order_items_input_path), 'OrderItem')
    id_to_order_item = {**id_to_unallocated_order_item, **id_to_ongoing_order_item}

    # Vehicles, linked to the items they carry.
    id_to_vehicle = get_vehicle_instance_dict(
        read_json_from_file(Configs.algorithm_vehicle_input_info_path),
        id_to_order_item, id_to_factory)

    return id_to_factory, id_to_unallocated_order_item, id_to_ongoing_order_item, id_to_vehicle
def __output_json(vehicle_id_to_destination, vehicle_id_to_planned_route):
    """Serialize destinations and planned routes to the algorithm output files."""
    destination_json = convert_nodes_to_json(vehicle_id_to_destination)
    write_json_to_file(Configs.algorithm_output_destination_path, destination_json)
    routes_json = convert_nodes_to_json(vehicle_id_to_planned_route)
    write_json_to_file(Configs.algorithm_output_planned_route_path, routes_json)
| [
"algorithm.wolrd.World",
"src.utils.logging_engine.logger.error",
"src.utils.json_tools.convert_nodes_to_json",
"src.utils.json_tools.read_json_from_file",
"src.utils.json_tools.get_order_item_dict",
"src.utils.json_tools.get_vehicle_instance_dict",
"copy.copy",
"numpy.load",
"src.utils.input_utils.... | [((1623, 1658), 'numpy.load', 'np.load', (['"""algorithm/factory2id.npy"""'], {}), "('algorithm/factory2id.npy')\n", (1630, 1658), True, 'import numpy as np\n'), ((1667, 1705), 'numpy.load', 'np.load', (['"""algorithm/shortest_path.npy"""'], {}), "('algorithm/shortest_path.npy')\n", (1674, 1705), True, 'import numpy as np\n'), ((1783, 1846), 'algorithm.wolrd.World', 'World', (['"""algorithm/route_info.csv"""', '"""algorithm/factory_info.csv"""'], {}), "('algorithm/route_info.csv', 'algorithm/factory_info.csv')\n", (1788, 1846), False, 'from algorithm.wolrd import World\n'), ((18447, 18495), 'src.utils.input_utils.get_factory_info', 'get_factory_info', (['Configs.factory_info_file_path'], {}), '(Configs.factory_info_file_path)\n', (18463, 18495), False, 'from src.utils.input_utils import get_factory_info, get_route_map\n'), ((18715, 18788), 'src.utils.json_tools.read_json_from_file', 'read_json_from_file', (['Configs.algorithm_unallocated_order_items_input_path'], {}), '(Configs.algorithm_unallocated_order_items_input_path)\n', (18734, 18788), False, 'from src.utils.json_tools import read_json_from_file, write_json_to_file\n'), ((18824, 18881), 'src.utils.json_tools.get_order_item_dict', 'get_order_item_dict', (['unallocated_order_items', '"""OrderItem"""'], {}), "(unallocated_order_items, 'OrderItem')\n", (18843, 18881), False, 'from src.utils.json_tools import get_vehicle_instance_dict, get_order_item_dict\n'), ((18909, 18978), 'src.utils.json_tools.read_json_from_file', 'read_json_from_file', (['Configs.algorithm_ongoing_order_items_input_path'], {}), '(Configs.algorithm_ongoing_order_items_input_path)\n', (18928, 18978), False, 'from src.utils.json_tools import read_json_from_file, write_json_to_file\n'), ((19010, 19063), 'src.utils.json_tools.get_order_item_dict', 'get_order_item_dict', (['ongoing_order_items', '"""OrderItem"""'], {}), "(ongoing_order_items, 'OrderItem')\n", (19029, 19063), False, 'from src.utils.json_tools import 
get_vehicle_instance_dict, get_order_item_dict\n'), ((19170, 19232), 'src.utils.json_tools.read_json_from_file', 'read_json_from_file', (['Configs.algorithm_vehicle_input_info_path'], {}), '(Configs.algorithm_vehicle_input_info_path)\n', (19189, 19232), False, 'from src.utils.json_tools import read_json_from_file, write_json_to_file\n'), ((19253, 19326), 'src.utils.json_tools.get_vehicle_instance_dict', 'get_vehicle_instance_dict', (['vehicle_infos', 'id_to_order_item', 'id_to_factory'], {}), '(vehicle_infos, id_to_order_item, id_to_factory)\n', (19278, 19326), False, 'from src.utils.json_tools import get_vehicle_instance_dict, get_order_item_dict\n'), ((16388, 16404), 'copy.copy', 'copy.copy', (['items'], {}), '(items)\n', (16397, 16404), False, 'import copy\n'), ((16959, 16984), 'copy.copy', 'copy.copy', (['delivery_items'], {}), '(delivery_items)\n', (16968, 16984), False, 'import copy\n'), ((17111, 17147), 'src.utils.logging_engine.logger.error', 'logger.error', (['"""Length of items is 0"""'], {}), "('Length of items is 0')\n", (17123, 17147), False, 'from src.utils.logging_engine import logger\n'), ((17478, 17514), 'src.utils.logging_engine.logger.error', 'logger.error', (['"""Length of items is 0"""'], {}), "('Length of items is 0')\n", (17490, 17514), False, 'from src.utils.logging_engine import logger\n'), ((19567, 19615), 'src.utils.json_tools.convert_nodes_to_json', 'convert_nodes_to_json', (['vehicle_id_to_destination'], {}), '(vehicle_id_to_destination)\n', (19588, 19615), False, 'from src.utils.json_tools import convert_nodes_to_json\n'), ((19685, 19735), 'src.utils.json_tools.convert_nodes_to_json', 'convert_nodes_to_json', (['vehicle_id_to_planned_route'], {}), '(vehicle_id_to_planned_route)\n', (19706, 19735), False, 'from src.utils.json_tools import convert_nodes_to_json\n'), ((17295, 17360), 'src.utils.logging_engine.logger.error', 'logger.error', (['"""The pickup factory of these items is not the same"""'], {}), "('The pickup factory of these 
items is not the same')\n", (17307, 17360), False, 'from src.utils.logging_engine import logger\n'), ((17666, 17733), 'src.utils.logging_engine.logger.error', 'logger.error', (['"""The delivery factory of these items is not the same"""'], {}), "('The delivery factory of these items is not the same')\n", (17678, 17733), False, 'from src.utils.logging_engine import logger\n'), ((12238, 12301), 'src.utils.logging_engine.logger.error', 'logger.error', (['f"""Planned route of vehicle {vehicle_id} is wrong"""'], {}), "(f'Planned route of vehicle {vehicle_id} is wrong')\n", (12250, 12301), False, 'from src.utils.logging_engine import logger\n'), ((4940, 4969), 'copy.copy', 'copy.copy', (['delivery_item_list'], {}), '(delivery_item_list)\n', (4949, 4969), False, 'import copy\n'), ((4159, 4188), 'copy.copy', 'copy.copy', (['delivery_item_list'], {}), '(delivery_item_list)\n', (4168, 4188), False, 'import copy\n')] |
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import random
from phi.fluidformat import *
def text_to_pixels(text, size=10, binary=False, as_numpy_array=True):
    """Rasterize *text* into a small image.

    :param text: string to render
    :param size: font size in pixels; image is `size` high and roughly
        ``3/4 * size`` wide per character
    :param binary: if True render a 1-bit ("1") image instead of 8-bit
        grayscale ("L")
    :param as_numpy_array: if True return a float32 array scaled to [0, 1],
        otherwise return the PIL image itself
    """
    image = Image.new("1" if binary else "L", (len(text)*size*3//4, size), 0)
    draw = ImageDraw.Draw(image)
    try:
        font = ImageFont.truetype("arial.ttf", size)
    except OSError:
        # Arial is not installed (e.g. non-Windows systems); fall back to a
        # font bundled with Pillow. (Was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.)
        font = ImageFont.truetype('Pillow/Tests/fonts/DejaVuSans.ttf', size=size)
    draw.text((0,0), text, fill=255, font=font)
    del draw
    if as_numpy_array:
        return np.array(image).astype(np.float32) / 255.0
    else:
        return image
# image = text_to_pixels("The", as_numpy_array=False)
# image.save("testimg.png", "PNG")
def alphabet_soup(shape, count, margin=1, total_content=100, fontsize=10):
    """Scatter `count` random alphanumeric glyphs into each batch entry.

    :param shape: 4D output shape (batch, height, width, 1)
    :param count: glyphs per batch entry
    :param margin: minimum distance of a glyph from the border
    :param total_content: the returned array is normalized so its sum equals this
    :param fontsize: font size passed to text_to_pixels
    :return: float32 array of the given shape
    """
    if len(shape) != 4:
        raise ValueError("shape must be 4D")
    canvas = np.zeros(shape)
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    for b in range(shape[0]):
        for _ in range(count):
            glyph = text_to_pixels(random.choice(alphabet), fontsize)
            gh, gw = glyph.shape
            row = random.randint(margin, shape[1] - margin - gh - 2)
            col = random.randint(margin, shape[2] - margin - gw - 2)
            canvas[b, row:row + gh, col:col + gw, 0] += glyph
    return canvas.astype(np.float32) * total_content / np.sum(canvas)
def random_word(shape, min_count, max_count, margin=1, total_content=100, fontsize=10, y=40):
    """Draw a random-length horizontal row of glyphs at vertical position `y`.

    :param shape: 4D output shape (batch, height, width, 1)
    :param min_count: minimum number of glyphs per batch entry
    :param max_count: maximum number of glyphs per batch entry
    :param margin: minimum horizontal distance from the border
    :param total_content: the returned array is normalized so its sum equals this
    :param fontsize: font size passed to text_to_pixels
    :param y: fixed vertical placement of every glyph
    :return: float32 array of the given shape
    """
    if len(shape) != 4:
        raise ValueError("shape must be 4D")
    canvas = np.zeros(shape)
    letters = '<KEY>'
    for b in range(shape[0]):
        n_glyphs = random.randint(min_count, max_count)
        for _ in range(n_glyphs):
            glyph = text_to_pixels(letters[random.randint(0, len(letters) - 1)], fontsize)
            gh, gw = glyph.shape
            x = random.randint(margin, shape[2] - margin - gw - 2)
            canvas[b, y:y + gh, x:x + gw, 0] += glyph
    return canvas.astype(np.float32) * total_content / np.sum(canvas)
def single_shape(shape, scene, margin=1, fluid_mask=None):
    """Place one randomly chosen stored "Shape" image per batch entry.

    Positions are re-drawn at random until the placed density does not overlap
    the obstacles implied by `fluid_mask` (see _all_density_valid).

    :param shape: 4D output shape (batch, height, width, channels)
    :param scene: scene object providing ``read_array`` and ``indices``
    :param margin: minimum distance of the shape from the border
    :param fluid_mask: optional binary mask of valid (fluid) cells
    :return: float32 array of the given shape
    """
    if len(shape) != 4:
        raise ValueError("shape must be 4D")
    result = np.zeros(shape)
    for b in range(shape[0]):
        img = scene.read_array("Shape", random.choice(scene.indices))[0, ...]
        h, w = img.shape[0], img.shape[1]
        placed = False
        while not placed:
            top = random.randint(margin, shape[1] - margin - h - 2)
            left = random.randint(margin, shape[2] - margin - w - 2)
            result[b, top:top + h, left:left + w, :] = img
            placed = _all_density_valid(result[b:b + 1, ...], fluid_mask)
            if not placed:
                # overlap with an obstacle: wipe this entry and retry
                result[b, ...] = 0
    return result.astype(np.float32)
def _all_density_valid(density, fluid_mask):
if fluid_mask is None:
return True
return np.sum(density * fluid_mask) == np.sum(density)
def push_density_inside(density_tile, tile_location, fluid_mask):  # (y, x)
    """
    Tries to adjust the tile_location so that the density_tile does not overlap with any obstacles.
    :param density_tile: 2D binary array, representing the density mask to be shifted
    :param tile_location: the initial location of the tile, (1D array with 2 values)
    :param fluid_mask: 2D binary array (must be larger than the tile)
    :return: the shifted location (1D array with 2 values)
    :raises ValueError: if the tile cannot be moved out of the obstacle
    """
    # Signed coordinate grids over the tile in [-1, 1]; used to find the
    # direction of the overlap's "center of mass".
    x, y = np.meshgrid(*[np.linspace(-1, 1, d) for d in density_tile.shape])
    # BUGFIX: `np.int` was removed in NumPy 1.24 -- use the builtin int.
    location = np.array(tile_location, dtype=int)

    def cropped_mask(loc):
        # Window of the fluid mask currently covered by the tile.
        # BUGFIX: index with a *tuple* of slices; a list of slices is
        # rejected by modern NumPy.
        slices = tuple(slice(loc[i], loc[i] + density_tile.shape[i]) for i in range(2))
        return fluid_mask[slices]

    while True:
        cropped_fluid_mask = cropped_mask(location)
        overlap = density_tile * (1 - cropped_fluid_mask)
        if np.sum(overlap) == 0:
            return location
        # Step one cell against the weighted direction of the overlap.
        update = -np.sign([np.sum(overlap * y), np.sum(overlap * x)]).astype(int)
        if np.all(update == 0):
            raise ValueError("Failed to push tile with initial location %s out of obstacle" % (tile_location,))
        location += update
# print(alphabet_soup([1, 16, 16, 1], 1000)[0,:,:,0])
# result = single_shape((2, 64, 64, 1), scene_at("data/shapelib/sim_000000"))
# print(result.shape, np.sum(result))
# Test push_density_inside
# fluid_mask = np.ones([64, 64])
# fluid_mask[10:20, 10:20] = 0
# density_tile = np.ones([5,5])
# tile_location = (18,9)
# print(push_density_inside(density_tile, tile_location, fluid_mask)) | [
"random.choice",
"PIL.ImageFont.truetype",
"numpy.sum",
"numpy.array",
"PIL.ImageDraw.Draw",
"numpy.zeros",
"numpy.linspace",
"numpy.all",
"random.randint"
] | [((268, 289), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (282, 289), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((861, 876), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (869, 876), True, 'import numpy as np\n'), ((1620, 1635), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1628, 1635), True, 'import numpy as np\n'), ((2255, 2270), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2263, 2270), True, 'import numpy as np\n'), ((3555, 3592), 'numpy.array', 'np.array', (['tile_location'], {'dtype': 'np.int'}), '(tile_location, dtype=np.int)\n', (3563, 3592), True, 'import numpy as np\n'), ((314, 351), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', 'size'], {}), "('arial.ttf', size)\n", (332, 351), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1437, 1450), 'numpy.sum', 'np.sum', (['array'], {}), '(array)\n', (1443, 1450), True, 'import numpy as np\n'), ((1705, 1741), 'random.randint', 'random.randint', (['min_count', 'max_count'], {}), '(min_count, max_count)\n', (1719, 1741), False, 'import random\n'), ((2106, 2119), 'numpy.sum', 'np.sum', (['array'], {}), '(array)\n', (2112, 2119), True, 'import numpy as np\n'), ((2926, 2954), 'numpy.sum', 'np.sum', (['(density * fluid_mask)'], {}), '(density * fluid_mask)\n', (2932, 2954), True, 'import numpy as np\n'), ((2958, 2973), 'numpy.sum', 'np.sum', (['density'], {}), '(density)\n', (2964, 2973), True, 'import numpy as np\n'), ((4033, 4052), 'numpy.all', 'np.all', (['(update == 0)'], {}), '(update == 0)\n', (4039, 4052), True, 'import numpy as np\n'), ((379, 445), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""Pillow/Tests/fonts/DejaVuSans.ttf"""'], {'size': 'size'}), "('Pillow/Tests/fonts/DejaVuSans.ttf', size=size)\n", (397, 445), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1164, 1225), 'random.randint', 'random.randint', (['margin', '(shape[1] - margin - tile.shape[0] - 2)'], {}), '(margin, 
shape[1] - margin - tile.shape[0] - 2)\n', (1178, 1225), False, 'import random\n'), ((1242, 1303), 'random.randint', 'random.randint', (['margin', '(shape[2] - margin - tile.shape[1] - 2)'], {}), '(margin, shape[2] - margin - tile.shape[1] - 2)\n', (1256, 1303), False, 'import random\n'), ((1915, 1976), 'random.randint', 'random.randint', (['margin', '(shape[2] - margin - tile.shape[1] - 2)'], {}), '(margin, shape[2] - margin - tile.shape[1] - 2)\n', (1929, 1976), False, 'import random\n'), ((2418, 2478), 'random.randint', 'random.randint', (['margin', '(shape[1] - margin - img.shape[0] - 2)'], {}), '(margin, shape[1] - margin - img.shape[0] - 2)\n', (2432, 2478), False, 'import random\n'), ((2495, 2555), 'random.randint', 'random.randint', (['margin', '(shape[2] - margin - img.shape[1] - 2)'], {}), '(margin, shape[2] - margin - img.shape[1] - 2)\n', (2509, 2555), False, 'import random\n'), ((3887, 3902), 'numpy.sum', 'np.sum', (['overlap'], {}), '(overlap)\n', (3893, 3902), True, 'import numpy as np\n'), ((2345, 2373), 'random.choice', 'random.choice', (['scene.indices'], {}), '(scene.indices)\n', (2358, 2373), False, 'import random\n'), ((3488, 3509), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'd'], {}), '(-1, 1, d)\n', (3499, 3509), True, 'import numpy as np\n'), ((546, 561), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (554, 561), True, 'import numpy as np\n'), ((3964, 3983), 'numpy.sum', 'np.sum', (['(overlap * y)'], {}), '(overlap * y)\n', (3970, 3983), True, 'import numpy as np\n'), ((3985, 4004), 'numpy.sum', 'np.sum', (['(overlap * x)'], {}), '(overlap * x)\n', (3991, 4004), True, 'import numpy as np\n')] |
import sys
from typing import Callable, Dict, Optional, Tuple, Union
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
from datafold.utils.general import is_symmetric_matrix, sort_eigenpairs
class NumericalMathError(Exception):
    """Raised for numerical problems, e.g. singular matrices or eigenvalues
    with a too-large imaginary part."""

    def __init__(self, message):
        super().__init__(message)
def scipy_eigsolver(
    kernel_matrix: Union[np.ndarray, scipy.sparse.csr_matrix],
    n_eigenpairs: int,
    is_symmetric: bool,
    is_stochastic: bool,
):
    """Compute eigenpairs of kernel matrix with scipy backend.

    The scipy solver is selected based on the number of eigenpairs to compute. Note
    that also for dense matrix cases a sparse solver is selected. There are two reasons
    for this decision:

    1. General dense matrix eigensolver only allow *all* eigenpairs to be computed. This
       is computational more costly than handling a dense matrix to a sparse solver
       which can also solve for `k` eigenvectors.
    2. The hermitian (symmetric) `eigh` solver would also allow a partial computation of
       eigenpairs, but it showed to be slower in microbenchmark tests than the sparse
       solvers for dense matrices.

    Internal selection of backend:

    * If :code:`n_eigenpairs == n_samples` (for dense / sparse):
        * symmetric `eigh`
        * non-symmetric `eig`
    * If :code:`n_eigenpairs < n_samples` (for dense / sparse):
        * symmetric `eigsh`
        * non-symmetric `eigs`

    Parameters
    ----------
    kernel_matrix
        Matrix of shape `(n_samples, n_samples)`.

    n_eigenpairs
        Number of eigenpairs to compute.

    is_symmetric
        True if matrix is symmetric. Note that there is no check and also numerical
        noise that breaks the symmetry can lead to instabilities.

    is_stochastic
        If True, the kernel matrix is assumed to be row-stochastic. This enables
        setting a `sigma` close to 1 to accelerate convergence.

    Returns
    -------
    numpy.ndarray
        eigenvalues of shape `(n_eigenpairs,)`

    numpy.ndarray
        eigenvectors of shape `(n_samples, n_eigenpairs)`
    """

    n_samples, n_features = kernel_matrix.shape

    # check only for n_eigenpairs == n_features and n_eigenpairs < n_features
    # wrong parametrized n_eigenpairs are catched in scipy functions
    if n_eigenpairs == n_features:
        if is_symmetric:
            scipy_eigvec_solver = scipy.linalg.eigh
        else:
            scipy_eigvec_solver = scipy.linalg.eig

        solver_kwargs: Dict[str, object] = {
            "check_finite": False
        }  # should be already checked

    else:  # n_eigenpairs < matrix.shape[1]
        if is_symmetric:
            scipy_eigvec_solver = scipy.sparse.linalg.eigsh
        else:
            scipy_eigvec_solver = scipy.sparse.linalg.eigs

        solver_kwargs = {
            "k": n_eigenpairs,
            "which": "LM",
            "v0": np.ones(n_samples),
            "tol": 1e-14,
        }

        # The selection of sigma is a result of a microbenchmark
        if is_symmetric and is_stochastic:
            # NOTE: it turned out that for self.kernel_.is_symmetric=False (-> eigs),
            # setting sigma=1 resulted into a slower computation.
            NUMERICAL_EXACT_BREAKER = 0.1
            solver_kwargs["sigma"] = 1.0 + NUMERICAL_EXACT_BREAKER
            solver_kwargs["mode"] = "normal"
        else:
            solver_kwargs["sigma"] = None

    # The scipy solvers only work on floating point (or complex) matrices.
    # BUGFIX: the original tested `dtype.kind not in ["fdFD"]`, which compares a
    # single character against a one-element list and was therefore always True.
    # Compare against the floating-point type *characters* instead (the same set
    # scipy's own `asfptype` recognizes).
    if (
        scipy.sparse.issparse(kernel_matrix)
        and kernel_matrix.data.dtype.char not in "fdFD"
    ):
        kernel_matrix = kernel_matrix.asfptype()
    # BUGFIX: the dense check was `dtype != "f"`, which needlessly copied
    # float64 matrices and silently discarded the imaginary part of complex
    # matrices; only convert when the dtype is not floating/complex already.
    elif isinstance(kernel_matrix, np.ndarray) and kernel_matrix.dtype.kind not in "fc":
        kernel_matrix = kernel_matrix.astype(float)

    eigvals, eigvects = scipy_eigvec_solver(kernel_matrix, **solver_kwargs)

    return eigvals, eigvects
_valid_backends = ["scipy"]
def compute_kernel_eigenpairs(
    kernel_matrix: Union[np.ndarray, scipy.sparse.csr_matrix],
    n_eigenpairs: int,
    is_symmetric: bool = False,
    is_stochastic: bool = False,
    normalize_eigenvectors: bool = False,
    backend: str = "scipy",
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute eigenvalues and -vectors from a kernel matrix, taking matrix
    properties into account.

    Parameters
    ----------
    kernel_matrix
        Kernel matrix of shape `(n_samples, n_samples)`.
    n_eigenpairs
        Number of eigenpairs to compute.
    is_symmetric
        If True, specialized algorithms for symmetric matrices are used and an
        additional numerical sanity check (real-valued eigenvalues) is enabled.
    is_stochastic
        If True, convergence can be improved because the trivial first
        eigenvalue is known and all following eigenvalues are smaller.
    normalize_eigenvectors
        If True, all eigenvectors are normalized to Euclidean norm 1.
    backend
        Valid backends: "scipy"

    Returns
    -------
    numpy.ndarray
        Eigenvalues in ascending order (absolute value).
    numpy.ndarray
        Eigenvectors (not necessarily normalized) in the same order as the
        eigenvalues.
    """
    # --- input validation -------------------------------------------------
    if kernel_matrix.ndim != 2 or kernel_matrix.shape[0] != kernel_matrix.shape[1]:
        raise ValueError(
            f"kernel matrix must be a square. "
            f"Got kernel_matrix.shape={kernel_matrix.shape}"
        )

    if isinstance(kernel_matrix, scipy.sparse.spmatrix):
        all_finite = np.isfinite(kernel_matrix.data).all()
    elif isinstance(kernel_matrix, np.ndarray):
        all_finite = np.isfinite(kernel_matrix).all()
    else:
        all_finite = True

    if not all_finite:
        raise ValueError(
            "kernel_matrix must only contain finite values (no np.nan " "or np.inf)"
        )

    if is_symmetric:
        assert is_symmetric_matrix(kernel_matrix)

    # --- solve ------------------------------------------------------------
    if backend != "scipy":
        raise ValueError(f"backend {backend} not known.")

    eigvals, eigvects = scipy_eigsolver(
        kernel_matrix=kernel_matrix,
        n_eigenpairs=n_eigenpairs,
        is_symmetric=is_symmetric,
        is_stochastic=is_stochastic,
    )

    # --- sanity checks on the result --------------------------------------
    if not (np.isfinite(eigvals).all() and np.isfinite(eigvects).all()):
        raise NumericalMathError(
            "eigenvalues or eigenvectors contain 'NaN' or 'inf' values."
        )

    if is_symmetric:
        if np.any(eigvals.imag > 1e2 * sys.float_info.epsilon):
            raise NumericalMathError(
                "Eigenvalues have non-negligible imaginary part (larger than "
                f"{1e2 * sys.float_info.epsilon})."
            )

        # the algorithm can leave numerical noise in the imaginary part
        eigvals = np.real(eigvals)
        eigvects = np.real(eigvects)

    if normalize_eigenvectors:
        eigvects /= np.linalg.norm(eigvects, axis=0)[np.newaxis, :]

    return sort_eigenpairs(eigvals, eigvects)
| [
"numpy.ones",
"datafold.utils.general.is_symmetric_matrix",
"numpy.any",
"numpy.real",
"datafold.utils.general.sort_eigenpairs",
"numpy.isfinite",
"numpy.linalg.norm"
] | [((8006, 8040), 'datafold.utils.general.sort_eigenpairs', 'sort_eigenpairs', (['eigvals', 'eigvects'], {}), '(eigvals, eigvects)\n', (8021, 8040), False, 'from datafold.utils.general import is_symmetric_matrix, sort_eigenpairs\n'), ((7519, 7572), 'numpy.any', 'np.any', (['(eigvals.imag > 100.0 * sys.float_info.epsilon)'], {}), '(eigvals.imag > 100.0 * sys.float_info.epsilon)\n', (7525, 7572), True, 'import numpy as np\n'), ((7840, 7856), 'numpy.real', 'np.real', (['eigvals'], {}), '(eigvals)\n', (7847, 7856), True, 'import numpy as np\n'), ((7876, 7893), 'numpy.real', 'np.real', (['eigvects'], {}), '(eigvects)\n', (7883, 7893), True, 'import numpy as np\n'), ((3431, 3449), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (3438, 3449), True, 'import numpy as np\n'), ((6431, 6465), 'datafold.utils.general.is_symmetric_matrix', 'is_symmetric_matrix', (['kernel_matrix'], {}), '(kernel_matrix)\n', (6450, 6465), False, 'from datafold.utils.general import is_symmetric_matrix, sort_eigenpairs\n'), ((7946, 7978), 'numpy.linalg.norm', 'np.linalg.norm', (['eigvects'], {'axis': '(0)'}), '(eigvects, axis=0)\n', (7960, 7978), True, 'import numpy as np\n'), ((6191, 6222), 'numpy.isfinite', 'np.isfinite', (['kernel_matrix.data'], {}), '(kernel_matrix.data)\n', (6202, 6222), True, 'import numpy as np\n'), ((7306, 7326), 'numpy.isfinite', 'np.isfinite', (['eigvals'], {}), '(eigvals)\n', (7317, 7326), True, 'import numpy as np\n'), ((7340, 7361), 'numpy.isfinite', 'np.isfinite', (['eigvects'], {}), '(eigvects)\n', (7351, 7361), True, 'import numpy as np\n'), ((6319, 6345), 'numpy.isfinite', 'np.isfinite', (['kernel_matrix'], {}), '(kernel_matrix)\n', (6330, 6345), True, 'import numpy as np\n')] |
########################################
__author__ = "<NAME>"
__license__ = "GNU GPLv3"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
########################################
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.conv import _ConvNd
import numpy as np
from scipy.stats import poisson
from scipy import signal
class NConvUNet(nn.Module):
    """U-Net built from normalized convolutions (:class:`NConv2d`) that jointly
    propagates a data tensor and a per-pixel confidence map.

    The encoder downsamples three times by max-pooling the confidence map and
    gathering the data values at the most-confident locations; the decoder
    upsamples with nearest-neighbor interpolation and fuses skip connections by
    channel concatenation.

    NOTE(review): ``nconv2``/``nconv3`` are applied at several scales (weight
    sharing across resolutions) -- presumably intentional, verify.
    """
    def __init__(self, in_ch, out_ch, num_channels=2, pos_fn='SoftPlus'):
        super().__init__()
        self.__name__ = 'NConvUNet'
        # Encoder: 5x5 normalized convolutions
        self.nconv1 = NConv2d(in_ch, in_ch * num_channels, (5, 5), pos_fn, 'k', padding=(2,2))
        self.nconv2 = NConv2d(in_ch * num_channels, in_ch * num_channels, (5, 5), pos_fn, 'k', padding=(2,2))
        self.nconv3 = NConv2d(in_ch * num_channels, in_ch * num_channels, (5, 5), pos_fn, 'k', padding=(2,2))

        # Decoder: 3x3 normalized convolutions on concatenated skip connections
        # (hence 2x the input channels)
        self.nconv4 = NConv2d(2 * in_ch * num_channels, in_ch * num_channels, (3, 3), pos_fn, 'k', padding=(1,1))
        self.nconv5 = NConv2d(2 * in_ch * num_channels, in_ch * num_channels, (3, 3), pos_fn, 'k', padding=(1,1))
        self.nconv6 = NConv2d(2 * in_ch * num_channels, in_ch * num_channels, (3, 3), pos_fn, 'k', padding=(1,1))

        # 1x1 projection to the requested number of output channels
        self.nconv7 = NConv2d(in_ch * num_channels, out_ch, (1, 1), pos_fn, 'k')

    def forward(self, x0, c0):
        # x*: data tensors, c*: matching confidence maps
        x1, c1 = self.nconv1(x0, c0)
        x1, c1 = self.nconv2(x1, c1)
        x1, c1 = self.nconv3(x1, c1)

        # Downsample 1: pool the confidence map; for the data, gather the value
        # at the argmax-confidence index of each 2x2 window.
        # NOTE(review): `x0.get_device()` fails for CPU tensors (returns -1);
        # this model apparently assumes CUDA inputs -- consider `x0.device`.
        ds = 2
        c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)
        x1_ds = torch.zeros(c1_ds.size()).to(x0.get_device())
        for i in range(x1_ds.size(0)):
            for j in range(x1_ds.size(1)):
                x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])
        # Rescale pooled confidence (2x2 window); keeps total confidence roughly
        # constant across scales -- assumption, verify.
        c1_ds /= 4

        x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)
        x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)

        # Downsample 2: same confidence-guided pooling one scale further down.
        ds = 2
        c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)

        x2_dss = torch.zeros(c2_dss.size()).to(x0.get_device())
        for i in range(x2_dss.size(0)):
            for j in range(x2_dss.size(1)):
                x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])
        c2_dss /= 4

        x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)

        # Downsample 3: deepest level of the encoder.
        ds = 2
        c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)

        x3_dss = torch.zeros(c3_dss.size()).to(x0.get_device())
        for i in range(x3_dss.size(0)):
            for j in range(x3_dss.size(1)):
                x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])
        c3_dss /= 4
        x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)

        # Upsample 1: back to the resolution of (x3_ds, c3_ds), fuse skip.
        x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')
        c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')
        x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((c3_ds, c4), 1))

        # Upsample 2: back to the resolution of (x2_ds, c2_ds), fuse skip.
        x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')
        c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')
        x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat((c2_ds, c34), 1))

        # Upsample 3: back to the full input resolution, fuse the first skip.
        x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')
        c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')
        xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23, c1), 1))

        # Final 1x1 projection; returns (data, confidence).
        xout, cout = self.nconv7(xout, cout)

        return xout, cout
# Normalized Convolution Layer
class NConv2d(_ConvNd):
    """2D normalized convolution: convolves data weighted by a confidence map
    and returns both the normalized data and the propagated confidence.

    Parameters mirror ``nn.Conv2d``; additionally:
        pos_fn: name of the positivity constraint applied to the weights
            ('softplus', 'softmax', 'exp', 'sigmoid'), or None for none.
        init_method: weight initialization -- 'x' Xavier, 'k' Kaiming,
            'n' normal distribution, 'p' Poisson-shaped kernel.
    """
    def __init__(self, in_channels, out_channels, kernel_size, pos_fn='softplus', init_method='n', stride=(1,1), padding=(0,0), dilation=(1,1), groups=1, bias=True):

        # Call _ConvNd constructor (transposed=False, output_padding=(0,0))
        super(NConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False, (0,0), groups, bias, padding_mode='zeros')

        self.eps = 1e-20  # guards against division by zero confidence
        self.pos_fn = pos_fn
        self.init_method = init_method

        # Initialize weights and bias
        self.init_parameters()

        if self.pos_fn is not None:
            EnforcePos.apply(self, 'weight', pos_fn)

    def forward(self, data, conf):
        """Return ``(nconv, cout)``: normalized data and propagated confidence."""
        # Normalized convolution: the denominator accumulates confidence, the
        # numerator the confidence-weighted data.
        denom = F.conv2d(conf, self.weight, None, self.stride,
                         self.padding, self.dilation, self.groups)
        nomin = F.conv2d(data*conf, self.weight, None, self.stride,
                         self.padding, self.dilation, self.groups)
        nconv = nomin / (denom + self.eps)

        # Add bias (broadcast over batch and spatial dimensions)
        b = self.bias
        sz = b.size(0)
        b = b.view(1, sz, 1, 1)
        b = b.expand_as(nconv)
        nconv += b

        # Propagate confidence: normalize the accumulated confidence by the
        # total kernel weight per output channel.
        cout = denom
        sz = cout.size()
        cout = cout.view(sz[0], sz[1], -1)

        k = self.weight
        k_sz = k.size()
        k = k.view(k_sz[0], -1)
        s = torch.sum(k, dim=-1, keepdim=True)

        cout = cout / s
        cout = cout.view(sz)

        return nconv, cout

    def enforce_pos(self):
        """Apply the configured positivity function to the weights in place."""
        p = self.weight
        if self.pos_fn.lower() == 'softmax':
            p_sz = p.size()
            p = p.view(p_sz[0], p_sz[1], -1)
            p = F.softmax(p, -1).data
            self.weight.data = p.view(p_sz)
        elif self.pos_fn.lower() == 'exp':
            self.weight.data = torch.exp(p).data
        elif self.pos_fn.lower() == 'softplus':
            self.weight.data = F.softplus(p, beta=10).data
        elif self.pos_fn.lower() == 'sigmoid':
            self.weight.data = F.sigmoid(p).data
        else:
            print('Undefined positive function!')
            return

    def init_parameters(self):
        """Initialize weights per `init_method` and set a small positive bias."""
        if self.init_method == 'x':  # Xavier
            torch.nn.init.xavier_uniform_(self.weight)
        elif self.init_method == 'k':  # Kaiming
            torch.nn.init.kaiming_uniform_(self.weight)
        elif self.init_method == 'n':  # Normal dist
            n = self.kernel_size[0] * self.kernel_size[1] * self.out_channels
            # BUGFIX: the original called math.sqrt, but `math` is never
            # imported in this module (NameError); use numpy instead.
            self.weight.data.normal_(2, np.sqrt(2. / n))
        elif self.init_method == 'p':  # Poisson-shaped kernel
            mu = self.kernel_size[0] / 2
            dist = poisson(mu)
            x = np.arange(0, self.kernel_size[0])
            y = np.expand_dims(dist.pmf(x), 1)
            w = signal.convolve2d(y, y.transpose(), 'full')
            w = torch.Tensor(w).type_as(self.weight)
            w = torch.unsqueeze(w, 0)
            w = torch.unsqueeze(w, 1)
            w = w.repeat(self.out_channels, 1, 1, 1)
            w = w.repeat(1, self.in_channels, 1, 1)
            # rand() keeps the weights strictly positive and breaks symmetry
            self.weight.data = w + torch.rand(w.shape)

        # Small positive bias
        self.bias = torch.nn.Parameter(torch.zeros(self.out_channels) + 0.01)
class EnforcePos(object):
    """Forward pre-hook that re-parameterizes a module parameter so it is
    always passed through a positivity function.

    The raw parameter is stored under ``<name>_p``; before every forward pass
    the hook recomputes ``<name>`` as ``_pos(<name>_p, pos_fn)``.
    """
    def __init__(self, name, pos_fn):
        self.name = name
        self.pos_fn = pos_fn

    def compute_weight(self, module):
        raw = getattr(module, self.name + '_p')
        return _pos(raw, self.pos_fn)

    @staticmethod
    def apply(module, name, pos_fn):
        hook = EnforcePos(name, pos_fn)

        weight = getattr(module, name)
        # Swap the plain parameter for its raw counterpart '<name>_p'.
        del module._parameters[name]
        module.register_parameter(name + '_p', Parameter(_pos(weight, pos_fn).data))
        setattr(module, name, hook.compute_weight(module))

        # Recompute the constrained weight before every forward().
        module.register_forward_pre_hook(hook)
        return hook

    def remove(self, module):
        weight = self.compute_weight(module)
        delattr(module, self.name)
        del module._parameters[self.name + '_p']
        module.register_parameter(self.name, Parameter(weight.data))

    def __call__(self, module, inputs):
        setattr(module, self.name, self.compute_weight(module))
def _pos(p, pos_fn):
pos_fn = pos_fn.lower()
if pos_fn == 'softmax':
p_sz = p.size()
p = p.view(p_sz[0],p_sz[1], -1)
p = F.softmax(p, -1)
return p.view(p_sz)
elif pos_fn == 'exp':
return torch.exp(p)
elif pos_fn == 'softplus':
return F.softplus(p, beta=10)
elif pos_fn == 'sigmoid':
return F.sigmoid(p)
else:
print('Undefined positive function!')
return
def remove_weight_pos(module, name='weight'):
    r"""Removes the EnforcePos positivity reparameterization from a module.

    The raw parameter ``<name>_p`` is folded back into a plain parameter
    ``<name>`` and the forward pre-hook installed by ``EnforcePos.apply``
    is removed.

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter

    Raises:
        ValueError: if no EnforcePos hook for ``name`` is registered on
            the module.
    """
    # Scan the registered forward pre-hooks for the matching EnforcePos hook.
    # Deleting inside the loop is safe here because we return immediately.
    for k, hook in module._forward_pre_hooks.items():
        if isinstance(hook, EnforcePos) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[k]
            return module

    raise ValueError("weight_norm of '{}' not found in {}"
.format(name, module)) | [
"torch.nn.functional.conv2d",
"torch.nn.parameter.Parameter",
"torch.rand",
"numpy.arange",
"torch.nn.init.xavier_uniform_",
"torch.unsqueeze",
"torch.Tensor",
"torch.exp",
"torch.nn.init.kaiming_uniform_",
"torch.nn.functional.sigmoid",
"torch.nn.functional.softplus",
"torch.sum",
"scipy.st... | [((1536, 1581), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['c1', 'ds', 'ds'], {'return_indices': '(True)'}), '(c1, ds, ds, return_indices=True)\n', (1548, 1581), True, 'import torch.nn.functional as F\n'), ((2012, 2060), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['c2_ds', 'ds', 'ds'], {'return_indices': '(True)'}), '(c2_ds, ds, ds, return_indices=True)\n', (2024, 2060), True, 'import torch.nn.functional as F\n'), ((2454, 2502), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['c3_ds', 'ds', 'ds'], {'return_indices': '(True)'}), '(c3_ds, ds, ds, return_indices=True)\n', (2466, 2502), True, 'import torch.nn.functional as F\n'), ((4444, 4536), 'torch.nn.functional.conv2d', 'F.conv2d', (['conf', 'self.weight', 'None', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(conf, self.weight, None, self.stride, self.padding, self.dilation,\n self.groups)\n', (4452, 4536), True, 'import torch.nn.functional as F\n'), ((4581, 4681), 'torch.nn.functional.conv2d', 'F.conv2d', (['(data * conf)', 'self.weight', 'None', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(data * conf, self.weight, None, self.stride, self.padding, self.\n dilation, self.groups)\n', (4589, 4681), True, 'import torch.nn.functional as F\n'), ((5131, 5165), 'torch.sum', 'torch.sum', (['k'], {'dim': '(-1)', 'keepdim': '(True)'}), '(k, dim=-1, keepdim=True)\n', (5140, 5165), False, 'import torch\n'), ((8232, 8248), 'torch.nn.functional.softmax', 'F.softmax', (['p', '(-1)'], {}), '(p, -1)\n', (8241, 8248), True, 'import torch.nn.functional as F\n'), ((3029, 3054), 'torch.cat', 'torch.cat', (['(x3_ds, x4)', '(1)'], {}), '((x3_ds, x4), 1)\n', (3038, 3054), False, 'import torch\n'), ((3056, 3081), 'torch.cat', 'torch.cat', (['(c3_ds, c4)', '(1)'], {}), '((c3_ds, c4), 1)\n', (3065, 3081), False, 'import torch\n'), ((3282, 3308), 'torch.cat', 'torch.cat', (['(x2_ds, x34)', '(1)'], {}), '((x2_ds, x34), 1)\n', (3291, 3308), False, 'import 
torch\n'), ((3310, 3336), 'torch.cat', 'torch.cat', (['(c2_ds, c34)', '(1)'], {}), '((c2_ds, c34), 1)\n', (3319, 3336), False, 'import torch\n'), ((3527, 3550), 'torch.cat', 'torch.cat', (['(x23, x1)', '(1)'], {}), '((x23, x1), 1)\n', (3536, 3550), False, 'import torch\n'), ((3552, 3575), 'torch.cat', 'torch.cat', (['(c23, c1)', '(1)'], {}), '((c23, c1), 1)\n', (3561, 3575), False, 'import torch\n'), ((6026, 6068), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['self.weight'], {}), '(self.weight)\n', (6055, 6068), False, 'import torch\n'), ((7949, 7971), 'torch.nn.parameter.Parameter', 'Parameter', (['weight.data'], {}), '(weight.data)\n', (7958, 7971), False, 'from torch.nn.parameter import Parameter\n'), ((8318, 8330), 'torch.exp', 'torch.exp', (['p'], {}), '(p)\n', (8327, 8330), False, 'import torch\n'), ((5453, 5469), 'torch.nn.functional.softmax', 'F.softmax', (['p', '(-1)'], {}), '(p, -1)\n', (5462, 5469), True, 'import torch.nn.functional as F\n'), ((6129, 6172), 'torch.nn.init.kaiming_uniform_', 'torch.nn.init.kaiming_uniform_', (['self.weight'], {}), '(self.weight)\n', (6159, 6172), False, 'import torch\n'), ((6994, 7024), 'torch.zeros', 'torch.zeros', (['self.out_channels'], {}), '(self.out_channels)\n', (7005, 7024), False, 'import torch\n'), ((8377, 8399), 'torch.nn.functional.softplus', 'F.softplus', (['p'], {'beta': '(10)'}), '(p, beta=10)\n', (8387, 8399), True, 'import torch.nn.functional as F\n'), ((5593, 5605), 'torch.exp', 'torch.exp', (['p'], {}), '(p)\n', (5602, 5605), False, 'import torch\n'), ((8445, 8457), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['p'], {}), '(p)\n', (8454, 8457), True, 'import torch.nn.functional as F\n'), ((5690, 5712), 'torch.nn.functional.softplus', 'F.softplus', (['p'], {'beta': '(10)'}), '(p, beta=10)\n', (5700, 5712), True, 'import torch.nn.functional as F\n'), ((6467, 6478), 'scipy.stats.poisson', 'poisson', (['mu'], {}), '(mu)\n', (6474, 6478), False, 'from scipy.stats import poisson\n'), 
((6495, 6528), 'numpy.arange', 'np.arange', (['(0)', 'self.kernel_size[0]'], {}), '(0, self.kernel_size[0])\n', (6504, 6528), True, 'import numpy as np\n'), ((6704, 6725), 'torch.unsqueeze', 'torch.unsqueeze', (['w', '(0)'], {}), '(w, 0)\n', (6719, 6725), False, 'import torch\n'), ((6741, 6762), 'torch.unsqueeze', 'torch.unsqueeze', (['w', '(1)'], {}), '(w, 1)\n', (6756, 6762), False, 'import torch\n'), ((5796, 5808), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['p'], {}), '(p)\n', (5805, 5808), True, 'import torch.nn.functional as F\n'), ((6902, 6921), 'torch.rand', 'torch.rand', (['w.shape'], {}), '(w.shape)\n', (6912, 6921), False, 'import torch\n'), ((6651, 6666), 'torch.Tensor', 'torch.Tensor', (['w'], {}), '(w)\n', (6663, 6666), False, 'import torch\n')] |
## -*- coding: utf-8 -*-
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import numpy as np
import math
class Draw(QGraphicsItem):
def __init__(self,width=180, height=180, size=90):
super(Draw,self).__init__()
self.offsetx = 10
self.offsety = 10
self.width = width*2
self.height = height*2
self.size = size
# this is the initial parameters for turn
self.startx = 90
self.starty = 90
self.sla_start_x = 90
self.sla_start_y =180 - 50
self.tread = 63
self.entry_vel = 1.0
self.ang_accel = 0.01758
self.weight = 0.080
self.angvel_list = []
self.pos_x_theo = []
self.pos_y_theo = []
self.pos_x_real = []
self.pos_y_real = []
self.r_tire_pos_x = []
self.r_tire_pos_y = []
self.l_tire_pos_x = []
self.l_tire_pos_y = []
    def paint(self, painter, option, widget):
        """Render the grid, diagonal guide lines, red walls and the trajectory
        point clouds computed by cacl()."""
        # Black pen: 5x5 grid lines plus diagonal guides
        painter.setPen(QColor(0,0,0))
        for i in range(0,5):
            painter.drawLine(self.offsetx + i*self.size, self.offsety,self.offsetx + i*self.size, self.offsety + self.width)
            painter.drawLine(self.offsetx, self.offsety + i*self.size, self.offsetx + self.height, self.offsety + i*self.size)
        # Diagonals at odd grid lines (i = 1, 3)
        for i in range(1,4,2):
            painter.drawLine(self.offsetx + self.size*i,self.offsety + self.size *4,self.offsetx + self.size*4,self.offsety+self.size*i)
            painter.drawLine(self.offsetx, self.offsety + self.size*i,self.offsetx + self.size*i,self.offsety)
        # Remaining diagonal guide lines across the board
        painter.drawLine(self.offsetx,self.offsety+self.size*3,self.offsetx + self.size,self.offsety + self.size*4)
        painter.drawLine(self.offsetx,self.offsety+self.size*1,self.offsetx + self.size*3,self.offsety + self.size*4)
        painter.drawLine(self.offsetx+self.size*1,self.offsety,self.offsetx + self.size*4,self.offsety + self.size*3)
        painter.drawLine(self.offsetx+self.size*3,self.offsety,self.offsetx + self.size*4,self.offsety + self.size*1)
        # Red filled rectangles: outer walls and a center obstacle
        painter.setBrush(Qt.red)
        painter.drawRect(0,0,self.offsetx*2,self.offsety + self.width)
        painter.drawRect(0,0,self.offsety + self.height,self.offsety*2)
        painter.drawRect(self.size*2,self.size*2,self.offsetx*2,self.size*2 + self.offsety)
        # Red: theoretical trajectory points
        painter.setPen(QColor(255,0,0))
        for i in range(len(self.pos_x_theo)):
            painter.drawPoint(self.pos_x_theo[i],self.pos_y_theo[i])
        # Magenta: "real" (slip-corrected) trajectory points
        painter.setPen(QColor(255,0,255))
        for i in range(len(self.pos_x_real)):
            painter.drawPoint(self.pos_x_real[i],self.pos_y_real[i])
        # Green: left tire path
        painter.setPen(QColor(0,255,0))
        for i in range(len(self.l_tire_pos_x)):
            painter.drawPoint(self.l_tire_pos_x[i],self.l_tire_pos_y[i])
        # Green: right tire path
        painter.setPen(QColor(0,255,0))
        for i in range(len(self.r_tire_pos_x)):
            painter.drawPoint(self.r_tire_pos_x[i],self.r_tire_pos_y[i])
def cacl(self,target_ang,init_speed = 700,max_G = 0.5):
precision = 1/10
angvel = 0
mypos_x_theo =0
mypos_y_theo = 0
mypos_x_real = 0
mypos_y_real = 0
r_tire_x = 0
r_tire_y = 0
l_tire_x = 0
l_tire_y = 0
theta_theo = 0
theta_real = 0
theta_right = 0
theta_left = 0
count = 0
second_count = 0
speed_r = 0
speed_l = 0
const = 100
beta = 0
G = 0
ang_vel_beta = 0
flag = 0
del self.pos_x_theo[:]
del self.pos_y_theo[:]
del self.pos_x_real[:]
del self.pos_y_real[:]
del self.r_tire_pos_x[:]
del self.r_tire_pos_y[:]
del self.l_tire_pos_x[:]
del self.l_tire_pos_y[:]
del self.angvel_list[:]
# while(theta_theo - beta < target_ang):
# while((angvel + ang_vel_beta) >= 0):
while(angvel >= 0):
# while((angvel ) >= 0):
if (G < max_G and flag == 0):
angvel += self.ang_accel * precision
chro_end_ang = theta_theo
elif theta_theo < (target_ang - chro_end_ang):
# elif theta_theo-beta < (target_ang - chro_end_ang):
flag = 1
pass
# elif theta_theo <= target_ang:
# elif (angvel + ang_vel_beta) >= 0:
elif (angvel) >= 0:
angvel += -self.ang_accel * precision
theta_theo += angvel * precision
ang_vel_beta = (-beta*const/init_speed + angvel) * precision
beta += (-beta*const/init_speed + angvel) * precision
try:
radius = 1/math.radians(angvel) #radius in mm
except:
radius = 10000
G = radius/1000 * (math.radians(angvel * init_speed + ang_vel_beta)) **2 / 9.8
mypos_x_theo += np.cos(math.radians(90.0-theta_theo))*precision
mypos_y_theo += np.sin((90.0-theta_theo)*math.pi/180.0)*precision
mypos_x_real += np.cos(math.radians(90.0-theta_theo+beta))*precision
mypos_y_real += np.sin((90.0-theta_theo+beta)*math.pi/180.0)*precision
r_tire_x = mypos_x_real+self.tread*0.5*np.cos((theta_theo-beta)*math.pi/180.0)
r_tire_y = mypos_y_real-self.tread*0.5*np.sin((theta_theo-beta)*math.pi/180.0)
l_tire_x = mypos_x_real-self.tread*0.5*np.cos((theta_theo-beta)*math.pi/180.0)
l_tire_y = mypos_y_real+self.tread*0.5*np.sin((theta_theo-beta)*math.pi/180.0)
if(count == 1/precision):
self.angvel_list.append(angvel)
print(angvel,theta_theo,theta_theo-beta,beta,G)
count = 0
count +=1
self.pos_x_theo.append(self.sla_start_x + self.offsetx + mypos_x_theo)
self.pos_y_theo.append(self.size*4 -(self.sla_start_y + self.offsety + mypos_y_theo))
self.pos_x_real.append(self.sla_start_x + self.offsetx + mypos_x_real)
self.pos_y_real.append(self.size*4 -(self.sla_start_y + self.offsety + mypos_y_real))
self.l_tire_pos_x.append(self.sla_start_x + l_tire_x + self.offsetx)
self.l_tire_pos_y.append(self.size*4 - (self.sla_start_y + self.offsety + l_tire_y))
self.r_tire_pos_x.append(self.sla_start_x + r_tire_x + self.offsetx)
self.r_tire_pos_y.append(self.size*4 - (self.sla_start_y + self.offsety + r_tire_y))
if len(self.pos_x_theo) > 5000:break
# if(target_ang < 120 or target_ang > 85):
# for i in range(len(self.pos_x_theo)):
# self.pos_y_real[i] += (self.sla_start_y + self.offsety*2 + mypos_y_real) - self.size*3
# self.pos_y_theo[i] += (self.sla_start_y + self.offsety*2 + mypos_y_real) - self.size*3
# self.l_tire_pos_y[i] += (self.sla_start_y + self.offsety*2 + mypos_y_real) - self.size*3
# self.r_tire_pos_y[i] += (self.sla_start_y + self.offsety*2 + mypos_y_real) - self.size*3
self.update()
return beta
def save(self):
# f = open('test.c','w')
# f.writelines(self.angvel_list)
# f.close()
csv_file = u"test"
csv_filename = csv_file + ".c"
np.savetxt(csv_filename, (self.angvel_list), delimiter=",",header=" ",fmt='%f')
class MainWindow(QWidget):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.graphicsView = QGraphicsView()
scene = QGraphicsScene(self.graphicsView)
scene.setSceneRect(0, 0, 400, 400)
self.graphicsView.setScene(scene)
self.draw = Draw()
scene.addItem(self.draw)
self.init_vel = QLineEdit()
self.init_vel.setText(str("1000"))
self.maxG = QLineEdit()
self.maxG.setText(str("1.0"))
lineLayout = QVBoxLayout()
lineLayout.addWidget(QLabel("Entering Velocity[mm/s]"))
lineLayout.addWidget(self.init_vel)
lineLayout.addWidget(QLabel("Max_G"))
lineLayout.addWidget(self.maxG)
self.runButton = QPushButton("&Run")
self.runButton.clicked.connect(self.run)
self.saveButton = QPushButton("&Save")
self.saveButton.clicked.connect(self.save)
buttonLayout = QVBoxLayout()
buttonLayout.addWidget(self.runButton)
buttonLayout.addWidget(self.saveButton)
self.t_45 = QRadioButton("45")
self.short90 = QRadioButton("short90")
self.long90 = QRadioButton("long90")
self.t_135 = QRadioButton("135")
self.t_180 = QRadioButton("180")
self.t_v= QRadioButton("vturn")
self.bg1 = QButtonGroup()
self.bg1.addButton(self.t_45)
self.bg1.addButton(self.short90)
self.bg1.addButton(self.long90)
self.bg1.addButton(self.t_135)
self.bg1.addButton(self.t_180)
self.bg1.addButton(self.t_v)
vbox = QVBoxLayout()
vbox.addWidget(self.t_45)
vbox.addWidget(self.short90)
vbox.addWidget(self.long90)
vbox.addWidget(self.t_135)
vbox.addWidget(self.t_180)
vbox.addWidget(self.t_v)
propertyLayout = QVBoxLayout()
propertyLayout.setAlignment(Qt.AlignTop)
propertyLayout.addLayout(lineLayout)
propertyLayout.addLayout(vbox)
propertyLayout.addLayout(buttonLayout)
mainLayout = QHBoxLayout()
mainLayout.setAlignment(Qt.AlignTop)
mainLayout.addWidget(self.graphicsView)
mainLayout.addLayout(propertyLayout)
self.setLayout(mainLayout)
self.setWindowTitle("Turn Simulator")
self.updating_rule = False
def run(self):
self.graphicsView.update()
if self.t_45.isChecked():
QMessageBox.about(self,"Message","45 turn")
beta = self.draw.cacl(45,int(self.init_vel.text()),float(self.maxG.text()))
self.graphicsView.update()
self.draw.cacl(45+beta,int(self.init_vel.text()),float(self.maxG.text()))
elif self.short90.isChecked():
QMessageBox.about(self,"Message","short 90 turn")
beta = self.draw.cacl(90,int(self.init_vel.text()),float(self.maxG.text()))
self.graphicsView.update()
self.draw.cacl(90+beta,int(self.init_vel.text()),float(self.maxG.text()))
elif self.long90.isChecked():
QMessageBox.about(self,"Message","long 90 turn")
beta = self.draw.cacl(90,int(self.init_vel.text()),float(self.maxG.text()))
self.graphicsView.update()
self.draw.cacl(90+beta,int(self.init_vel.text()),float(self.maxG.text()))
elif self.t_135.isChecked():
QMessageBox.about(self,"Message","135 turn")
beta = self.draw.cacl(135,int(self.init_vel.text()),float(self.maxG.text()))
self.graphicsView.update()
self.draw.cacl(135+beta,int(self.init_vel.text()),float(self.maxG.text()))
elif self.t_180.isChecked():
QMessageBox.about(self,"Message","vturn")
beta = self.draw.cacl(180,int(self.init_vel.text()),float(self.maxG.text()))
# beta1 = self.draw.cacl(180)
# beta2 = self.draw.cacl(180+beta1)
# print(beta1,beta2)
elif self.t_v.isChecked():
QMessageBox.about(self,"Message","vturn")
def save(self):
self.draw.save()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
sys.exit(app.exec_())
| [
"numpy.sin",
"numpy.cos",
"numpy.savetxt",
"math.radians"
] | [((7293, 7372), 'numpy.savetxt', 'np.savetxt', (['csv_filename', 'self.angvel_list'], {'delimiter': '""","""', 'header': '""" """', 'fmt': '"""%f"""'}), "(csv_filename, self.angvel_list, delimiter=',', header=' ', fmt='%f')\n", (7303, 7372), True, 'import numpy as np\n'), ((4987, 5032), 'numpy.sin', 'np.sin', (['((90.0 - theta_theo) * math.pi / 180.0)'], {}), '((90.0 - theta_theo) * math.pi / 180.0)\n', (4993, 5032), True, 'import numpy as np\n'), ((5146, 5198), 'numpy.sin', 'np.sin', (['((90.0 - theta_theo + beta) * math.pi / 180.0)'], {}), '((90.0 - theta_theo + beta) * math.pi / 180.0)\n', (5152, 5198), True, 'import numpy as np\n'), ((4696, 4716), 'math.radians', 'math.radians', (['angvel'], {}), '(angvel)\n', (4708, 4716), False, 'import math\n'), ((4918, 4949), 'math.radians', 'math.radians', (['(90.0 - theta_theo)'], {}), '(90.0 - theta_theo)\n', (4930, 4949), False, 'import math\n'), ((5072, 5110), 'math.radians', 'math.radians', (['(90.0 - theta_theo + beta)'], {}), '(90.0 - theta_theo + beta)\n', (5084, 5110), False, 'import math\n'), ((5252, 5297), 'numpy.cos', 'np.cos', (['((theta_theo - beta) * math.pi / 180.0)'], {}), '((theta_theo - beta) * math.pi / 180.0)\n', (5258, 5297), True, 'import numpy as np\n'), ((5343, 5388), 'numpy.sin', 'np.sin', (['((theta_theo - beta) * math.pi / 180.0)'], {}), '((theta_theo - beta) * math.pi / 180.0)\n', (5349, 5388), True, 'import numpy as np\n'), ((5434, 5479), 'numpy.cos', 'np.cos', (['((theta_theo - beta) * math.pi / 180.0)'], {}), '((theta_theo - beta) * math.pi / 180.0)\n', (5440, 5479), True, 'import numpy as np\n'), ((5525, 5570), 'numpy.sin', 'np.sin', (['((theta_theo - beta) * math.pi / 180.0)'], {}), '((theta_theo - beta) * math.pi / 180.0)\n', (5531, 5570), True, 'import numpy as np\n'), ((4822, 4870), 'math.radians', 'math.radians', (['(angvel * init_speed + ang_vel_beta)'], {}), '(angvel * init_speed + ang_vel_beta)\n', (4834, 4870), False, 'import math\n')] |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
from torch.utils.data import Dataset
import numpy as np
import time
import os
import cv2
import sys
import utils
from datasets.scannet_scene import ScanNetScene
class PlaneDatasetSingle(Dataset):
def __init__(self, options, config, split, random=True, loadNeighborImage=False, load_semantics=False, load_boundary=False):
self.options = options
self.config = config
self.split = split
self.random = random
self.dataFolder = options.dataFolder
self.scenes = []
self.sceneImageIndices = []
self.loadClassMap()
planenet_scene_ids_val = np.load('datasets/scene_ids_val.npy')
planenet_scene_ids_val = {scene_id.decode('utf-8'): True for scene_id in planenet_scene_ids_val}
with open(self.dataFolder + '/ScanNet/Tasks/Benchmark/scannetv1_' + split + '.txt') as f:
for line in f:
scene_id = line.strip()
if split == 'test':
## Remove scenes which are in PlaneNet's training set for fair comparison
if scene_id not in planenet_scene_ids_val:
continue
pass
scenePath = self.dataFolder + '/scans/' + scene_id
if not os.path.exists(scenePath + '/' + scene_id + '.txt') or not os.path.exists(scenePath + '/annotation/planes.npy'):
continue
scene = ScanNetScene(options, scenePath, scene_id, self.confident_labels, self.layout_labels, load_semantics=load_semantics, load_boundary=load_boundary)
self.scenes.append(scene)
self.sceneImageIndices += [[len(self.scenes) - 1, imageIndex] for imageIndex in range(len(scene.imagePaths))]
continue
pass
if random:
t = int(time.time() * 1000000)
np.random.seed(((t & 0xff000000) >> 24) +
((t & 0x00ff0000) >> 8) +
((t & 0x0000ff00) << 8) +
((t & 0x000000ff) << 24))
else:
np.random.seed(0)
pass
np.random.shuffle(self.sceneImageIndices)
self.invalid_indices = {}
with open(self.dataFolder + '/invalid_indices_' + split + '.txt', 'r') as f:
for line in f:
tokens = line.split(' ')
if len(tokens) == 3:
assert(int(tokens[2]) < 10000)
invalid_index = int(tokens[1]) * 10000 + int(tokens[2])
if invalid_index not in self.invalid_indices:
self.invalid_indices[invalid_index] = True
pass
pass
continue
pass
self.sceneImageIndices = [[sceneIndex, imageIndex] for sceneIndex, imageIndex in self.sceneImageIndices if (sceneIndex * 10000 + imageIndex) not in self.invalid_indices]
print('num images', len(self.sceneImageIndices))
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
self.loadNeighborImage = loadNeighborImage
return
def loadClassMap(self):
classLabelMap = {}
with open(self.dataFolder + '/scannetv2-labels.combined.tsv') as info_file:
line_index = 0
for line in info_file:
if line_index > 0:
line = line.split('\t')
key = line[1].strip()
if line[4].strip() != '':
label = int(line[4].strip())
else:
label = -1
pass
classLabelMap[key] = label
classLabelMap[key + 's'] = label
classLabelMap[key + 'es'] = label
pass
line_index += 1
continue
pass
confidentClasses = {'wall': True,
'floor': True,
'cabinet': True,
'bed': True,
'chair': False,
'sofa': False,
'table': True,
'door': True,
'window': True,
'bookshelf': False,
'picture': True,
'counter': True,
'blinds': False,
'desk': True,
'shelf': False,
'shelves': False,
'curtain': False,
'dresser': True,
'pillow': False,
'mirror': False,
'entrance': True,
'floor mat': True,
'clothes': False,
'ceiling': True,
'book': False,
'books': False,
'refridgerator': True,
'television': True,
'paper': False,
'towel': False,
'shower curtain': False,
'box': True,
'whiteboard': True,
'person': False,
'night stand': True,
'toilet': False,
'sink': False,
'lamp': False,
'bathtub': False,
'bag': False,
'otherprop': False,
'otherstructure': False,
'otherfurniture': False,
'unannotated': False,
'': False
}
self.confident_labels = {}
for name, confidence in confidentClasses.items():
if confidence and name in classLabelMap:
self.confident_labels[classLabelMap[name]] = True
pass
continue
self.layout_labels = {1: True, 2: True, 22: True, 9: True}
return
def __len__(self):
return len(self.sceneImageIndices)
def transformPlanes(self, transformation, planes):
planeOffsets = np.linalg.norm(planes, axis=-1, keepdims=True)
centers = planes
centers = np.concatenate([centers, np.ones((planes.shape[0], 1))], axis=-1)
newCenters = np.transpose(np.matmul(transformation, np.transpose(centers)))
newCenters = newCenters[:, :3] / newCenters[:, 3:4]
refPoints = planes - planes / np.maximum(planeOffsets, 1e-4)
refPoints = np.concatenate([refPoints, np.ones((planes.shape[0], 1))], axis=-1)
newRefPoints = np.transpose(np.matmul(transformation, np.transpose(refPoints)))
newRefPoints = newRefPoints[:, :3] / newRefPoints[:, 3:4]
planeNormals = newRefPoints - newCenters
planeNormals /= np.linalg.norm(planeNormals, axis=-1, keepdims=True)
planeOffsets = np.sum(newCenters * planeNormals, axis=-1, keepdims=True)
newPlanes = planeNormals * planeOffsets
return newPlanes
def __getitem__(self, index):
t = int(time.time() * 1000000)
np.random.seed(((t & 0xff000000) >> 24) +
((t & 0x00ff0000) >> 8) +
((t & 0x0000ff00) << 8) +
((t & 0x000000ff) << 24))
if self.config.ANCHOR_TYPE == 'layout':
return self.getItemLayout(index)
if self.config.ANCHOR_TYPE == 'structure':
return self.getItemStructure(index)
while True:
if self.random:
index = np.random.randint(len(self.sceneImageIndices))
else:
index = index % len(self.sceneImageIndices)
pass
sceneIndex, imageIndex = self.sceneImageIndices[index]
scene = self.scenes[sceneIndex]
try:
image, planes, plane_info, segmentation, depth, camera, extrinsics = scene[imageIndex]
if len(planes) == 0:
index += 1
continue
except:
index += 1
continue
pass
if segmentation.max() < 0:
index += 1
continue
break
instance_masks = []
class_ids = []
parameters = []
if len(planes) > 0:
if 'joint' in self.config.ANCHOR_TYPE:
distances = np.linalg.norm(np.expand_dims(planes, 1) - self.config.ANCHOR_PLANES, axis=-1)
plane_anchors = distances.argmin(-1)
elif self.config.ANCHOR_TYPE == 'Nd':
plane_offsets = np.linalg.norm(planes, axis=-1)
plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)
distances_N = np.linalg.norm(np.expand_dims(plane_normals, 1) - self.config.ANCHOR_NORMALS, axis=-1)
normal_anchors = distances_N.argmin(-1)
distances_d = np.abs(np.expand_dims(plane_offsets, -1) - self.config.ANCHOR_OFFSETS)
offset_anchors = distances_d.argmin(-1)
elif self.config.ANCHOR_TYPE in ['normal', 'patch']:
plane_offsets = np.linalg.norm(planes, axis=-1)
plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)
distances_N = np.linalg.norm(np.expand_dims(plane_normals, 1) - self.config.ANCHOR_NORMALS, axis=-1)
normal_anchors = distances_N.argmin(-1)
elif self.config.ANCHOR_TYPE == 'normal_none':
plane_offsets = np.linalg.norm(planes, axis=-1)
plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)
pass
pass
for planeIndex, plane in enumerate(planes):
m = segmentation == planeIndex
if m.sum() < 1:
continue
instance_masks.append(m)
if self.config.ANCHOR_TYPE == 'none':
class_ids.append(1)
parameters.append(np.concatenate([plane, np.zeros(1)], axis=0))
elif 'joint' in self.config.ANCHOR_TYPE:
class_ids.append(plane_anchors[planeIndex] + 1)
residual = plane - self.config.ANCHOR_PLANES[plane_anchors[planeIndex]]
parameters.append(np.concatenate([residual, np.zeros(1)], axis=0))
elif self.config.ANCHOR_TYPE == 'Nd':
class_ids.append(normal_anchors[planeIndex] * len(self.config.ANCHOR_OFFSETS) + offset_anchors[planeIndex] + 1)
normal = plane_normals[planeIndex] - self.config.ANCHOR_NORMALS[normal_anchors[planeIndex]]
offset = plane_offsets[planeIndex] - self.config.ANCHOR_OFFSETS[offset_anchors[planeIndex]]
parameters.append(np.concatenate([normal, np.array([offset])], axis=0))
elif self.config.ANCHOR_TYPE == 'normal':
class_ids.append(normal_anchors[planeIndex] + 1)
normal = plane_normals[planeIndex] - self.config.ANCHOR_NORMALS[normal_anchors[planeIndex]]
parameters.append(np.concatenate([normal, np.zeros(1)], axis=0))
elif self.config.ANCHOR_TYPE == 'normal_none':
class_ids.append(1)
normal = plane_normals[planeIndex]
parameters.append(np.concatenate([normal, np.zeros(1)], axis=0))
else:
assert(False)
pass
continue
parameters = np.array(parameters, dtype=np.float32)
mask = np.stack(instance_masks, axis=2)
class_ids = np.array(class_ids, dtype=np.int32)
image, image_metas, gt_class_ids, gt_boxes, gt_masks, gt_parameters = load_image_gt(self.config, index, image, mask, class_ids, parameters, augment=self.split == 'train')
## RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,
gt_class_ids, gt_boxes, self.config)
## If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
gt_parameters = gt_parameters[ids]
## Add to batch
rpn_match = rpn_match[:, np.newaxis]
image = utils.mold_image(image.astype(np.float32), self.config)
depth = np.concatenate([np.zeros((80, 640)), depth, np.zeros((80, 640))], axis=0)
segmentation = np.concatenate([np.full((80, 640), fill_value=-1, dtype=np.int32), segmentation, np.full((80, 640), fill_value=-1, dtype=np.int32)], axis=0)
info = [image.transpose((2, 0, 1)).astype(np.float32), image_metas, rpn_match, rpn_bbox.astype(np.float32), gt_class_ids, gt_boxes.astype(np.float32), gt_masks.transpose((2, 0, 1)).astype(np.float32), gt_parameters, depth.astype(np.float32), segmentation, camera.astype(np.float32)]
if self.loadNeighborImage:
if imageIndex + self.options.frameGap < len(scene.imagePaths):
imagePath = scene.imagePaths[imageIndex + self.options.frameGap]
else:
imagePath = scene.imagePaths[imageIndex - self.options.frameGap]
pass
image_2 = cv2.imread(imagePath)
image_2 = cv2.resize(image_2, (self.config.IMAGE_MAX_DIM, self.config.IMAGE_MAX_DIM))
info.append(image_2.transpose((2, 0, 1)).astype(np.float32))
extrinsics_2_inv = []
posePath = imagePath.replace('color', 'pose').replace('.jpg', '.txt')
with open(posePath, 'r') as f:
for line in f:
extrinsics_2_inv += [float(value) for value in line.strip().split(' ') if value.strip() != '']
continue
f.close()
pass
extrinsics_2_inv = np.array(extrinsics_2_inv).reshape((4, 4))
extrinsics_2 = np.linalg.inv(extrinsics_2_inv)
temp = extrinsics_2[1].copy()
extrinsics_2[1] = extrinsics_2[2]
extrinsics_2[2] = -temp
transformation = np.matmul(extrinsics_2, np.linalg.inv(extrinsics))
if np.any(np.isnan(transformation)):
transformation = np.concatenate([np.diag(np.ones(3)), np.zeros((3, 1))], axis=-1)
pass
rotation = transformation[:3, :3]
translation = transformation[:3, 3]
axis, angle = utils.rotationMatrixToAxisAngle(rotation)
pose = np.concatenate([translation, axis * angle], axis=0).astype(np.float32)
info.append(pose)
info.append(scene.scenePath + ' ' + str(imageIndex))
pass
return info
def getAnchorPlanesNormalOffset(self, visualize=False):
for k in [7, ]:
print('k', k)
filename_N = self.dataFolder + '/anchor_planes_N_' + str(k) + '.npy'
filename_d = self.dataFolder + '/anchor_planes_d.npy'
if os.path.exists(filename_N) and os.path.exists(filename_d) and False:
return
if os.path.exists('test/anchor_planes/all_planes.npy'):
all_planes = np.load('test/anchor_planes/all_planes.npy')
else:
all_planes = []
for sceneIndex, imageIndex in self.sceneImageIndices[:10000]:
if len(all_planes) % 100 == 0:
print(len(all_planes))
pass
scene = self.scenes[sceneIndex]
image, planes, plane_info, segmentation, depth, camera, extrinsics = scene[imageIndex]
planes = planes[np.linalg.norm(planes, axis=-1) > 1e-4]
if len(planes) == 0:
continue
all_planes.append(planes)
continue
all_planes = np.concatenate(all_planes, axis=0)
np.save('test/anchor_planes/all_planes.npy', all_planes)
pass
from sklearn.cluster import KMeans
num_anchor_planes_N = k
num_anchor_planes_d = 3
offsets = np.linalg.norm(all_planes, axis=-1)
normals = all_planes / np.expand_dims(offsets, -1)
kmeans_N = KMeans(n_clusters=num_anchor_planes_N).fit(normals)
self.anchor_planes_N = kmeans_N.cluster_centers_
## Global offset anchors
kmeans_d = KMeans(n_clusters=num_anchor_planes_d).fit(np.expand_dims(offsets, -1))
self.anchor_planes_d = kmeans_d.cluster_centers_
if visualize:
color_map = utils.ColorPalette(max(num_anchor_planes_N, num_anchor_planes_d)).getColorMap()
normals_rotated = normals.copy()
normals_rotated[:, 1] = normals[:, 2]
normals_rotated[:, 2] = -normals[:, 1]
plane_cloud = np.concatenate([normals_rotated, color_map[kmeans_N.labels_]], axis=-1)
utils.writePointCloud('test/anchor_planes/anchor_planes_N.ply', plane_cloud)
plane_cloud = np.concatenate([all_planes, color_map[kmeans_d.labels_]], axis=-1)
utils.writePointCloud('test/anchor_planes/anchor_planes_d.ply', plane_cloud)
width = 500
height = 500
Us = np.round(np.arctan2(normals[:, 1], normals[:, 0]) / np.pi * width).astype(np.int32)
Vs = np.round((1 - (np.arcsin(normals[:, 2]) + np.pi / 2) / np.pi) * height).astype(np.int32)
indices = Vs * width + Us
validMask = np.logical_and(np.logical_and(Us >= 0, Us < width), np.logical_and(Vs >= 0, Vs < height))
indices = indices[validMask]
normalImage = np.zeros((height * width, 3))
normalImage[indices] = color_map[kmeans_N.labels_[validMask]]
normalImage = normalImage.reshape((height, width, 3))
cv2.imwrite('test/anchor_planes/normal_color_' + str(k) + '.png', normalImage)
exit(1)
pass
np.save(filename_N, self.anchor_planes_N)
np.save(filename_d, self.anchor_planes_d)
continue
return
def load_image_gt(config, image_id, image, depth, mask, class_ids, parameters, augment=False,
use_mini_mask=True):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: If true, apply random image augmentation. Currently, only
horizontal flipping is offered.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
## Load image and mask
shape = image.shape
image, window, scale, padding = utils.resize_image(
image,
min_dim=config.IMAGE_MAX_DIM,
max_dim=config.IMAGE_MAX_DIM,
padding=config.IMAGE_PADDING)
mask = utils.resize_mask(mask, scale, padding)
## Random horizontal flips.
if augment and False:
if np.random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
depth = np.fliplr(depth)
pass
pass
## Bounding boxes. Note that some boxes might be all zeros
## if the corresponding mask got cropped out.
## bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
## Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
pass
active_class_ids = np.ones(config.NUM_CLASSES, dtype=np.int32)
## Image meta data
image_meta = utils.compose_image_meta(image_id, shape, window, active_class_ids)
if config.NUM_PARAMETER_CHANNELS > 0:
if config.OCCLUSION:
depth = utils.resize_mask(depth, scale, padding)
mask_visible = utils.minimize_mask(bbox, depth, config.MINI_MASK_SHAPE)
mask = np.stack([mask, mask_visible], axis=-1)
else:
depth = np.expand_dims(depth, -1)
depth = utils.resize_mask(depth, scale, padding).squeeze(-1)
depth = utils.minimize_depth(bbox, depth, config.MINI_MASK_SHAPE)
mask = np.stack([mask, depth], axis=-1)
pass
pass
return image, image_meta, class_ids, bbox, mask, parameters
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
## RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
## RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
## Handle COCO crowds
## A crowd box in COCO is a bounding box around several instances. Exclude
## them from training. A crowd box is given a negative class ID.
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
## Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
## Match anchors to GT Boxes
## If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
## If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
## Neutral anchors are those that don't match the conditions above,
## and they don't influence the loss function.
## However, don't keep any GT box unmatched (rare, but happens). Instead,
## match it to the closest anchor (even if its max IoU is < 0.3).
#
## 1. Set negative anchors first. They get overwritten below if a GT box is
## matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
## 2. Set an anchor for each GT box (regardless of IoU value).
## TODO: If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argmax(overlaps, axis=0)
rpn_match[gt_iou_argmax] = 1
## 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
## Subsample to balance positive and negative anchors
## Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
## Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
## Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
## Rest the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
## For positive anchors, compute shift and scale needed to transform them
## to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 ## index into rpn_bbox
## TODO: use box_refinment() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
## Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
## Convert coordinates to center plus width/height.
## GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
## Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
## Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
## Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
| [
"numpy.log",
"utils.resize_mask",
"numpy.array",
"utils.compute_overlaps",
"numpy.arctan2",
"numpy.linalg.norm",
"utils.writePointCloud",
"utils.compose_image_meta",
"numpy.save",
"numpy.arange",
"os.path.exists",
"numpy.where",
"numpy.stack",
"utils.extract_bboxes",
"numpy.random.seed",... | [((20837, 20957), 'utils.resize_image', 'utils.resize_image', (['image'], {'min_dim': 'config.IMAGE_MAX_DIM', 'max_dim': 'config.IMAGE_MAX_DIM', 'padding': 'config.IMAGE_PADDING'}), '(image, min_dim=config.IMAGE_MAX_DIM, max_dim=config.\n IMAGE_MAX_DIM, padding=config.IMAGE_PADDING)\n', (20855, 20957), False, 'import utils\n'), ((20998, 21037), 'utils.resize_mask', 'utils.resize_mask', (['mask', 'scale', 'padding'], {}), '(mask, scale, padding)\n', (21015, 21037), False, 'import utils\n'), ((21460, 21486), 'utils.extract_bboxes', 'utils.extract_bboxes', (['mask'], {}), '(mask)\n', (21480, 21486), False, 'import utils\n'), ((21676, 21719), 'numpy.ones', 'np.ones', (['config.NUM_CLASSES'], {'dtype': 'np.int32'}), '(config.NUM_CLASSES, dtype=np.int32)\n', (21683, 21719), True, 'import numpy as np\n'), ((21760, 21827), 'utils.compose_image_meta', 'utils.compose_image_meta', (['image_id', 'shape', 'window', 'active_class_ids'], {}), '(image_id, shape, window, active_class_ids)\n', (21784, 21827), False, 'import utils\n'), ((23164, 23208), 'numpy.zeros', 'np.zeros', (['[anchors.shape[0]]'], {'dtype': 'np.int32'}), '([anchors.shape[0]], dtype=np.int32)\n', (23172, 23208), True, 'import numpy as np\n'), ((23303, 23352), 'numpy.zeros', 'np.zeros', (['(config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4)'], {}), '((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n', (23311, 23352), True, 'import numpy as np\n'), ((23548, 23587), 'numpy.ones', 'np.ones', (['[anchors.shape[0]]'], {'dtype': 'bool'}), '([anchors.shape[0]], dtype=bool)\n', (23555, 23587), True, 'import numpy as np\n'), ((23660, 23701), 'utils.compute_overlaps', 'utils.compute_overlaps', (['anchors', 'gt_boxes'], {}), '(anchors, gt_boxes)\n', (23682, 23701), False, 'import utils\n'), ((24315, 24342), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (24324, 24342), True, 'import numpy as np\n'), ((24639, 24666), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(0)'}), 
'(overlaps, axis=0)\n', (24648, 24666), True, 'import numpy as np\n'), ((827, 864), 'numpy.load', 'np.load', (['"""datasets/scene_ids_val.npy"""'], {}), "('datasets/scene_ids_val.npy')\n", (834, 864), True, 'import numpy as np\n'), ((2351, 2392), 'numpy.random.shuffle', 'np.random.shuffle', (['self.sceneImageIndices'], {}), '(self.sceneImageIndices)\n', (2368, 2392), True, 'import numpy as np\n'), ((3244, 3410), 'utils.generate_pyramid_anchors', 'utils.generate_pyramid_anchors', (['config.RPN_ANCHOR_SCALES', 'config.RPN_ANCHOR_RATIOS', 'config.BACKBONE_SHAPES', 'config.BACKBONE_STRIDES', 'config.RPN_ANCHOR_STRIDE'], {}), '(config.RPN_ANCHOR_SCALES, config.\n RPN_ANCHOR_RATIOS, config.BACKBONE_SHAPES, config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n', (3274, 3410), False, 'import utils\n'), ((7073, 7119), 'numpy.linalg.norm', 'np.linalg.norm', (['planes'], {'axis': '(-1)', 'keepdims': '(True)'}), '(planes, axis=-1, keepdims=True)\n', (7087, 7119), True, 'import numpy as np\n'), ((7768, 7820), 'numpy.linalg.norm', 'np.linalg.norm', (['planeNormals'], {'axis': '(-1)', 'keepdims': '(True)'}), '(planeNormals, axis=-1, keepdims=True)\n', (7782, 7820), True, 'import numpy as np\n'), ((7844, 7901), 'numpy.sum', 'np.sum', (['(newCenters * planeNormals)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(newCenters * planeNormals, axis=-1, keepdims=True)\n', (7850, 7901), True, 'import numpy as np\n'), ((8061, 8171), 'numpy.random.seed', 'np.random.seed', (['(((t & 4278190080) >> 24) + ((t & 16711680) >> 8) + ((t & 65280) << 8) + ((\n t & 255) << 24))'], {}), '(((t & 4278190080) >> 24) + ((t & 16711680) >> 8) + ((t & \n 65280) << 8) + ((t & 255) << 24))\n', (8075, 8171), True, 'import numpy as np\n'), ((12553, 12591), 'numpy.array', 'np.array', (['parameters'], {'dtype': 'np.float32'}), '(parameters, dtype=np.float32)\n', (12561, 12591), True, 'import numpy as np\n'), ((12607, 12639), 'numpy.stack', 'np.stack', (['instance_masks'], {'axis': '(2)'}), '(instance_masks, 
axis=2)\n', (12615, 12639), True, 'import numpy as np\n'), ((12661, 12696), 'numpy.array', 'np.array', (['class_ids'], {'dtype': 'np.int32'}), '(class_ids, dtype=np.int32)\n', (12669, 12696), True, 'import numpy as np\n'), ((21112, 21135), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (21129, 21135), True, 'import numpy as np\n'), ((21583, 21638), 'utils.minimize_mask', 'utils.minimize_mask', (['bbox', 'mask', 'config.MINI_MASK_SHAPE'], {}), '(bbox, mask, config.MINI_MASK_SHAPE)\n', (21602, 21638), False, 'import utils\n'), ((24920, 24944), 'numpy.where', 'np.where', (['(rpn_match == 1)'], {}), '(rpn_match == 1)\n', (24928, 24944), True, 'import numpy as np\n'), ((25088, 25131), 'numpy.random.choice', 'np.random.choice', (['ids', 'extra'], {'replace': '(False)'}), '(ids, extra, replace=False)\n', (25104, 25131), True, 'import numpy as np\n'), ((25204, 25229), 'numpy.where', 'np.where', (['(rpn_match == -1)'], {}), '(rpn_match == -1)\n', (25212, 25229), True, 'import numpy as np\n'), ((25416, 25459), 'numpy.random.choice', 'np.random.choice', (['ids', 'extra'], {'replace': '(False)'}), '(ids, extra, replace=False)\n', (25432, 25459), True, 'import numpy as np\n'), ((25620, 25644), 'numpy.where', 'np.where', (['(rpn_match == 1)'], {}), '(rpn_match == 1)\n', (25628, 25644), True, 'import numpy as np\n'), ((2081, 2191), 'numpy.random.seed', 'np.random.seed', (['(((t & 4278190080) >> 24) + ((t & 16711680) >> 8) + ((t & 65280) << 8) + ((\n t & 255) << 24))'], {}), '(((t & 4278190080) >> 24) + ((t & 16711680) >> 8) + ((t & \n 65280) << 8) + ((t & 255) << 24))\n', (2095, 2191), True, 'import numpy as np\n'), ((2308, 2325), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2322, 2325), True, 'import numpy as np\n'), ((14555, 14576), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (14565, 14576), False, 'import cv2\n'), ((14612, 14687), 'cv2.resize', 'cv2.resize', (['image_2', '(self.config.IMAGE_MAX_DIM, 
self.config.IMAGE_MAX_DIM)'], {}), '(image_2, (self.config.IMAGE_MAX_DIM, self.config.IMAGE_MAX_DIM))\n', (14622, 14687), False, 'import cv2\n'), ((15269, 15300), 'numpy.linalg.inv', 'np.linalg.inv', (['extrinsics_2_inv'], {}), '(extrinsics_2_inv)\n', (15282, 15300), True, 'import numpy as np\n'), ((15816, 15857), 'utils.rotationMatrixToAxisAngle', 'utils.rotationMatrixToAxisAngle', (['rotation'], {}), '(rotation)\n', (15847, 15857), False, 'import utils\n'), ((16483, 16534), 'os.path.exists', 'os.path.exists', (['"""test/anchor_planes/all_planes.npy"""'], {}), "('test/anchor_planes/all_planes.npy')\n", (16497, 16534), False, 'import os\n'), ((17568, 17603), 'numpy.linalg.norm', 'np.linalg.norm', (['all_planes'], {'axis': '(-1)'}), '(all_planes, axis=-1)\n', (17582, 17603), True, 'import numpy as np\n'), ((19532, 19573), 'numpy.save', 'np.save', (['filename_N', 'self.anchor_planes_N'], {}), '(filename_N, self.anchor_planes_N)\n', (19539, 19573), True, 'import numpy as np\n'), ((19586, 19627), 'numpy.save', 'np.save', (['filename_d', 'self.anchor_planes_d'], {}), '(filename_d, self.anchor_planes_d)\n', (19593, 19627), True, 'import numpy as np\n'), ((21157, 21173), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (21166, 21173), True, 'import numpy as np\n'), ((21193, 21208), 'numpy.fliplr', 'np.fliplr', (['mask'], {}), '(mask)\n', (21202, 21208), True, 'import numpy as np\n'), ((21229, 21245), 'numpy.fliplr', 'np.fliplr', (['depth'], {}), '(depth)\n', (21238, 21245), True, 'import numpy as np\n'), ((21920, 21960), 'utils.resize_mask', 'utils.resize_mask', (['depth', 'scale', 'padding'], {}), '(depth, scale, padding)\n', (21937, 21960), False, 'import utils\n'), ((22000, 22056), 'utils.minimize_mask', 'utils.minimize_mask', (['bbox', 'depth', 'config.MINI_MASK_SHAPE'], {}), '(bbox, depth, config.MINI_MASK_SHAPE)\n', (22019, 22056), False, 'import utils\n'), ((22076, 22115), 'numpy.stack', 'np.stack', (['[mask, mask_visible]'], {'axis': '(-1)'}), '([mask, 
mask_visible], axis=-1)\n', (22084, 22115), True, 'import numpy as np\n'), ((22150, 22175), 'numpy.expand_dims', 'np.expand_dims', (['depth', '(-1)'], {}), '(depth, -1)\n', (22164, 22175), True, 'import numpy as np\n'), ((22269, 22326), 'utils.minimize_depth', 'utils.minimize_depth', (['bbox', 'depth', 'config.MINI_MASK_SHAPE'], {}), '(bbox, depth, config.MINI_MASK_SHAPE)\n', (22289, 22326), False, 'import utils\n'), ((22346, 22378), 'numpy.stack', 'np.stack', (['[mask, depth]'], {'axis': '(-1)'}), '([mask, depth], axis=-1)\n', (22354, 22378), True, 'import numpy as np\n'), ((24373, 24401), 'numpy.arange', 'np.arange', (['overlaps.shape[0]'], {}), '(overlaps.shape[0])\n', (24382, 24401), True, 'import numpy as np\n'), ((25318, 25340), 'numpy.sum', 'np.sum', (['(rpn_match == 1)'], {}), '(rpn_match == 1)\n', (25324, 25340), True, 'import numpy as np\n'), ((26453, 26471), 'numpy.log', 'np.log', (['(gt_h / a_h)'], {}), '(gt_h / a_h)\n', (26459, 26471), True, 'import numpy as np\n'), ((26485, 26503), 'numpy.log', 'np.log', (['(gt_w / a_w)'], {}), '(gt_w / a_w)\n', (26491, 26503), True, 'import numpy as np\n'), ((1642, 1792), 'datasets.scannet_scene.ScanNetScene', 'ScanNetScene', (['options', 'scenePath', 'scene_id', 'self.confident_labels', 'self.layout_labels'], {'load_semantics': 'load_semantics', 'load_boundary': 'load_boundary'}), '(options, scenePath, scene_id, self.confident_labels, self.\n layout_labels, load_semantics=load_semantics, load_boundary=load_boundary)\n', (1654, 1792), False, 'from datasets.scannet_scene import ScanNetScene\n'), ((7197, 7226), 'numpy.ones', 'np.ones', (['(planes.shape[0], 1)'], {}), '((planes.shape[0], 1))\n', (7204, 7226), True, 'import numpy as np\n'), ((7298, 7319), 'numpy.transpose', 'np.transpose', (['centers'], {}), '(centers)\n', (7310, 7319), True, 'import numpy as np\n'), ((7421, 7453), 'numpy.maximum', 'np.maximum', (['planeOffsets', '(0.0001)'], {}), '(planeOffsets, 0.0001)\n', (7431, 7453), True, 'import numpy as np\n'), 
((7499, 7528), 'numpy.ones', 'np.ones', (['(planes.shape[0], 1)'], {}), '((planes.shape[0], 1))\n', (7506, 7528), True, 'import numpy as np\n'), ((7602, 7625), 'numpy.transpose', 'np.transpose', (['refPoints'], {}), '(refPoints)\n', (7614, 7625), True, 'import numpy as np\n'), ((8030, 8041), 'time.time', 'time.time', ([], {}), '()\n', (8039, 8041), False, 'import time\n'), ((13248, 13276), 'numpy.arange', 'np.arange', (['gt_boxes.shape[0]'], {}), '(gt_boxes.shape[0])\n', (13257, 13276), True, 'import numpy as np\n'), ((13682, 13701), 'numpy.zeros', 'np.zeros', (['(80, 640)'], {}), '((80, 640))\n', (13690, 13701), True, 'import numpy as np\n'), ((13710, 13729), 'numpy.zeros', 'np.zeros', (['(80, 640)'], {}), '((80, 640))\n', (13718, 13729), True, 'import numpy as np\n'), ((13779, 13828), 'numpy.full', 'np.full', (['(80, 640)'], {'fill_value': '(-1)', 'dtype': 'np.int32'}), '((80, 640), fill_value=-1, dtype=np.int32)\n', (13786, 13828), True, 'import numpy as np\n'), ((13844, 13893), 'numpy.full', 'np.full', (['(80, 640)'], {'fill_value': '(-1)', 'dtype': 'np.int32'}), '((80, 640), fill_value=-1, dtype=np.int32)\n', (13851, 13893), True, 'import numpy as np\n'), ((15492, 15517), 'numpy.linalg.inv', 'np.linalg.inv', (['extrinsics'], {}), '(extrinsics)\n', (15505, 15517), True, 'import numpy as np\n'), ((15541, 15565), 'numpy.isnan', 'np.isnan', (['transformation'], {}), '(transformation)\n', (15549, 15565), True, 'import numpy as np\n'), ((16375, 16401), 'os.path.exists', 'os.path.exists', (['filename_N'], {}), '(filename_N)\n', (16389, 16401), False, 'import os\n'), ((16406, 16432), 'os.path.exists', 'os.path.exists', (['filename_d'], {}), '(filename_d)\n', (16420, 16432), False, 'import os\n'), ((16565, 16609), 'numpy.load', 'np.load', (['"""test/anchor_planes/all_planes.npy"""'], {}), "('test/anchor_planes/all_planes.npy')\n", (16572, 16609), True, 'import numpy as np\n'), ((17279, 17313), 'numpy.concatenate', 'np.concatenate', (['all_planes'], {'axis': '(0)'}), 
'(all_planes, axis=0)\n', (17293, 17313), True, 'import numpy as np\n'), ((17330, 17386), 'numpy.save', 'np.save', (['"""test/anchor_planes/all_planes.npy"""', 'all_planes'], {}), "('test/anchor_planes/all_planes.npy', all_planes)\n", (17337, 17386), True, 'import numpy as np\n'), ((17639, 17666), 'numpy.expand_dims', 'np.expand_dims', (['offsets', '(-1)'], {}), '(offsets, -1)\n', (17653, 17666), True, 'import numpy as np\n'), ((17908, 17935), 'numpy.expand_dims', 'np.expand_dims', (['offsets', '(-1)'], {}), '(offsets, -1)\n', (17922, 17935), True, 'import numpy as np\n'), ((18333, 18404), 'numpy.concatenate', 'np.concatenate', (['[normals_rotated, color_map[kmeans_N.labels_]]'], {'axis': '(-1)'}), '([normals_rotated, color_map[kmeans_N.labels_]], axis=-1)\n', (18347, 18404), True, 'import numpy as np\n'), ((18421, 18497), 'utils.writePointCloud', 'utils.writePointCloud', (['"""test/anchor_planes/anchor_planes_N.ply"""', 'plane_cloud'], {}), "('test/anchor_planes/anchor_planes_N.ply', plane_cloud)\n", (18442, 18497), False, 'import utils\n'), ((18529, 18595), 'numpy.concatenate', 'np.concatenate', (['[all_planes, color_map[kmeans_d.labels_]]'], {'axis': '(-1)'}), '([all_planes, color_map[kmeans_d.labels_]], axis=-1)\n', (18543, 18595), True, 'import numpy as np\n'), ((18612, 18688), 'utils.writePointCloud', 'utils.writePointCloud', (['"""test/anchor_planes/anchor_planes_d.ply"""', 'plane_cloud'], {}), "('test/anchor_planes/anchor_planes_d.ply', plane_cloud)\n", (18633, 18688), False, 'import utils\n'), ((19201, 19230), 'numpy.zeros', 'np.zeros', (['(height * width, 3)'], {}), '((height * width, 3))\n', (19209, 19230), True, 'import numpy as np\n'), ((2046, 2057), 'time.time', 'time.time', ([], {}), '()\n', (2055, 2057), False, 'import time\n'), ((9651, 9682), 'numpy.linalg.norm', 'np.linalg.norm', (['planes'], {'axis': '(-1)'}), '(planes, axis=-1)\n', (9665, 9682), True, 'import numpy as np\n'), ((15199, 15225), 'numpy.array', 'np.array', (['extrinsics_2_inv'], 
{}), '(extrinsics_2_inv)\n', (15207, 15225), True, 'import numpy as np\n'), ((15877, 15928), 'numpy.concatenate', 'np.concatenate', (['[translation, axis * angle]'], {'axis': '(0)'}), '([translation, axis * angle], axis=0)\n', (15891, 15928), True, 'import numpy as np\n'), ((17691, 17729), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_anchor_planes_N'}), '(n_clusters=num_anchor_planes_N)\n', (17697, 17729), False, 'from sklearn.cluster import KMeans\n'), ((17865, 17903), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_anchor_planes_d'}), '(n_clusters=num_anchor_planes_d)\n', (17871, 17903), False, 'from sklearn.cluster import KMeans\n'), ((19048, 19083), 'numpy.logical_and', 'np.logical_and', (['(Us >= 0)', '(Us < width)'], {}), '(Us >= 0, Us < width)\n', (19062, 19083), True, 'import numpy as np\n'), ((19086, 19122), 'numpy.logical_and', 'np.logical_and', (['(Vs >= 0)', '(Vs < height)'], {}), '(Vs >= 0, Vs < height)\n', (19100, 19122), True, 'import numpy as np\n'), ((22196, 22236), 'utils.resize_mask', 'utils.resize_mask', (['depth', 'scale', 'padding'], {}), '(depth, scale, padding)\n', (22213, 22236), False, 'import utils\n'), ((1476, 1527), 'os.path.exists', 'os.path.exists', (["(scenePath + '/' + scene_id + '.txt')"], {}), "(scenePath + '/' + scene_id + '.txt')\n", (1490, 1527), False, 'import os\n'), ((1535, 1587), 'os.path.exists', 'os.path.exists', (["(scenePath + '/annotation/planes.npy')"], {}), "(scenePath + '/annotation/planes.npy')\n", (1549, 1587), False, 'import os\n'), ((9452, 9477), 'numpy.expand_dims', 'np.expand_dims', (['planes', '(1)'], {}), '(planes, 1)\n', (9466, 9477), True, 'import numpy as np\n'), ((9724, 9762), 'numpy.expand_dims', 'np.expand_dims', (['plane_offsets'], {'axis': '(-1)'}), '(plane_offsets, axis=-1)\n', (9738, 9762), True, 'import numpy as np\n'), ((10206, 10237), 'numpy.linalg.norm', 'np.linalg.norm', (['planes'], {'axis': '(-1)'}), '(planes, axis=-1)\n', (10220, 10237), True, 'import numpy as 
np\n'), ((15638, 15654), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (15646, 15654), True, 'import numpy as np\n'), ((9808, 9840), 'numpy.expand_dims', 'np.expand_dims', (['plane_normals', '(1)'], {}), '(plane_normals, 1)\n', (9822, 9840), True, 'import numpy as np\n'), ((9989, 10022), 'numpy.expand_dims', 'np.expand_dims', (['plane_offsets', '(-1)'], {}), '(plane_offsets, -1)\n', (10003, 10022), True, 'import numpy as np\n'), ((10295, 10333), 'numpy.expand_dims', 'np.expand_dims', (['plane_offsets'], {'axis': '(-1)'}), '(plane_offsets, axis=-1)\n', (10309, 10333), True, 'import numpy as np\n'), ((10598, 10629), 'numpy.linalg.norm', 'np.linalg.norm', (['planes'], {'axis': '(-1)'}), '(planes, axis=-1)\n', (10612, 10629), True, 'import numpy as np\n'), ((11097, 11108), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (11105, 11108), True, 'import numpy as np\n'), ((15625, 15635), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (15632, 15635), True, 'import numpy as np\n'), ((17061, 17092), 'numpy.linalg.norm', 'np.linalg.norm', (['planes'], {'axis': '(-1)'}), '(planes, axis=-1)\n', (17075, 17092), True, 'import numpy as np\n'), ((10379, 10411), 'numpy.expand_dims', 'np.expand_dims', (['plane_normals', '(1)'], {}), '(plane_normals, 1)\n', (10393, 10411), True, 'import numpy as np\n'), ((10691, 10729), 'numpy.expand_dims', 'np.expand_dims', (['plane_offsets'], {'axis': '(-1)'}), '(plane_offsets, axis=-1)\n', (10705, 10729), True, 'import numpy as np\n'), ((11385, 11396), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (11393, 11396), True, 'import numpy as np\n'), ((11860, 11878), 'numpy.array', 'np.array', (['[offset]'], {}), '([offset])\n', (11868, 11878), True, 'import numpy as np\n'), ((18778, 18818), 'numpy.arctan2', 'np.arctan2', (['normals[:, 1]', 'normals[:, 0]'], {}), '(normals[:, 1], normals[:, 0])\n', (18788, 18818), True, 'import numpy as np\n'), ((12175, 12186), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (12183, 12186), True, 
'import numpy as np\n'), ((12402, 12413), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (12410, 12413), True, 'import numpy as np\n'), ((18889, 18913), 'numpy.arcsin', 'np.arcsin', (['normals[:, 2]'], {}), '(normals[:, 2])\n', (18898, 18913), True, 'import numpy as np\n')] |
import os, sys
import numpy as np
import pybullet as p
class Util:
    """Geometry and rendering helpers shared by the assistive environments.

    Holds a pybullet physics-client id and the environment's seeded numpy RNG,
    and provides capsule/cylinder point tests plus the sleeve-on-arm dressing
    reward used by the cloth tasks.
    """
    def __init__(self, pid, np_random):
        # pybullet physics client id; used when loading the EGL render plugin.
        self.id = pid
        # IK limit caches -- empty here; presumably filled by robot-setup code
        # elsewhere in the project (confirm against callers).
        self.ik_lower_limits = {}
        self.ik_upper_limits = {}
        self.ik_joint_ranges = {}
        self.ik_rest_poses = {}
        # Seeded RandomState so sampled surface points are reproducible.
        self.np_random = np_random
    def enable_gpu(self):
        """Enable hardware (EGL) rendering in pybullet when a free GPU exists.

        Prints diagnostic info and falls back to the CPU TinyRenderer when no
        GPU is available or the plugin fails to load.
        """
        import GPUtil as GPU
        os.environ['MESA_GL_VERSION_OVERRIDE'] = '3.3'
        os.environ['MESA_GLSL_VERSION_OVERRIDE'] = '330'
        enableGPU = False
        # Print os and python version information
        print('OS: ' + sys.platform)
        print(sys.version)
        # Print package name and version number
        print(GPU.__name__ + ' ' + GPU.__version__)
        # Show the utilization of all GPUs in a nice table
        GPU.showUtilization()
        # Show all stats of all GPUs in a nice table
        GPU.showUtilization(all=True)
        # NOTE: If all your GPUs currently have a memory consumption larger than 1%, this step will fail. It's not a bug! It is intended to do so, if it does not find an available GPU.
        GPUs = GPU.getGPUs()
        # Fix: reuse the result above instead of querying the hardware a second time.
        numGPUs = len(GPUs)
        print("numGPUs=", numGPUs)
        if numGPUs > 0:
            enableGPU = True
        eglPluginId = -1
        if enableGPU:
            import pkgutil
            egl = pkgutil.get_loader('eglRenderer')
            if egl:
                eglPluginId = p.loadPlugin(egl.get_filename(), "_eglRendererPlugin", physicsClientId=self.id)
            else:
                eglPluginId = p.loadPlugin("eglRendererPlugin", physicsClientId=self.id)
        if eglPluginId >= 0:
            print("Using GPU hardware (eglRenderer)")
        else:
            print("Using CPU renderer (TinyRenderer)")
    def points_in_cylinder(self, pt1, pt2, r, q):
        """Return True if point q lies inside the cylinder of radius r whose axis runs from pt1 to pt2."""
        vec = pt2 - pt1
        const = r * np.linalg.norm(vec)
        # q is inside iff it projects between the two end caps and its
        # perpendicular distance to the axis is <= r (|cross| = |vec| * distance).
        return np.dot(q - pt1, vec) >= 0 and np.dot(q - pt2, vec) <= 0 and np.linalg.norm(np.cross(q - pt1, vec)) <= const
    def point_on_capsule(self, p1, p2, radius, theta_range=(0, np.pi*2)):
        '''
        Pick a random point along the outer surface of a capsule (cylinder)
        whose axis runs from p1 to p2, at exactly `radius` from the axis.
        '''
        # Pick a random point along the length of the capsule
        axis_vector = p2 - p1
        random_length = self.np_random.uniform(radius, np.linalg.norm(axis_vector))
        # Normalize axis vector to unit length
        axis_vector = axis_vector / np.linalg.norm(axis_vector)
        ortho_vector = self.orthogonal_vector(axis_vector)
        # Normalize orthogonal vector to unit length
        ortho_vector = ortho_vector / np.linalg.norm(ortho_vector)
        # Determine normal vector through cross product (this will be of unit length)
        normal_vector = np.cross(axis_vector, ortho_vector)
        # Pick a random rotation along the cylinder
        theta = self.np_random.uniform(theta_range[0], theta_range[1])
        point = p1 + random_length*axis_vector + radius*np.cos(theta)*ortho_vector + radius*np.sin(theta)*normal_vector
        return point
    def capsule_points(self, p1, p2, radius, distance_between_points=0.05):
        '''
        Creates a set of points around a capsule, spaced roughly
        `distance_between_points` apart both along and around the axis.
        Check out: http://mathworld.wolfram.com/ConicalFrustum.html
        and: http://math.stackexchange.com/questions/73237/parametric-equation-of-a-circle-in-3d-space
        '''
        points = []
        p1, p2 = np.array(p1), np.array(p2)
        axis_vector = p2 - p1
        # Normalize axis vector to unit length
        axis_vector = axis_vector / np.linalg.norm(axis_vector)
        ortho_vector = self.orthogonal_vector(axis_vector)
        # Normalize orthogonal vector to unit length
        ortho_vector = ortho_vector / np.linalg.norm(ortho_vector)
        # Determine normal vector through cross product (this will be of unit length)
        normal_vector = np.cross(axis_vector, ortho_vector)
        # Determine the section positions along the frustum at which we will create point around in a circular fashion
        sections = int(np.linalg.norm(p2 - p1) / distance_between_points)
        section_positions = [(p2 - p1) / (sections + 1) * (i + 1) for i in range(sections)]
        for i, section_pos in enumerate(section_positions):
            # Determine radius and circumference of this section
            circumference = 2*np.pi*radius
            # Determine the angle difference (in radians) between points
            theta_dist = distance_between_points / radius
            for j in range(int(circumference / distance_between_points)):
                theta = theta_dist * j
                # Determine cartesian coordinates for the point along the circular section of the frustum
                point_on_circle = p1 + section_pos + radius*np.cos(theta)*ortho_vector + radius*np.sin(theta)*normal_vector
                points.append(point_on_circle)
        return points
    def orthogonal_vector(self, v):
        '''
        Return a vector orthogonal to v (dot product is zero).
        Built by crossing v with a unit basis vector that is guaranteed
        not to be parallel to v (the axis after v's largest component).
        '''
        # Find first element in v that is nonzero
        m = np.argmax(np.abs(v))
        y = np.zeros(len(v))
        y[(m+1) % len(v)] = 1
        return np.cross(v, y)
    def line_intersects_triangle(self, p0, p1, p2, q0, q1):
        """Return True if segment q0-q1 passes through triangle (p0, p1, p2)."""
        # https://stackoverflow.com/questions/42740765/intersection-between-line-and-triangle-in-3d
        signed_volume = lambda a, b, c, d: (1.0/6.0) * np.dot(np.cross(b-a, c-a), d-a)
        # Endpoints on opposite sides of the triangle's plane...
        if np.sign(signed_volume(q0, p0, p1, p2)) != np.sign(signed_volume(q1, p0, p1, p2)):
            # ...and the segment's line pierces inside the triangle's edges.
            if np.sign(signed_volume(q0, q1, p0, p1)) == np.sign(signed_volume(q0, q1, p1, p2)) == np.sign(signed_volume(q0, q1, p2, p0)):
                return True
        return False
    def sleeve_on_arm_reward(self, triangle1_points, triangle2_points, shoulder_pos, elbow_pos, wrist_pos, hand_radius, elbow_radius, shoulder_radius):
        """Measure how far a sleeve opening (two cloth triangles) is pulled onto an arm.

        triangle1_points / triangle2_points: three 3D cloth vertex positions each,
        forming triangles across the sleeve opening. shoulder/elbow/wrist_pos are
        joint centers; *_radius the corresponding capsule radii.

        Returns a 9-tuple: (forearm_in_sleeve, upperarm_in_sleeve,
        distance_along_forearm, distance_along_upperarm, distance_to_hand,
        distance_to_elbow, distance_to_shoulder, forearm_segment_length,
        upperarm_segment_length).
        """
        # Use full length of arm, rather than from hand center to elbow center
        wrist_pos, elbow_pos, shoulder_pos = np.array(wrist_pos), np.array(elbow_pos), np.array(shoulder_pos)
        # Hoisted: this norm was computed twice in the two lines below.
        forearm_length = np.linalg.norm(wrist_pos - elbow_pos)
        hand_end_pos = wrist_pos + (wrist_pos - elbow_pos) / forearm_length * hand_radius*2
        elbow_end_pos = elbow_pos + (elbow_pos - wrist_pos) / forearm_length * elbow_radius
        shoulder_end_pos = shoulder_pos + (shoulder_pos - elbow_pos) / np.linalg.norm(shoulder_pos - elbow_pos) * shoulder_radius
        # Given the central axis of the arm, find the plane through the axis and one vector perpendicular to the axis
        # and the plane through the axis and the second vector perpendicular to the other two.
        # There must be points above and below both of these two planes
        # https://math.stackexchange.com/questions/7931/point-below-a-plane
        normal_forearm = hand_end_pos - elbow_end_pos
        normal_forearm = normal_forearm / np.linalg.norm(normal_forearm)
        # Normalized Tangent Vector, assumes arm axis not parallel to vector [1, 1, 0]
        tangent_forearm = np.cross(np.array([1, 1, 0]), normal_forearm)
        tangent_forearm = tangent_forearm / np.linalg.norm(tangent_forearm)
        # Normalized Binormal_forearm or Bitangent_forearm vector
        binormal_forearm = np.cross(tangent_forearm, normal_forearm)
        binormal_forearm = binormal_forearm / np.linalg.norm(binormal_forearm)
        # Check if at least one point exists above and below both planes
        # v.dot(p - p0), p0 on plane, v is normal_forearm of a plane. v = tangent_forearm, v = binormal_forearm, p0 = hand_end_pos
        all_points = np.concatenate([triangle1_points, triangle2_points], axis=0)
        tangent_forearm_points = np.dot(tangent_forearm, (all_points - hand_end_pos).T)
        binormal_forearm_points = np.dot(binormal_forearm, (all_points - hand_end_pos).T)
        points_above_below_forearm = np.any(tangent_forearm_points > 0) and np.any(tangent_forearm_points < 0) and np.any(binormal_forearm_points > 0) and np.any(binormal_forearm_points < 0)
        # Same test for the upper arm segment.
        normal_upperarm = elbow_end_pos - shoulder_end_pos
        normal_upperarm = normal_upperarm / np.linalg.norm(normal_upperarm)
        tangent_upperarm = np.cross(np.array([1, 1, 0]), normal_upperarm)
        tangent_upperarm = tangent_upperarm / np.linalg.norm(tangent_upperarm)
        binormal_upperarm = np.cross(tangent_upperarm, normal_upperarm)
        binormal_upperarm = binormal_upperarm / np.linalg.norm(binormal_upperarm)
        tangent_upperarm_points = np.dot(tangent_upperarm, (all_points - shoulder_end_pos).T)
        binormal_upperarm_points = np.dot(binormal_upperarm, (all_points - shoulder_end_pos).T)
        points_above_below_upperarm = np.any(tangent_upperarm_points > 0) and np.any(tangent_upperarm_points < 0) and np.any(binormal_upperarm_points > 0) and np.any(binormal_upperarm_points < 0)
        # Check that the arm line segment intersects two different triangles defined by points around the sleeve.
        # https://stackoverflow.com/questions/42740765/intersection-between-line-and-triangle-in-3d
        forearm_intersects_triangle1 = self.line_intersects_triangle(triangle1_points[0], triangle1_points[1], triangle1_points[2], hand_end_pos, elbow_end_pos)
        forearm_intersects_triangle2 = self.line_intersects_triangle(triangle2_points[0], triangle2_points[1], triangle2_points[2], hand_end_pos, elbow_end_pos)
        upperarm_intersects_triangle1 = self.line_intersects_triangle(triangle1_points[0], triangle1_points[1], triangle1_points[2], elbow_end_pos, shoulder_end_pos)
        upperarm_intersects_triangle2 = self.line_intersects_triangle(triangle2_points[0], triangle2_points[1], triangle2_points[2], elbow_end_pos, shoulder_end_pos)
        sleeve_center = np.mean(all_points, axis=0)
        distance_to_shoulder = np.linalg.norm(shoulder_end_pos - sleeve_center)
        distance_to_elbow = np.linalg.norm(elbow_end_pos - sleeve_center)
        distance_to_hand = np.linalg.norm(hand_end_pos - sleeve_center)
        # Reward forward movement along the arm, away from the hand (pulling the sleeve onto the arm)
        distance_along_forearm = np.linalg.norm(sleeve_center - hand_end_pos)
        distance_along_upperarm = np.linalg.norm(sleeve_center - elbow_pos)
        forearm_in_sleeve = points_above_below_forearm and (forearm_intersects_triangle1 or forearm_intersects_triangle2)
        upperarm_in_sleeve = points_above_below_upperarm and (upperarm_intersects_triangle1 or upperarm_intersects_triangle2)
        return forearm_in_sleeve, upperarm_in_sleeve, distance_along_forearm, distance_along_upperarm, distance_to_hand, distance_to_elbow, distance_to_shoulder, np.linalg.norm(hand_end_pos - elbow_end_pos), np.linalg.norm(elbow_pos - shoulder_pos)
| [
"pkgutil.get_loader",
"numpy.mean",
"numpy.abs",
"numpy.cross",
"GPUtil.getGPUs",
"numpy.any",
"numpy.array",
"numpy.dot",
"GPUtil.showUtilization",
"numpy.cos",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.sin",
"pybullet.loadPlugin"
] | [((904, 925), 'GPUtil.showUtilization', 'GPU.showUtilization', ([], {}), '()\n', (923, 925), True, 'import GPUtil as GPU\n'), ((988, 1017), 'GPUtil.showUtilization', 'GPU.showUtilization', ([], {'all': '(True)'}), '(all=True)\n', (1007, 1017), True, 'import GPUtil as GPU\n'), ((1219, 1232), 'GPUtil.getGPUs', 'GPU.getGPUs', ([], {}), '()\n', (1230, 1232), True, 'import GPUtil as GPU\n'), ((2863, 2898), 'numpy.cross', 'np.cross', (['axis_vector', 'ortho_vector'], {}), '(axis_vector, ortho_vector)\n', (2871, 2898), True, 'import numpy as np\n'), ((4012, 4047), 'numpy.cross', 'np.cross', (['axis_vector', 'ortho_vector'], {}), '(axis_vector, ortho_vector)\n', (4020, 4047), True, 'import numpy as np\n'), ((5351, 5365), 'numpy.cross', 'np.cross', (['v', 'y'], {}), '(v, y)\n', (5359, 5365), True, 'import numpy as np\n'), ((7528, 7569), 'numpy.cross', 'np.cross', (['tangent_forearm', 'normal_forearm'], {}), '(tangent_forearm, normal_forearm)\n', (7536, 7569), True, 'import numpy as np\n'), ((7876, 7936), 'numpy.concatenate', 'np.concatenate', (['[triangle1_points, triangle2_points]'], {'axis': '(0)'}), '([triangle1_points, triangle2_points], axis=0)\n', (7890, 7936), True, 'import numpy as np\n'), ((8154, 8208), 'numpy.dot', 'np.dot', (['tangent_forearm', '(all_points - hand_end_pos).T'], {}), '(tangent_forearm, (all_points - hand_end_pos).T)\n', (8160, 8208), True, 'import numpy as np\n'), ((8243, 8298), 'numpy.dot', 'np.dot', (['binormal_forearm', '(all_points - hand_end_pos).T'], {}), '(binormal_forearm, (all_points - hand_end_pos).T)\n', (8249, 8298), True, 'import numpy as np\n'), ((8851, 8894), 'numpy.cross', 'np.cross', (['tangent_upperarm', 'normal_upperarm'], {}), '(tangent_upperarm, normal_upperarm)\n', (8859, 8894), True, 'import numpy as np\n'), ((9011, 9070), 'numpy.dot', 'np.dot', (['tangent_upperarm', '(all_points - shoulder_end_pos).T'], {}), '(tangent_upperarm, (all_points - shoulder_end_pos).T)\n', (9017, 9070), True, 'import numpy as np\n'), ((9106, 
9166), 'numpy.dot', 'np.dot', (['binormal_upperarm', '(all_points - shoulder_end_pos).T'], {}), '(binormal_upperarm, (all_points - shoulder_end_pos).T)\n', (9112, 9166), True, 'import numpy as np\n'), ((10256, 10283), 'numpy.mean', 'np.mean', (['all_points'], {'axis': '(0)'}), '(all_points, axis=0)\n', (10263, 10283), True, 'import numpy as np\n'), ((10315, 10363), 'numpy.linalg.norm', 'np.linalg.norm', (['(shoulder_end_pos - sleeve_center)'], {}), '(shoulder_end_pos - sleeve_center)\n', (10329, 10363), True, 'import numpy as np\n'), ((10392, 10437), 'numpy.linalg.norm', 'np.linalg.norm', (['(elbow_end_pos - sleeve_center)'], {}), '(elbow_end_pos - sleeve_center)\n', (10406, 10437), True, 'import numpy as np\n'), ((10465, 10509), 'numpy.linalg.norm', 'np.linalg.norm', (['(hand_end_pos - sleeve_center)'], {}), '(hand_end_pos - sleeve_center)\n', (10479, 10509), True, 'import numpy as np\n'), ((10915, 10959), 'numpy.linalg.norm', 'np.linalg.norm', (['(sleeve_center - hand_end_pos)'], {}), '(sleeve_center - hand_end_pos)\n', (10929, 10959), True, 'import numpy as np\n'), ((10994, 11035), 'numpy.linalg.norm', 'np.linalg.norm', (['(sleeve_center - elbow_pos)'], {}), '(sleeve_center - elbow_pos)\n', (11008, 11035), True, 'import numpy as np\n'), ((1255, 1268), 'GPUtil.getGPUs', 'GPU.getGPUs', ([], {}), '()\n', (1266, 1268), True, 'import GPUtil as GPU\n'), ((1449, 1482), 'pkgutil.get_loader', 'pkgutil.get_loader', (['"""eglRenderer"""'], {}), "('eglRenderer')\n", (1467, 1482), False, 'import pkgutil\n'), ((1968, 1987), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (1982, 1987), True, 'import numpy as np\n'), ((2433, 2460), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_vector'], {}), '(axis_vector)\n', (2447, 2460), True, 'import numpy as np\n'), ((2546, 2573), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_vector'], {}), '(axis_vector)\n', (2560, 2573), True, 'import numpy as np\n'), ((2724, 2752), 'numpy.linalg.norm', 'np.linalg.norm', 
(['ortho_vector'], {}), '(ortho_vector)\n', (2738, 2752), True, 'import numpy as np\n'), ((3555, 3567), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (3563, 3567), True, 'import numpy as np\n'), ((3569, 3581), 'numpy.array', 'np.array', (['p2'], {}), '(p2)\n', (3577, 3581), True, 'import numpy as np\n'), ((3695, 3722), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_vector'], {}), '(axis_vector)\n', (3709, 3722), True, 'import numpy as np\n'), ((3873, 3901), 'numpy.linalg.norm', 'np.linalg.norm', (['ortho_vector'], {}), '(ortho_vector)\n', (3887, 3901), True, 'import numpy as np\n'), ((5266, 5275), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (5272, 5275), True, 'import numpy as np\n'), ((6286, 6305), 'numpy.array', 'np.array', (['wrist_pos'], {}), '(wrist_pos)\n', (6294, 6305), True, 'import numpy as np\n'), ((6307, 6326), 'numpy.array', 'np.array', (['elbow_pos'], {}), '(elbow_pos)\n', (6315, 6326), True, 'import numpy as np\n'), ((6328, 6350), 'numpy.array', 'np.array', (['shoulder_pos'], {}), '(shoulder_pos)\n', (6336, 6350), True, 'import numpy as np\n'), ((7169, 7199), 'numpy.linalg.norm', 'np.linalg.norm', (['normal_forearm'], {}), '(normal_forearm)\n', (7183, 7199), True, 'import numpy as np\n'), ((7322, 7341), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (7330, 7341), True, 'import numpy as np\n'), ((7403, 7434), 'numpy.linalg.norm', 'np.linalg.norm', (['tangent_forearm'], {}), '(tangent_forearm)\n', (7417, 7434), True, 'import numpy as np\n'), ((7616, 7648), 'numpy.linalg.norm', 'np.linalg.norm', (['binormal_forearm'], {}), '(binormal_forearm)\n', (7630, 7648), True, 'import numpy as np\n'), ((8336, 8370), 'numpy.any', 'np.any', (['(tangent_forearm_points > 0)'], {}), '(tangent_forearm_points > 0)\n', (8342, 8370), True, 'import numpy as np\n'), ((8375, 8409), 'numpy.any', 'np.any', (['(tangent_forearm_points < 0)'], {}), '(tangent_forearm_points < 0)\n', (8381, 8409), True, 'import numpy as np\n'), ((8414, 8449), 'numpy.any', 
'np.any', (['(binormal_forearm_points > 0)'], {}), '(binormal_forearm_points > 0)\n', (8420, 8449), True, 'import numpy as np\n'), ((8454, 8489), 'numpy.any', 'np.any', (['(binormal_forearm_points < 0)'], {}), '(binormal_forearm_points < 0)\n', (8460, 8489), True, 'import numpy as np\n'), ((8638, 8669), 'numpy.linalg.norm', 'np.linalg.norm', (['normal_upperarm'], {}), '(normal_upperarm)\n', (8652, 8669), True, 'import numpy as np\n'), ((8706, 8725), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (8714, 8725), True, 'import numpy as np\n'), ((8790, 8822), 'numpy.linalg.norm', 'np.linalg.norm', (['tangent_upperarm'], {}), '(tangent_upperarm)\n', (8804, 8822), True, 'import numpy as np\n'), ((8943, 8976), 'numpy.linalg.norm', 'np.linalg.norm', (['binormal_upperarm'], {}), '(binormal_upperarm)\n', (8957, 8976), True, 'import numpy as np\n'), ((9205, 9240), 'numpy.any', 'np.any', (['(tangent_upperarm_points > 0)'], {}), '(tangent_upperarm_points > 0)\n', (9211, 9240), True, 'import numpy as np\n'), ((9245, 9280), 'numpy.any', 'np.any', (['(tangent_upperarm_points < 0)'], {}), '(tangent_upperarm_points < 0)\n', (9251, 9280), True, 'import numpy as np\n'), ((9285, 9321), 'numpy.any', 'np.any', (['(binormal_upperarm_points > 0)'], {}), '(binormal_upperarm_points > 0)\n', (9291, 9321), True, 'import numpy as np\n'), ((9326, 9362), 'numpy.any', 'np.any', (['(binormal_upperarm_points < 0)'], {}), '(binormal_upperarm_points < 0)\n', (9332, 9362), True, 'import numpy as np\n'), ((11783, 11827), 'numpy.linalg.norm', 'np.linalg.norm', (['(hand_end_pos - elbow_end_pos)'], {}), '(hand_end_pos - elbow_end_pos)\n', (11797, 11827), True, 'import numpy as np\n'), ((11829, 11869), 'numpy.linalg.norm', 'np.linalg.norm', (['(elbow_pos - shoulder_pos)'], {}), '(elbow_pos - shoulder_pos)\n', (11843, 11869), True, 'import numpy as np\n'), ((1663, 1721), 'pybullet.loadPlugin', 'p.loadPlugin', (['"""eglRendererPlugin"""'], {'physicsClientId': 'self.id'}), 
"('eglRendererPlugin', physicsClientId=self.id)\n", (1675, 1721), True, 'import pybullet as p\n'), ((2003, 2023), 'numpy.dot', 'np.dot', (['(q - pt1)', 'vec'], {}), '(q - pt1, vec)\n', (2009, 2023), True, 'import numpy as np\n'), ((2033, 2053), 'numpy.dot', 'np.dot', (['(q - pt2)', 'vec'], {}), '(q - pt2, vec)\n', (2039, 2053), True, 'import numpy as np\n'), ((4191, 4214), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (4205, 4214), True, 'import numpy as np\n'), ((2078, 2100), 'numpy.cross', 'np.cross', (['(q - pt1)', 'vec'], {}), '(q - pt1, vec)\n', (2086, 2100), True, 'import numpy as np\n'), ((3116, 3129), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3122, 3129), True, 'import numpy as np\n'), ((5703, 5725), 'numpy.cross', 'np.cross', (['(b - a)', '(c - a)'], {}), '(b - a, c - a)\n', (5711, 5725), True, 'import numpy as np\n'), ((6528, 6565), 'numpy.linalg.norm', 'np.linalg.norm', (['(wrist_pos - elbow_pos)'], {}), '(wrist_pos - elbow_pos)\n', (6542, 6565), True, 'import numpy as np\n'), ((6652, 6692), 'numpy.linalg.norm', 'np.linalg.norm', (['(shoulder_pos - elbow_pos)'], {}), '(shoulder_pos - elbow_pos)\n', (6666, 6692), True, 'import numpy as np\n'), ((3080, 3093), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3086, 3093), True, 'import numpy as np\n'), ((6412, 6449), 'numpy.linalg.norm', 'np.linalg.norm', (['(wrist_pos - elbow_pos)'], {}), '(wrist_pos - elbow_pos)\n', (6426, 6449), True, 'import numpy as np\n'), ((4948, 4961), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4954, 4961), True, 'import numpy as np\n'), ((4912, 4925), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4918, 4925), True, 'import numpy as np\n')] |
import bpy
import numpy
import mathutils
from enum import Enum
from mathutils import Matrix, Quaternion, Vector
import xml.etree.ElementTree as ET
import os
# Clear the terminal at import time (Windows "cls"; prints an error on other OSes).
os.system('cls')
# mesh name -> Blender mesh datablock; filled by loadNodeMesh, consumed by loadLibGeometries.
mesh_targets = {}
# controller id -> {'object', 'mesh', 'modifier'}; filled by loadNodeMesh, consumed by loadLibControllers.
controller_targets = {}
# Image registry; not referenced anywhere in this chunk.
images = {}
class SourceType(Enum):
    """COLLADA <source> data-array tags; the member ``.name`` is written verbatim as the XML tag."""
    Name_array = 0
    float_array = 1
class DataType(Enum):
    """COLLADA accessor <param> types; the member ``.name`` is written as the "type" attribute."""
    string = 0
    float = 1
    float4x4 = 2
class Param:
    """One <param> entry of a COLLADA accessor: a name plus a DataType."""
    # Class-level defaults; always shadowed by the instance attributes set in __init__.
    name = ''
    type = DataType.string
    def __init__(self, n, t):
        """Store the parameter name *n* and its DataType *t*."""
        self.name = n
        self.type = t
class AnimeChs:
    """Per-bone animation F-curves, grouped by channel type.

    Holds three independent lists: location channels, rotation-quaternion
    channels, and scale channels.
    """

    def __init__(self):
        """Create a container with three fresh, empty channel lists."""
        self.locChs, self.quatChs, self.scaleChs = [], [], []
def addInputBlock(domNode, semantic, source, offset=None):
    """Append a COLLADA <input> child element to *domNode*.

    Parameters
    ----------
    domNode : xml.etree.ElementTree.Element
        Parent element that receives the new <input>.
    semantic : str
        Value written as the "semantic" attribute (e.g. "POSITION", "INPUT").
    source : str
        Value written as the "source" attribute (usually a "#id" reference).
    offset : int, optional
        When given, written as the "offset" attribute; ``None`` omits it.
    """
    # Renamed from "input" to avoid shadowing the builtin of the same name.
    inputNode = ET.SubElement(domNode, 'input')
    inputNode.set('semantic', semantic)
    inputNode.set('source', source)
    # "is not None" rather than "!= None": identity check is the idiomatic
    # None test and is immune to objects with unusual __eq__ behavior.
    if offset is not None:
        inputNode.set('offset', str(offset))
def buildSource(domNode, strdata, count, id, params, sourceType=SourceType.float_array):
    """Append a complete COLLADA <source> element under *domNode*.

    The source consists of a data array (tag taken from *sourceType*, id
    "<id>.data", *count* scalar entries, text *strdata*) plus a
    <technique_common>/<accessor> describing the layout through one
    <param> per entry of *params*.
    """
    srcNode = ET.SubElement(domNode, 'source')
    srcNode.set('id', id)

    arrNode = ET.SubElement(srcNode, sourceType.name)
    arrNode.set('id', id + '.data')
    arrNode.set('count', str(count))
    arrNode.text = strdata

    techNode = ET.SubElement(srcNode, 'technique_common')
    accNode = ET.SubElement(techNode, 'accessor')
    accNode.set('source', '#' + id + '.data')

    # The stride is how many scalars one logical accessor element spans:
    # 16 for a float4x4 matrix, 1 for a plain string or float.
    stride = 0
    for par in params:
        paramNode = ET.SubElement(accNode, 'param')
        paramNode.set('name', par.name)
        paramNode.set('type', par.type.name)
        if par.type == DataType.string or par.type == DataType.float:
            stride += 1
        elif par.type == DataType.float4x4:
            stride += 16

    # Only emit count/stride when at least one known param type was seen.
    if stride != 0:
        accNode.set('count', str(int(count / stride)))
        accNode.set('stride', str(stride))
def matrixToStrList(mat, transpose):
    """Serialize a matrix to a space-separated string of "%.4f" values.

    Values are flattened in row-major order.

    Parameters
    ----------
    mat : mathutils.Matrix or array-like
        Matrix to serialize.  Unlike the previous implementation (which
        called ``mat.transpose()`` and, for mathutils matrices, flipped the
        caller's object in place), *mat* is never mutated here.
    transpose : bool
        When True, serialize the transpose of *mat*.  This now also works
        for NumPy arrays and nested lists, for which the old in-place call
        was a silent no-op.

    Returns
    -------
    str
        The formatted values joined by single spaces.
    """
    vals = numpy.asarray(mat)
    if transpose:
        # .T is a view; it transposes the local array without touching *mat*.
        vals = vals.T
    return ' '.join("{:.4f}".format(x) for x in vals.ravel())
def loadBonesTree( root, domNode, namebase ):
    """Walk a bone hierarchy iteratively and emit one JOINT <node> per bone.

    Each DOM node gets id "<namebase>.<bone name>", sid = bone name, a
    LOCALBINDING matrix (the bone's parent-relative transform) and an
    INVBINDING matrix (the inverse of the bone's armature-space matrix).
    """
    # Two explicit stacks (bones and their matching DOM nodes) instead of
    # recursion; they are pushed/popped in lockstep.
    boneStack = []
    domStack = []
    boneStack.append(root)
    domStack.append(domNode)
    while len(boneStack) != 0:
        cb = boneStack.pop()
        dom = domStack.pop()
        name = cb.name
        dom.set('id', namebase + '.' + name)
        dom.set('sid', name)
        dom.set('type', 'JOINT')
        matrix = ET.SubElement(dom, 'matrix')
        matrix.set('sid', 'LOCALBINDING')
        matrixInv = ET.SubElement(dom, 'matrix')
        matrixInv.set('sid', 'INVBINDING')
        parentMatInv = Matrix.Identity(4)
        if(cb.parent != None):
            parentMatInv = cb.parent.matrix_local.copy()
            parentMatInv.invert()
        localMat = cb.matrix_local.copy();
        # Parent-relative binding: inv(parent_local) * local.  NOTE: "*" is
        # the pre-Blender-2.80 matrix-multiplication operator (2.80+ uses "@").
        mat = parentMatInv * localMat
        # localMat is inverted in place; from here on it holds the INVBINDING value.
        localMat.invert()
        matrix.text = matrixToStrList(mat, True)
        matrixInv.text = matrixToStrList(localMat, True)
        # Queue each child with a freshly created child DOM node.
        for c in cb.children:
            dc = ET.SubElement(dom, 'node')
            boneStack.append(c)
            domStack.append(dc)
def loadNodeArmature(obj, domNode):
    """Fill *domNode* for an ARMATURE object.

    Writes the object's world matrix, then emits one bone subtree (via
    loadBonesTree) per root bone, i.e. per bone without a parent.
    """
    matNode = ET.SubElement(domNode, 'matrix')
    matNode.text = matrixToStrList(obj.matrix_world.copy(), True)

    armature = obj.data
    for bone in armature.bones:
        if bone.parent is None:
            boneRoot = ET.SubElement(domNode, 'node')
            loadBonesTree(bone, boneRoot, obj.name)
def loadNodeMesh(obj, domNode):
    """Fill *domNode* for a MESH object and register it for later passes.

    Records the mesh in the module-level ``mesh_targets`` map, emits an
    <instance_geometry> reference, and registers one controller entry
    (plus an <instance_controller> reference) per modifier on the object
    in ``controller_targets``.
    """
    worldMat = ET.SubElement(domNode, 'matrix')
    worldMat.text = matrixToStrList(obj.matrix_world.copy(), True)

    mesh = obj.data
    mesh_targets[mesh.name] = mesh

    geoRef = ET.SubElement(domNode, 'instance_geometry')
    geoRef.set('url', '#' + mesh.name)

    # Every modifier becomes a skin controller keyed "<modifier>.<object>.skin".
    for modifier in obj.modifiers:
        ctrlId = modifier.name + '.' + obj.name + '.skin'
        ctrlRef = ET.SubElement(domNode, 'instance_controller')
        ctrlRef.set('url', '#' + ctrlId)
        controller_targets[ctrlId] = {'object': obj, 'mesh': mesh, 'modifier': modifier}
def loadLibControllers( lib_controllers ):
    """Emit one <controller>/<skin> per entry registered in controller_targets.

    For each skinned mesh this writes the vertex-group (bone) name list, a
    de-duplicated weight table, and the per-vertex (group, weight-index)
    pairs expected by the COLLADA <vertex_weights> element.
    """
    for c in controller_targets:
        meta = controller_targets[c]
        obj = meta['object']
        mesh = meta['mesh']
        # Object referenced by the modifier; used only for its name below.
        modifier = meta['modifier'].object
        vGroups = obj.vertex_groups
        sourceName_0 = c + '.groups'
        vertGroups = []
        for vg in vGroups:
            vertGroups.append(vg.name)
        bonesNameList = ' '.join( n for n in vertGroups)
        # weightDictionary maps a weight value to its index in `weights`, so
        # identical weight values are stored only once.
        weightDictionary = {}
        weights = []
        vcount = []
        v = []
        vertices = mesh.vertices
        for vert in vertices:
            # vcount: number of (group, weight) influences for this vertex.
            vcount.append(len(vert.groups))
            for g in vert.groups:
                if( g.weight not in weightDictionary ):
                    weightDictionary[g.weight] = len(weights)
                    weights.append(g.weight)
                weightIndex = weightDictionary[g.weight]
                # v holds interleaved (group index, weight index) pairs.
                v.append(g.group)
                v.append(weightIndex)
        sourceName_2 = c + '.skin.weights'
        weightsStr = ' '.join( "{:.4f}".format(w) for w in weights)
        ctrl = ET.SubElement(lib_controllers, 'controller')
        ctrl.set('id', c)
        ctrl.set('name', modifier.name)
        skin = ET.SubElement(ctrl, 'skin')
        skin.set('source', '#' + mesh.name)
        bsmat = ET.SubElement(skin, 'bind_shape_matrix')
        # NOTE: `object` shadows the builtin of the same name (left as-is).
        object = meta['object'];
        bsmat.text = matrixToStrList(object.matrix_local.copy(), True)
        buildSource(skin, bonesNameList, len(vGroups), sourceName_0, [ Param('GROUPS',DataType.string) ], SourceType.Name_array)
        buildSource(skin, weightsStr, len(weights), sourceName_2, [Param('WEIGHT',DataType.float)], SourceType.float_array)
        vertexWeightDom = ET.SubElement(skin, 'vertex_weights')
        vertexWeightDom.set('count', str(len(vcount)))
        addInputBlock(vertexWeightDom, 'GROUPS', '#' + sourceName_0, 0)
        addInputBlock(vertexWeightDom, 'WEIGHT', '#' + sourceName_2, 1)
        vcountDom = ET.SubElement(vertexWeightDom, 'vcount')
        vcountDom.text = ' '.join(str(val) for val in vcount )
        vDom = ET.SubElement(vertexWeightDom, 'v')
        vDom.text = ' '.join(str(val) for val in v )
def loadLibGeometries( lib_geometries ):
    """Emit one <geometry>/<mesh> per mesh registered in mesh_targets.

    Writes vertex positions, per-vertex UV coordinates for every UV layer,
    and an interleaved (vertex index, normal index) triangle list; quads
    are split into two triangles, and each polygon contributes one normal.
    """
    for g in mesh_targets:
        mesh = mesh_targets[g]
        vertices = mesh.vertices
        vertPosStrs = []
        for v in vertices:
            vertPosStrs.append(' '.join( "{:.4f}".format(val) for val in v.co ))
        sourceNamePos = g + '.vertex.position'
        # NOTE: the genexp variable reuses the name `str`; harmless since a
        # generator expression has its own scope, but easy to misread.
        vertStrData = ' '.join( str for str in vertPosStrs)
        loops = mesh.loops
        uvSet = 0
        allUVCoordsName = []
        allUVCoords = []
        uvLayers = mesh.uv_layers
        for uvLayer in uvLayers:
            uvData = uvLayer.data
            # UVs are flattened to per-vertex storage: if a vertex appears in
            # several loops with different UVs, the last loop visited wins.
            uvCoords = ['0.0 0.0'] * len(vertices)
            for li in range(len(loops)):
                vi = loops[li].vertex_index
                uvCoords[vi] = ' '.join( "{:.4f}".format(val) for val in uvData[li].uv )
            allUVCoordsName.append( g + '.uvlayer' + str(uvSet))
            allUVCoords.append(uvCoords)
            uvSet+=1
        polygons = mesh.polygons
        triangles = []
        triangleNormals = []
        for p in polygons:
            nal = numpy.asarray(p.normal)
            # ni: index of this polygon's normal, shared by all its triangles.
            ni = len(triangleNormals)
            triangleNormals.append(' '.join( "{:.4f}".format(val) for val in nal))
            s = p.loop_start
            if(p.loop_total == 3):
                triangles.append( loops[s+0].vertex_index)
                triangles.append(ni)
                triangles.append( loops[s+1].vertex_index)
                triangles.append(ni)
                triangles.append( loops[s+2].vertex_index)
                triangles.append(ni)
            elif(p.loop_total == 4):
                # Quad split into the fan (0,1,2) + (0,2,3).
                triangles.append( loops[s+0].vertex_index)
                triangles.append(ni)
                triangles.append( loops[s+1].vertex_index)
                triangles.append(ni)
                triangles.append( loops[s+2].vertex_index)
                triangles.append(ni)
                triangles.append( loops[s+0].vertex_index)
                triangles.append(ni)
                triangles.append( loops[s+2].vertex_index)
                triangles.append(ni)
                triangles.append( loops[s+3].vertex_index)
                triangles.append(ni)
            else:
                # N-gons are not supported; they are skipped with a warning.
                print('Plygon has to be triangles or quads...')
        sourceTriNormals = g + '.triangle.normals'
        sourceTriNormalsData = ' '.join( str for str in triangleNormals)
        geometry = ET.SubElement(lib_geometries, 'geometry')
        geometry.set('id', g)
        meshDom = ET.SubElement(geometry, 'mesh')
        buildSource(meshDom, vertStrData, len(vertices) * 3, sourceNamePos,
                    [ Param('x',DataType.float), Param('y',DataType.float), Param('z',DataType.float) ], SourceType.float_array)
        for i in range(len(allUVCoords)):
            uvCoord = allUVCoords[i]
            datum = ' '.join( str for str in uvCoord )
            buildSource(meshDom, datum, len(allUVCoords[i]) * 2, allUVCoordsName[i],
                        [ Param('u',DataType.float), Param('v',DataType.float)], SourceType.float_array)
        buildSource(meshDom, sourceTriNormalsData, len(triangleNormals) * 3, sourceTriNormals,
                    [ Param('x',DataType.float), Param('y',DataType.float), Param('z',DataType.float) ], SourceType.float_array)
        verticesDom = ET.SubElement(meshDom, 'vertices')
        verticesDomID = g + '.vertices'
        verticesDom.set('id', verticesDomID)
        vertexPosInput = ET.SubElement(verticesDom, 'input')
        vertexPosInput.set('semantic', 'POSITION')
        vertexPosInput.set('source', '#' + sourceNamePos)
        for i in range(len(allUVCoords)):
            vertexTexCoordInput = ET.SubElement(verticesDom, 'input')
            vertexTexCoordInput.set('semantic', 'TEXCOORD' + str(i))
            vertexTexCoordInput.set('source', '#' + allUVCoordsName[i])
        trianglesDom = ET.SubElement(meshDom, 'triangles')
        # NOTE(review): `triangles` holds 6 entries per triangle (interleaved
        # vertex+normal indices), so len(triangles)/3 is twice the triangle
        # count -- confirm the consumer of this file expects that value.
        trianglesDom.set('count', str(int(len(triangles)/3)))
        triangleInput = ET.SubElement(trianglesDom, 'input')
        triangleInput.set('semantic', 'VERTEX')
        triangleInput.set('source', '#' + verticesDomID)
        triangleInput.set('offset', '0')
        triangleInput = ET.SubElement(trianglesDom, 'input')
        triangleInput.set('semantic', 'NORMAL')
        triangleInput.set('source', '#' + sourceTriNormals)
        triangleInput.set('offset', '1')
        pData = ' '.join( str(v) for v in triangles)
        pDom = ET.SubElement(trianglesDom, 'p')
        pDom.text = pData
def loadLibVisualScene(lib_visual_scene):
    """Build the <visual_scene> tree from the first Blender scene.

    Every scene object becomes a <node>; MESH and ARMATURE objects
    additionally get their type-specific content filled in (which also
    registers meshes/controllers for the later library passes).
    """
    scene = bpy.data.scenes[0]
    domScene = ET.SubElement(lib_visual_scene, 'visual_scene')
    for obj in scene.objects:
        node = ET.SubElement(domScene, 'node')
        node.set('id', obj.name)
        node.set('obj_type', obj.type)
        node.set('type', 'NODE')
        if obj.type == 'MESH':
            loadNodeMesh(obj, node)
        elif obj.type == 'ARMATURE':
            loadNodeArmature(obj, node)
def buildAnimation( node, strip, armature ):
    """Emit COLLADA <source>/<sampler>/<channel> data for one NLA strip.

    Only OBJECT (bone pose) actions are exported; the MESH branch is a
    stub that merely prints diagnostics.  Per bone, a sorted keyframe
    timeline is built, every channel is sampled at each key time, and the
    resulting 4x4 transforms are written as one LINEAR-interpolated
    matrix track targeting "<bone>/transform".
    """
    if(strip == None):
        return;
    action = strip.action
    actionIDRoot = action.id_root
    if(actionIDRoot == 'MESH'):
        #print('Handle fcurve in MESH mode')
        #1. pick up vertices that changes in the clip.
        #2. build source, channel, sampler for each such vertex.
        fcurves = action.fcurves
        print('Build sources and channels for vertices ' + str(len(fcurves)))
        print('Removing dead vertex is required.')
    elif (actionIDRoot == 'OBJECT'):
        channels = action.fcurves
        boneTimeSets = {}
        boneTimelines = {}
        boneAnimes = {}
        for ch in channels:
            # data_path looks like pose.bones["Name"].location; pull the
            # bone name from between the quotes and the channel kind from
            # the last dotted component.
            rna = ch.data_path
            f0 = rna.find('\"') + 1
            f1 = rna.find('\"', f0)
            boneName = rna[f0:f1]
            locRotScalType = rna.split('.')[-1]
            # Lookup validates the bone exists; the value itself is unused.
            bone = armature.bones[boneName]
            if(boneName not in boneTimeSets):
                boneTimeSets[boneName] = set()
            # Union of key times across all of this bone's channels.
            kfpts = ch.keyframe_points
            for kf in kfpts:
                boneTimeSets[boneName].add(kf.co[0])
            if(boneName not in boneAnimes):
                boneAnimes[boneName] = AnimeChs()
            boneAnime = boneAnimes[boneName]
            if(locRotScalType == 'rotation_quaternion'):
                boneAnime.quatChs.append(ch)
            elif(locRotScalType == 'location'):
                boneAnime.locChs.append(ch)
            elif(locRotScalType == 'scale'):
                boneAnime.scaleChs.append(ch)
        boneFCurves = {}
        boneInterpolations = {}
        for bn in boneAnimes:
            abone = armature.bones[bn]
            connect = abone.use_connect
            timeline = list( boneTimeSets[bn])
            timeline.sort()
            boneTimelines[bn] = timeline
            boneAnime = boneAnimes[bn]
            if(bn not in boneFCurves):
                boneFCurves[bn] = []
            boneMatStr = boneFCurves[bn]
            if(bn not in boneInterpolations):
                boneInterpolations[bn] = []
            boneInterpolation = boneInterpolations[bn]
            for tl in timeline:
                location= []
                quaternion = []
                scale = []
                # Connected bones cannot translate, so location channels
                # are only sampled when the bone is not connected.
                if(not connect):
                    for ch in boneAnime.locChs:
                        location.append(ch.evaluate(tl))
                for ch in boneAnime.quatChs:
                    quaternion.append(ch.evaluate(tl))
                for ch in boneAnime.scaleChs:
                    scale.append(ch.evaluate(tl))
                # Incomplete channel sets fall back to identity matrices.
                matLoc = Matrix.Identity(4) if len(location) != 3 else Matrix.Translation( ( location[0], location[1], location[2]) )
                matRot = Matrix.Identity(4) if len(quaternion) != 4 else Quaternion( (quaternion[0], quaternion[1], quaternion[2], quaternion[3]) ).to_matrix().to_4x4()
                matScl = Matrix.Identity(4)
                if( len(scale) == 3):
                    matScl[0][0] = scale[0]
                    matScl[1][1] = scale[1]
                    matScl[2][2] = scale[2]
                # NOTE(review): composition order rot*scale*loc (pre-2.80
                # "*" operator) is unusual -- confirm the consumer of the
                # .dae composes transforms the same way.
                mat = matRot * matScl * matLoc
                matStrs = matrixToStrList(mat, True)
                boneMatStr.append(matStrs)
                boneInterpolation.append('LINEAR')
        # One TIME/TRANSFORM/INTERPOLATION source triple plus a sampler
        # and channel per animated bone.
        for bn in boneFCurves:
            timeline = boneTimelines[bn]
            timelineDatumName = bn + '.timeline'
            datumTimeline = ' '.join( str(v) for v in timeline)
            buildSource(node, datumTimeline, len(timeline), timelineDatumName,
                        [ Param('TIME',DataType.float) ], SourceType.float_array)
            transMats = boneFCurves[bn]
            transformName = bn + '.transform'
            datumTransform = ' '.join( v for v in transMats )
            buildSource(node, datumTransform, len(transMats) * 16, transformName,
                        [ Param('TRANSFORM',DataType.float4x4) ], SourceType.float_array)
            interpolation = boneInterpolations[bn]
            interpoName = bn + '.interpolation'
            datumInterpo = ' '.join( v for v in interpolation )
            buildSource(node, datumInterpo, len(interpolation), interpoName,
                        [ Param('INTERPOLATION',DataType.string) ], SourceType.Name_array)
            samplerID = bn + '.sampler'
            sampler = ET.SubElement(node, 'sampler')
            sampler.set('id', samplerID)
            addInputBlock(sampler, 'INPUT', '#' + timelineDatumName)
            addInputBlock(sampler, 'OUTPUT', '#' + transformName)
            addInputBlock(sampler, 'INTERPOLATION', '#' + interpoName)
            channel = ET.SubElement(node, 'channel')
            channel.set('source', '#' + samplerID)
            channel.set('target', bn + '/transform')
# DO NOT Support MESH animation yet.
# ONLY support linear matrix interpolation for smaller file size.
def loadLibAnimations(lib_animations):
    """Emit one <animation> element per NLA track of every ARMATURE object.

    Mesh shape-key animation is intentionally not exported (see the
    module-level notes).  Each track exports only its first strip, via
    buildAnimation.
    """
    for obj in bpy.data.scenes[0].objects:
        obj.update_from_editmode()
        # Guard clauses: only armatures with animation data are exported.
        if obj.type != 'ARMATURE':
            continue
        animData = obj.animation_data
        if animData is None:
            continue
        for track in animData.nla_tracks:
            trackNode = ET.SubElement(lib_animations, 'animation')
            trackNode.set('id', obj.name + '.' + track.name)
            buildAnimation(trackNode, track.strips[0], obj.data)
def prettify(root):
    """Indent an ElementTree in place using newlines and tabs.

    Iterative depth-first pass: every element that has children gets its
    text set to a newline plus child-level tabs, each child's tail set to
    the same indent, and the last child's tail dedented back to the
    parent's level so the closing tag lines up.
    """
    pending = [(root, 0)]
    while pending:
        node, level = pending.pop()
        if len(node) == 0:
            continue
        childIndent = '\n' + '\t' * (level + 1)
        node.text = childIndent
        for child in node:
            child.tail = childIndent
            pending.append((child, level + 1))
        # The last child's tail closes the parent, hence one level less.
        node[-1].tail = '\n' + '\t' * level
def export(context, filepath):
    """Export the current Blender scene as a COLLADA (.dae) file at *filepath*."""
    collada = ET.Element('COLLADA')
    collada.set('xmlns', 'http://www.collada.org/2005/11/COLLADASchema')
    collada.set('version', '1.5.0')
    collada.set('xmlns:xsi', 'http://www.w3.org/2001/XMLSchema-instance')

    libAnimations = ET.SubElement(collada, 'library_animations')
    libGeometries = ET.SubElement(collada, 'library_geometries')
    libControllers = ET.SubElement(collada, 'library_controllers')
    libVisualScenes = ET.SubElement(collada, 'library_visual_scenes')

    # The visual-scene pass populates mesh_targets/controller_targets, so
    # it must run before the geometry and controller passes.
    loadLibVisualScene(libVisualScenes)
    loadLibGeometries(libGeometries)
    loadLibControllers(libControllers)
    loadLibAnimations(libAnimations)

    prettify(collada)
    ET.ElementTree(collada).write(filepath, encoding="utf-8", xml_declaration=True)
#### comment this test output part when deploying. ####
#export(bpy.context, r'D://projects//dae_library//assets//dae_dev_mesh.dae')
| [
"mathutils.Matrix.Identity",
"numpy.asarray",
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.ElementTree",
"mathutils.Quaternion",
"mathutils.Matrix.Translation",
"os.system",
"xml.etree.ElementTree.SubElement"
] | [((158, 174), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (167, 174), False, 'import os\n'), ((695, 726), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['domNode', '"""input"""'], {}), "(domNode, 'input')\n", (708, 726), True, 'import xml.etree.ElementTree as ET\n'), ((975, 1007), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['domNode', '"""source"""'], {}), "(domNode, 'source')\n", (988, 1007), True, 'import xml.etree.ElementTree as ET\n'), ((1048, 1090), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['sourceNode', 'sourceType.name'], {}), '(sourceNode, sourceType.name)\n', (1061, 1090), True, 'import xml.etree.ElementTree as ET\n'), ((1202, 1247), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['sourceNode', '"""technique_common"""'], {}), "(sourceNode, 'technique_common')\n", (1215, 1247), True, 'import xml.etree.ElementTree as ET\n'), ((1263, 1297), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['techcom', '"""accessor"""'], {}), "(techcom, 'accessor')\n", (1276, 1297), True, 'import xml.etree.ElementTree as ET\n'), ((3232, 3264), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['domNode', '"""matrix"""'], {}), "(domNode, 'matrix')\n", (3245, 3264), True, 'import xml.etree.ElementTree as ET\n'), ((3645, 3677), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['domNode', '"""matrix"""'], {}), "(domNode, 'matrix')\n", (3658, 3677), True, 'import xml.etree.ElementTree as ET\n'), ((3779, 3822), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['domNode', '"""instance_geometry"""'], {}), "(domNode, 'instance_geometry')\n", (3792, 3822), True, 'import xml.etree.ElementTree as ET\n'), ((11149, 11196), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['lib_visual_scene', '"""visual_scene"""'], {}), "(lib_visual_scene, 'visual_scene')\n", (11162, 11196), True, 'import xml.etree.ElementTree as ET\n'), ((18084, 18105), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""COLLADA"""'], {}), 
"('COLLADA')\n", (18094, 18105), True, 'import xml.etree.ElementTree as ET\n'), ((18315, 18359), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['collada', '"""library_animations"""'], {}), "(collada, 'library_animations')\n", (18328, 18359), True, 'import xml.etree.ElementTree as ET\n'), ((18381, 18425), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['collada', '"""library_geometries"""'], {}), "(collada, 'library_geometries')\n", (18394, 18425), True, 'import xml.etree.ElementTree as ET\n'), ((18452, 18497), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['collada', '"""library_controllers"""'], {}), "(collada, 'library_controllers')\n", (18465, 18497), True, 'import xml.etree.ElementTree as ET\n'), ((18521, 18568), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['collada', '"""library_visual_scenes"""'], {}), "(collada, 'library_visual_scenes')\n", (18534, 18568), True, 'import xml.etree.ElementTree as ET\n'), ((18769, 18792), 'xml.etree.ElementTree.ElementTree', 'ET.ElementTree', (['collada'], {}), '(collada)\n', (18783, 18792), True, 'import xml.etree.ElementTree as ET\n'), ((1416, 1448), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['accessor', '"""param"""'], {}), "(accessor, 'param')\n", (1429, 1448), True, 'import xml.etree.ElementTree as ET\n'), ((2380, 2408), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['dom', '"""matrix"""'], {}), "(dom, 'matrix')\n", (2393, 2408), True, 'import xml.etree.ElementTree as ET\n'), ((2471, 2499), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['dom', '"""matrix"""'], {}), "(dom, 'matrix')\n", (2484, 2499), True, 'import xml.etree.ElementTree as ET\n'), ((2575, 2593), 'mathutils.Matrix.Identity', 'Matrix.Identity', (['(4)'], {}), '(4)\n', (2590, 2593), False, 'from mathutils import Matrix, Quaternion, Vector\n'), ((3456, 3486), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['domNode', '"""node"""'], {}), "(domNode, 'node')\n", (3469, 3486), True, 'import 
xml.etree.ElementTree as ET\n'), ((3962, 4007), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['domNode', '"""instance_controller"""'], {}), "(domNode, 'instance_controller')\n", (3975, 4007), True, 'import xml.etree.ElementTree as ET\n'), ((5281, 5325), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['lib_controllers', '"""controller"""'], {}), "(lib_controllers, 'controller')\n", (5294, 5325), True, 'import xml.etree.ElementTree as ET\n'), ((5416, 5443), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['ctrl', '"""skin"""'], {}), "(ctrl, 'skin')\n", (5429, 5443), True, 'import xml.etree.ElementTree as ET\n'), ((5513, 5553), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['skin', '"""bind_shape_matrix"""'], {}), "(skin, 'bind_shape_matrix')\n", (5526, 5553), True, 'import xml.etree.ElementTree as ET\n'), ((5964, 6001), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['skin', '"""vertex_weights"""'], {}), "(skin, 'vertex_weights')\n", (5977, 6001), True, 'import xml.etree.ElementTree as ET\n'), ((6230, 6270), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['vertexWeightDom', '"""vcount"""'], {}), "(vertexWeightDom, 'vcount')\n", (6243, 6270), True, 'import xml.etree.ElementTree as ET\n'), ((6349, 6384), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['vertexWeightDom', '"""v"""'], {}), "(vertexWeightDom, 'v')\n", (6362, 6384), True, 'import xml.etree.ElementTree as ET\n'), ((8907, 8948), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['lib_geometries', '"""geometry"""'], {}), "(lib_geometries, 'geometry')\n", (8920, 8948), True, 'import xml.etree.ElementTree as ET\n'), ((8997, 9028), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['geometry', '"""mesh"""'], {}), "(geometry, 'mesh')\n", (9010, 9028), True, 'import xml.etree.ElementTree as ET\n'), ((9803, 9837), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['meshDom', '"""vertices"""'], {}), "(meshDom, 'vertices')\n", (9816, 9837), True, 'import 
xml.etree.ElementTree as ET\n'), ((9948, 9983), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['verticesDom', '"""input"""'], {}), "(verticesDom, 'input')\n", (9961, 9983), True, 'import xml.etree.ElementTree as ET\n'), ((10378, 10413), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['meshDom', '"""triangles"""'], {}), "(meshDom, 'triangles')\n", (10391, 10413), True, 'import xml.etree.ElementTree as ET\n'), ((10509, 10545), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['trianglesDom', '"""input"""'], {}), "(trianglesDom, 'input')\n", (10522, 10545), True, 'import xml.etree.ElementTree as ET\n'), ((10725, 10761), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['trianglesDom', '"""input"""'], {}), "(trianglesDom, 'input')\n", (10738, 10761), True, 'import xml.etree.ElementTree as ET\n'), ((10988, 11020), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['trianglesDom', '"""p"""'], {}), "(trianglesDom, 'p')\n", (11001, 11020), True, 'import xml.etree.ElementTree as ET\n'), ((11318, 11349), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['domScene', '"""node"""'], {}), "(domScene, 'node')\n", (11331, 11349), True, 'import xml.etree.ElementTree as ET\n'), ((1876, 1894), 'numpy.asarray', 'numpy.asarray', (['mat'], {}), '(mat)\n', (1889, 1894), False, 'import numpy\n'), ((2998, 3024), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['dom', '"""node"""'], {}), "(dom, 'node')\n", (3011, 3024), True, 'import xml.etree.ElementTree as ET\n'), ((7510, 7533), 'numpy.asarray', 'numpy.asarray', (['p.normal'], {}), '(p.normal)\n', (7523, 7533), False, 'import numpy\n'), ((10169, 10204), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['verticesDom', '"""input"""'], {}), "(verticesDom, 'input')\n", (10182, 10204), True, 'import xml.etree.ElementTree as ET\n'), ((16206, 16236), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['node', '"""sampler"""'], {}), "(node, 'sampler')\n", (16219, 16236), True, 'import 
xml.etree.ElementTree as ET\n'), ((16519, 16549), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['node', '"""channel"""'], {}), "(node, 'channel')\n", (16532, 16549), True, 'import xml.etree.ElementTree as ET\n'), ((17368, 17410), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['lib_animations', '"""animation"""'], {}), "(lib_animations, 'animation')\n", (17381, 17410), True, 'import xml.etree.ElementTree as ET\n'), ((14698, 14716), 'mathutils.Matrix.Identity', 'Matrix.Identity', (['(4)'], {}), '(4)\n', (14713, 14716), False, 'from mathutils import Matrix, Quaternion, Vector\n'), ((14395, 14413), 'mathutils.Matrix.Identity', 'Matrix.Identity', (['(4)'], {}), '(4)\n', (14410, 14413), False, 'from mathutils import Matrix, Quaternion, Vector\n'), ((14441, 14500), 'mathutils.Matrix.Translation', 'Matrix.Translation', (['(location[0], location[1], location[2])'], {}), '((location[0], location[1], location[2]))\n', (14459, 14500), False, 'from mathutils import Matrix, Quaternion, Vector\n'), ((14529, 14547), 'mathutils.Matrix.Identity', 'Matrix.Identity', (['(4)'], {}), '(4)\n', (14544, 14547), False, 'from mathutils import Matrix, Quaternion, Vector\n'), ((14577, 14649), 'mathutils.Quaternion', 'Quaternion', (['(quaternion[0], quaternion[1], quaternion[2], quaternion[3])'], {}), '((quaternion[0], quaternion[1], quaternion[2], quaternion[3]))\n', (14587, 14649), False, 'from mathutils import Matrix, Quaternion, Vector\n')] |
#!/usr/bin/python
import os
import sys
import glob
import argparse
import tempfile
import numpy as np
import matplotlib.pyplot as plt
import pickle
import scipy.stats as stats
from copy import deepcopy
from subprocess import Popen, PIPE
from get_qdec_info import get_qdec_info
from fs_load_stats import fs_load_stats
from aparc12 import *
from scai_utils import *
from scai_stats import cohens_d
# Per-subject analysis directories (one S?? folder per subject).
BASE_DIR = "/users/cais/STUT/analysis/aparc12_tracts"
DATA_DIR = "/users/cais/STUT/DATA"
# FreeSurfer subjects directory (stats/ and surf/ files are read from here).
FSDATA_DIR = "/users/cais/STUT/FSDATA"
# Color table mapping aparc12 label numbers to names.
CTAB = "/users/cais/STUT/slaparc_550.ctab"
# Filename pattern for WM segstats summaries; not referenced in this script.
SEGSTATS_SUM_WC = "aparc12_wm%dmm.segstats.txt"
# Uncorrected per-test significance threshold used for reporting.
P_THRESH_UNC = 0.05
hemis = ["lh", "rh"]
# Group order matters: index 0 = PFS (controls), 1 = PWS (matches isPWS coding).
grps = ["PFS", "PWS"]
# RGB plot colors per group (used only by the commented-out plotting code).
grpColors = {"PFS": [0, 0, 0], "PWS": [1, 0, 0]}
# Main entry: compares aparc12 cortical ROI surface area (raw and
# hemisphere-normalized) and average thickness between the PWS and PFS
# groups, and correlates the same measures with SSI-4 scores within PWS.
if __name__ == "__main__":
    ap = argparse.ArgumentParser(description="Analyze aparc12 surface annotation: Surface area and average thickness")
    ap.add_argument("-r", dest="bReload", action="store_true", \
                    help="Reload data (time-consuming)")
    # ap.add_argument("hemi", help="Hemisphere {lh, rh}")
    # if len(sys.argv) == 1:
    #     ap.print_help()
    #     sys.exit(0)
    # === Args input arguments === #
    args = ap.parse_args()
    bReload = args.bReload
    # hemi = args.hemi
    # assert(hemis.count(hemi) == 1)
    # === Determine the subject list and their group memberships === #
    # Subject IDs come from the S?? directories; diagnosis and SSI score
    # are looked up through the qdec helper.
    check_dir(BASE_DIR)
    ds = glob.glob(os.path.join(BASE_DIR, "S??"))
    ds.sort()
    sIDs = []
    isPWS = []
    SSI4 = []
    for (i0, t_path) in enumerate(ds):
        (t_path_0, t_sID) = os.path.split(t_path)
        sIDs.append(t_sID)
        SSI4.append(get_qdec_info(t_sID, "SSI"))
        if get_qdec_info(t_sID, "diagnosis") == "PWS":
            isPWS.append(1)
        else:
            isPWS.append(0)
    isPWS = np.array(isPWS)
    SSI4 = np.array(SSI4)
    assert(len(sIDs) > 0)
    assert(len(sIDs) == len(isPWS))
    # === Get the list of cortical ROIs (Speech network only) ===
    rois0 = get_aparc12_cort_rois(bSpeech=True)
    check_file(CTAB)
    (ctab_roi_nums, ctab_roi_names) = read_ctab(CTAB)
    # Duplex into both hemispheres
    roi_names = []
    roi_nums = []
    for (i0, hemi) in enumerate(hemis):
        for (i1, roi) in enumerate(rois0):
            t_roi_name = "%s_%s" % (hemi, roi)
            assert(ctab_roi_names.count(t_roi_name) == 1)
            idx = ctab_roi_names.index(t_roi_name)
            roi_names.append(t_roi_name)
            roi_nums.append(ctab_roi_nums[idx])
    assert(len(roi_names) == len(roi_nums))
    # === Load data: Loop through all subjects === #
    # Results are cached in a pickle, so the slow mri_segstats pass only
    # runs when -r is supplied.
    cachePklFN = "aparc12_surface_stats_dset.pkl"
    nROIs = len(roi_names)
    ns = len(sIDs)
    if bReload:
        print("INFO: bReload = True: Reloading data (time-consuming)\n")
        labArea = np.zeros([ns, nROIs])
        labAvgThick = np.zeros([ns, nROIs])
        # Label area normalized by hemisphere surface area
        labAreaNorm = np.zeros([ns, nROIs])
        for (i0, sID) in enumerate(sIDs):
            t_rois = []
            t_roi_nums = []
            t_area = []
            t_area_norm = []
            t_thick = []
            for (i1, hemi) in enumerate(hemis):
                # == Load hemisphere total surface area == #
                hemiStatsFN = os.path.join(FSDATA_DIR, sID, \
                                           "stats", "%s.aparc.stats" % hemi)
                check_file(hemiStatsFN)
                t_hemiSurfArea = fs_load_stats(hemiStatsFN, "SurfArea")
                tmpfn = tempfile.mktemp()
                hthick = os.path.join(FSDATA_DIR, sID, "surf", \
                                      "%s.thickness" % hemi)
                check_file(hthick)
                print("Loading data from subject %s, hemisphere %s" \
                      % (sID, hemi))
                # Summarize the thickness overlay per aparc12 label with
                # FreeSurfer's mri_segstats, writing to a temp file.
                (sout, serr) = Popen(["mri_segstats", "--annot", \
                                      sID, hemi, "aparc12", \
                                      "--in", hthick, \
                                      "--sum", tmpfn], \
                                     stdout=PIPE, stderr=PIPE).communicate()
                sout = read_text_file(tmpfn)
                os.system("rm -rf %s" % tmpfn)
                # Skip the "# ..." header lines of the segstats summary.
                k0 = 0
                while (sout[k0].startswith("# ")):
                    k0 = k0 + 1
                sout = sout[k0 :]
                for tline in sout:
                    if len(tline) == 0:
                        break;
                    t_items = remove_empty_strings(\
                        tline.replace('\t', ' ').split(' '))
                    # A valid data row has 10 columns; per the indexing
                    # below: [1]=label number, [3]=area, [4]=name,
                    # [5]=mean thickness.
                    if len(t_items) == 10:
                        t_rois.append(hemi + "_" + t_items[4])
                        # Offset label numbers per hemisphere (lh: +1000,
                        # rh: +2000) to match the ctab numbering.
                        if hemi == "lh":
                            t_roi_nums.append(1000 + int(t_items[1]))
                        else:
                            t_roi_nums.append(2000 + int(t_items[1]))
                        t_area.append(float(t_items[3]))
                        t_area_norm.append(float(t_items[3]) / t_hemiSurfArea)
                        t_thick.append(float(t_items[5]))
            # == Matching and filling values == #
            # ROIs absent from a subject's segstats output keep their 0.0.
            for (i2, t_rn) in enumerate(roi_nums):
                if t_roi_nums.count(t_rn) > 0:
                    idx = t_roi_nums.index(t_rn)
                    labArea[i0][i2] = t_area[idx]
                    labAreaNorm[i0][i2] = t_area_norm[idx]
                    labAvgThick[i0][i2] = t_thick[idx]
        # === Save to pkl file === #
        dset = {"labArea": labArea, \
                "labAreaNorm": labAreaNorm, \
                "labAvgThick": labAvgThick}
        os.system("rm -rf %s" % cachePklFN)
        cachePklF = open(cachePklFN, "wb")
        pickle.dump(dset, cachePklF)
        cachePklF.close()
        check_file(cachePklFN)
        print("INFO: Saved loaded data to file: %s\n" % os.path.abspath(cachePklFN))
    else:
        print("INFO: Loading saved data from file: %s\n" % os.path.abspath(cachePklFN))
        cachePklF = open(cachePklFN, "rb")
        dset = pickle.load(cachePklF)
        cachePklF.close()
        labArea = dset["labArea"]
        labAreaNorm = dset["labAreaNorm"]
        labAvgThick = dset["labAvgThick"]
    # === Check data validity === #
    assert(len(labArea) == ns)
    assert(len(labAreaNorm) == ns)
    assert(len(labAvgThick) == ns)
    # === Statistical comparison === #
    mean_area = {}
    std_area = {}
    nsg = {}
    for (i0, grp) in enumerate(grps):
        # NOTE(review): np.nonzero returns a tuple of index arrays, so this
        # len(...) is always 1, not the group size; the intended count is
        # len(np.nonzero(isPWS == i0)[0]).  nsg is only referenced in the
        # commented-out SEM line below, so the latent bug is inert.
        nsg[grp] = len(np.nonzero(isPWS == i0))
        mean_area[grp] = np.mean(labArea[isPWS == i0], axis=0)
        # std_area[grp] = np.std(labArea[isPWS == i0], axis=0) / np.sqrt(nsg[grp])
        std_area[grp] = np.std(labArea[isPWS == i0], axis=0)
    # Per-ROI two-sample t-tests on each measure, reported uncorrected.
    cmprItems = ["labArea", "labAreaNorm", "labAvgThick"]
    for (h0, cmprItem) in enumerate(cmprItems):
        print("--- List of significant differences in %s (p < %f uncorrected) ---" \
              % (cmprItem, P_THRESH_UNC))
        p_tt_val = np.zeros([nROIs])
        t_tt_val = np.zeros([nROIs])
        for (i0, t_roi) in enumerate(roi_names):
            if h0 == 0:
                dat_PWS = labArea[isPWS == 1, i0]
                dat_PFS = labArea[isPWS == 0, i0]
            elif h0 == 1:
                dat_PWS = labAreaNorm[isPWS == 1, i0]
                dat_PFS = labAreaNorm[isPWS == 0, i0]
            elif h0 == 2:
                dat_PWS = labAvgThick[isPWS == 1, i0]
                dat_PFS = labAvgThick[isPWS == 0, i0]
            (t_tt, p_tt) = stats.ttest_ind(dat_PWS, dat_PFS)
            p_tt_val[i0] = p_tt
            t_tt_val[i0] = t_tt
            if p_tt_val[i0] < P_THRESH_UNC:
                if t_tt_val[i0] < 0:
                    dirString = "PWS < PFS"
                else:
                    dirString = "PWS > PFS"
                print("%s: p = %f; t = %f (%s)" \
                      % (t_roi, p_tt_val[i0], t_tt_val[i0], dirString))
                print("\tMean +/- SD: PWS: %.5f +/- %.5f; PFS: %.5f +/- %.5f" \
                      % (np.mean(dat_PWS), np.std(dat_PWS), \
                         np.mean(dat_PFS), np.std(dat_PFS)))
                print("\tCohens_d = %.3f" % cohens_d(dat_PWS, dat_PFS))
        print("\n")
    # === Spearman correlation === #
    # Severity correlations are computed within the PWS group only.
    for (h0, cmprItem) in enumerate(cmprItems):
        print("--- Spearman correlations with SSI4 in %s (p < %f uncorrected) ---" \
              % (cmprItem, P_THRESH_UNC))
        p_spc_val = np.zeros([nROIs])
        rho_spc_val = np.zeros([nROIs])
        for (i0, t_roi) in enumerate(roi_names):
            if h0 == 0:
                (r_spc, p_spc) = stats.spearmanr(SSI4[isPWS == 1], \
                                                 labArea[isPWS == 1, i0])
            elif h0 == 1:
                (r_spc, p_spc) = stats.spearmanr(SSI4[isPWS == 1], \
                                                 labAreaNorm[isPWS == 1, i0])
            elif h0 == 2:
                (r_spc, p_spc) = stats.spearmanr(SSI4[isPWS == 1], \
                                                 labAvgThick[isPWS == 1, i0])
            p_spc_val[i0] = p_spc
            rho_spc_val[i0] = r_spc
            if p_spc_val[i0] < P_THRESH_UNC:
                if rho_spc_val[i0] < 0:
                    dirString = "-"
                else:
                    dirString = "+"
                print("%s: p = %f; rho = %f (%s)" \
                      % (t_roi, p_spc_val[i0], rho_spc_val[i0], dirString))
        print("\n")
    # === Compare combined dIFo and vIFo === #
    # Pool the ventral and dorsal IFo subregions before testing.
    lh_IFo_area = {}
    lh_IFo_areaNorm = {}
    for (i0, grp) in enumerate(grps):
        lh_IFo_area[grp] = labArea[isPWS == i0, roi_names.index("lh_vIFo")] + \
                           labArea[isPWS == i0, roi_names.index("lh_dIFo")]
        lh_IFo_areaNorm[grp] = labAreaNorm[isPWS == i0, roi_names.index("lh_vIFo")] + \
                               labAreaNorm[isPWS == i0, roi_names.index("lh_dIFo")]
    (t_tt, p_tt) = stats.ttest_ind(lh_IFo_area["PWS"], \
                                   lh_IFo_area["PFS"])
    print("-- Comparing lh_IFo area: --")
    print("\tp = %f; t = %f" % (p_tt, t_tt))
    print("\tPWS: %.1f +/- %.1f; PFS: %.1f +/- %.1f" \
          % (np.mean(lh_IFo_area["PWS"]), np.std(lh_IFo_area["PWS"]), \
             np.mean(lh_IFo_area["PFS"]), np.std(lh_IFo_area["PFS"])))
    print("\n")
    (t_tt, p_tt) = stats.ttest_ind(lh_IFo_areaNorm["PWS"], \
                                   lh_IFo_areaNorm["PFS"])
    print("-- Comparing lh_IFo areaNorm: --")
    print("\tp = %f; t = %f" % (p_tt, t_tt))
    print("\tPWS: %.1e +/- %.1e; PFS: %.1e +/- %.1e" \
          % (np.mean(lh_IFo_areaNorm["PWS"]), np.std(lh_IFo_areaNorm["PWS"]), \
             np.mean(lh_IFo_areaNorm["PFS"]), np.std(lh_IFo_areaNorm["PFS"])))
    # === Correlating combined IFo with SSI4 === #
    (r_spc, p_spc) = stats.spearmanr(SSI4[isPWS == 1], lh_IFo_area["PWS"])
    print("-- Correlating SSI4 with lh_IFo area: --")
    print("\tp = %f; rho = %f" % (p_spc, r_spc))
    print("\n")
    (r_spc, p_spc) = stats.spearmanr(SSI4[isPWS == 1], lh_IFo_areaNorm["PWS"])
    print("-- Correlating SSI4 with lh_IFo areaNorm: --")
    print("\tp = %f; rho = %f" % (p_spc, r_spc))
    print("\n")
    # === Visualization === #
    """
    for (i0, grp) in enumerate(grps):
        plt.errorbar(range(nROIs), mean_area[grp], yerr=std_area[grp], \
                     color=grpColors[grp])
    plt.xticks(range(nROIs), roi_names, rotation=90.0)
    plt.show()
    """
| [
"numpy.array",
"scipy.stats.ttest_ind",
"numpy.mean",
"argparse.ArgumentParser",
"subprocess.Popen",
"os.path.split",
"scipy.stats.spearmanr",
"get_qdec_info.get_qdec_info",
"pickle.load",
"numpy.nonzero",
"numpy.std",
"os.path.abspath",
"fs_load_stats.fs_load_stats",
"pickle.dump",
"os.... | [((770, 884), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Analyze aparc12 surface annotation: Surface area and average thickness"""'}), "(description=\n 'Analyze aparc12 surface annotation: Surface area and average thickness')\n", (793, 884), False, 'import argparse\n'), ((1810, 1825), 'numpy.array', 'np.array', (['isPWS'], {}), '(isPWS)\n', (1818, 1825), True, 'import numpy as np\n'), ((1837, 1851), 'numpy.array', 'np.array', (['SSI4'], {}), '(SSI4)\n', (1845, 1851), True, 'import numpy as np\n'), ((10177, 10232), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["lh_IFo_area['PWS']", "lh_IFo_area['PFS']"], {}), "(lh_IFo_area['PWS'], lh_IFo_area['PFS'])\n", (10192, 10232), True, 'import scipy.stats as stats\n'), ((10591, 10654), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["lh_IFo_areaNorm['PWS']", "lh_IFo_areaNorm['PFS']"], {}), "(lh_IFo_areaNorm['PWS'], lh_IFo_areaNorm['PFS'])\n", (10606, 10654), True, 'import scipy.stats as stats\n'), ((11075, 11128), 'scipy.stats.spearmanr', 'stats.spearmanr', (['SSI4[isPWS == 1]', "lh_IFo_area['PWS']"], {}), "(SSI4[isPWS == 1], lh_IFo_area['PWS'])\n", (11090, 11128), True, 'import scipy.stats as stats\n'), ((11270, 11327), 'scipy.stats.spearmanr', 'stats.spearmanr', (['SSI4[isPWS == 1]', "lh_IFo_areaNorm['PWS']"], {}), "(SSI4[isPWS == 1], lh_IFo_areaNorm['PWS'])\n", (11285, 11327), True, 'import scipy.stats as stats\n'), ((1409, 1438), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""S??"""'], {}), "(BASE_DIR, 'S??')\n", (1421, 1438), False, 'import os\n'), ((1565, 1586), 'os.path.split', 'os.path.split', (['t_path'], {}), '(t_path)\n', (1578, 1586), False, 'import os\n'), ((2844, 2865), 'numpy.zeros', 'np.zeros', (['[ns, nROIs]'], {}), '([ns, nROIs])\n', (2852, 2865), True, 'import numpy as np\n'), ((2888, 2909), 'numpy.zeros', 'np.zeros', (['[ns, nROIs]'], {}), '([ns, nROIs])\n', (2896, 2909), True, 'import numpy as np\n'), ((2991, 3012), 'numpy.zeros', 'np.zeros', (['[ns, 
nROIs]'], {}), '([ns, nROIs])\n', (2999, 3012), True, 'import numpy as np\n'), ((5768, 5803), 'os.system', 'os.system', (["('rm -rf %s' % cachePklFN)"], {}), "('rm -rf %s' % cachePklFN)\n", (5777, 5803), False, 'import os\n'), ((5855, 5883), 'pickle.dump', 'pickle.dump', (['dset', 'cachePklF'], {}), '(dset, cachePklF)\n', (5866, 5883), False, 'import pickle\n'), ((6184, 6206), 'pickle.load', 'pickle.load', (['cachePklF'], {}), '(cachePklF)\n', (6195, 6206), False, 'import pickle\n'), ((6693, 6730), 'numpy.mean', 'np.mean', (['labArea[isPWS == i0]'], {'axis': '(0)'}), '(labArea[isPWS == i0], axis=0)\n', (6700, 6730), True, 'import numpy as np\n'), ((6838, 6874), 'numpy.std', 'np.std', (['labArea[isPWS == i0]'], {'axis': '(0)'}), '(labArea[isPWS == i0], axis=0)\n', (6844, 6874), True, 'import numpy as np\n'), ((7129, 7146), 'numpy.zeros', 'np.zeros', (['[nROIs]'], {}), '([nROIs])\n', (7137, 7146), True, 'import numpy as np\n'), ((7166, 7183), 'numpy.zeros', 'np.zeros', (['[nROIs]'], {}), '([nROIs])\n', (7174, 7183), True, 'import numpy as np\n'), ((8655, 8672), 'numpy.zeros', 'np.zeros', (['[nROIs]'], {}), '([nROIs])\n', (8663, 8672), True, 'import numpy as np\n'), ((8695, 8712), 'numpy.zeros', 'np.zeros', (['[nROIs]'], {}), '([nROIs])\n', (8703, 8712), True, 'import numpy as np\n'), ((1634, 1661), 'get_qdec_info.get_qdec_info', 'get_qdec_info', (['t_sID', '"""SSI"""'], {}), "(t_sID, 'SSI')\n", (1647, 1661), False, 'from get_qdec_info import get_qdec_info\n'), ((1675, 1708), 'get_qdec_info.get_qdec_info', 'get_qdec_info', (['t_sID', '"""diagnosis"""'], {}), "(t_sID, 'diagnosis')\n", (1688, 1708), False, 'from get_qdec_info import get_qdec_info\n'), ((6642, 6665), 'numpy.nonzero', 'np.nonzero', (['(isPWS == i0)'], {}), '(isPWS == i0)\n', (6652, 6665), True, 'import numpy as np\n'), ((7670, 7703), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['dat_PWS', 'dat_PFS'], {}), '(dat_PWS, dat_PFS)\n', (7685, 7703), True, 'import scipy.stats as stats\n'), ((3326, 3389), 
'os.path.join', 'os.path.join', (['FSDATA_DIR', 'sID', '"""stats"""', "('%s.aparc.stats' % hemi)"], {}), "(FSDATA_DIR, sID, 'stats', '%s.aparc.stats' % hemi)\n", (3338, 3389), False, 'import os\n'), ((3508, 3546), 'fs_load_stats.fs_load_stats', 'fs_load_stats', (['hemiStatsFN', '"""SurfArea"""'], {}), "(hemiStatsFN, 'SurfArea')\n", (3521, 3546), False, 'from fs_load_stats import fs_load_stats\n'), ((3572, 3589), 'tempfile.mktemp', 'tempfile.mktemp', ([], {}), '()\n', (3587, 3589), False, 'import tempfile\n'), ((3616, 3676), 'os.path.join', 'os.path.join', (['FSDATA_DIR', 'sID', '"""surf"""', "('%s.thickness' % hemi)"], {}), "(FSDATA_DIR, sID, 'surf', '%s.thickness' % hemi)\n", (3628, 3676), False, 'import os\n'), ((4262, 4292), 'os.system', 'os.system', (["('rm -rf %s' % tmpfn)"], {}), "('rm -rf %s' % tmpfn)\n", (4271, 4292), False, 'import os\n'), ((5998, 6025), 'os.path.abspath', 'os.path.abspath', (['cachePklFN'], {}), '(cachePklFN)\n', (6013, 6025), False, 'import os\n'), ((6096, 6123), 'os.path.abspath', 'os.path.abspath', (['cachePklFN'], {}), '(cachePklFN)\n', (6111, 6123), False, 'import os\n'), ((8820, 8878), 'scipy.stats.spearmanr', 'stats.spearmanr', (['SSI4[isPWS == 1]', 'labArea[isPWS == 1, i0]'], {}), '(SSI4[isPWS == 1], labArea[isPWS == 1, i0])\n', (8835, 8878), True, 'import scipy.stats as stats\n'), ((10425, 10452), 'numpy.mean', 'np.mean', (["lh_IFo_area['PWS']"], {}), "(lh_IFo_area['PWS'])\n", (10432, 10452), True, 'import numpy as np\n'), ((10454, 10480), 'numpy.std', 'np.std', (["lh_IFo_area['PWS']"], {}), "(lh_IFo_area['PWS'])\n", (10460, 10480), True, 'import numpy as np\n'), ((10497, 10524), 'numpy.mean', 'np.mean', (["lh_IFo_area['PFS']"], {}), "(lh_IFo_area['PFS'])\n", (10504, 10524), True, 'import numpy as np\n'), ((10526, 10552), 'numpy.std', 'np.std', (["lh_IFo_area['PFS']"], {}), "(lh_IFo_area['PFS'])\n", (10532, 10552), True, 'import numpy as np\n'), ((10851, 10882), 'numpy.mean', 'np.mean', (["lh_IFo_areaNorm['PWS']"], {}), 
"(lh_IFo_areaNorm['PWS'])\n", (10858, 10882), True, 'import numpy as np\n'), ((10884, 10914), 'numpy.std', 'np.std', (["lh_IFo_areaNorm['PWS']"], {}), "(lh_IFo_areaNorm['PWS'])\n", (10890, 10914), True, 'import numpy as np\n'), ((10931, 10962), 'numpy.mean', 'np.mean', (["lh_IFo_areaNorm['PFS']"], {}), "(lh_IFo_areaNorm['PFS'])\n", (10938, 10962), True, 'import numpy as np\n'), ((10964, 10994), 'numpy.std', 'np.std', (["lh_IFo_areaNorm['PFS']"], {}), "(lh_IFo_areaNorm['PFS'])\n", (10970, 10994), True, 'import numpy as np\n'), ((8987, 9049), 'scipy.stats.spearmanr', 'stats.spearmanr', (['SSI4[isPWS == 1]', 'labAreaNorm[isPWS == 1, i0]'], {}), '(SSI4[isPWS == 1], labAreaNorm[isPWS == 1, i0])\n', (9002, 9049), True, 'import scipy.stats as stats\n'), ((3912, 4030), 'subprocess.Popen', 'Popen', (["['mri_segstats', '--annot', sID, hemi, 'aparc12', '--in', hthick, '--sum',\n tmpfn]"], {'stdout': 'PIPE', 'stderr': 'PIPE'}), "(['mri_segstats', '--annot', sID, hemi, 'aparc12', '--in', hthick,\n '--sum', tmpfn], stdout=PIPE, stderr=PIPE)\n", (3917, 4030), False, 'from subprocess import Popen, PIPE\n'), ((8351, 8377), 'scai_stats.cohens_d', 'cohens_d', (['dat_PWS', 'dat_PFS'], {}), '(dat_PWS, dat_PFS)\n', (8359, 8377), False, 'from scai_stats import cohens_d\n'), ((9159, 9221), 'scipy.stats.spearmanr', 'stats.spearmanr', (['SSI4[isPWS == 1]', 'labAvgThick[isPWS == 1, i0]'], {}), '(SSI4[isPWS == 1], labAvgThick[isPWS == 1, i0])\n', (9174, 9221), True, 'import scipy.stats as stats\n'), ((8209, 8225), 'numpy.mean', 'np.mean', (['dat_PWS'], {}), '(dat_PWS)\n', (8216, 8225), True, 'import numpy as np\n'), ((8227, 8242), 'numpy.std', 'np.std', (['dat_PWS'], {}), '(dat_PWS)\n', (8233, 8242), True, 'import numpy as np\n'), ((8271, 8287), 'numpy.mean', 'np.mean', (['dat_PFS'], {}), '(dat_PFS)\n', (8278, 8287), True, 'import numpy as np\n'), ((8289, 8304), 'numpy.std', 'np.std', (['dat_PFS'], {}), '(dat_PFS)\n', (8295, 8304), True, 'import numpy as np\n')] |
import pandas
import os
import math
import numpy
from scipy import stats
# Widen pandas' display limits so interactive inspection shows up to 200 rows/columns.
pandas.set_option('display.max_rows', 200, 'display.max_columns', 200) # change it to see more or less rows and/or columns
# Ask inputs to read the TSV file (directory and file name are joined below).
dirPath = input('Enter path to TSV file: ')
inputName = input('Enter TSV name (input file): ')
# Read input file. Every column is loaded as `object` (string) first; the
# numeric columns the analysis needs are cast explicitly further down.
inputTSV = pandas.read_csv(os.path.join(dirPath, inputName), sep = '\t', index_col = False, dtype = object)
print('The input TSV has ' + str(len(inputTSV)) + ' variants.')
## Show to the user the options to filter by consequence and impact
print('The different consequence in this input are: ' + ', '.join(inputTSV.Consequence.unique()))
print('The different impact in this input are: ' + ', '.join(inputTSV.IMPACT.unique()))
## Ask inputs to filter: variant class, VEP consequence(s), impact level(s) and
## allele-frequency cut-offs for gnomAD NFE, gnomAD (all populations) and CSVS.
snp_indel_filter = input('Enter "snp", "indel" or "snp|indel": ')
consequence_filter = input('Enter consequence filter (e.g. missense_variant, stop_gained, synonymous_variant), to use multiple consequence separate them with "|", or "all" for all the consequences: ')
impact_filter = input('Enter impact filter (e.g. HIGH, MODERATE, LOW, MODIFIER), to use multiple impact separate them with "|", or "all" for all the impact: ')
gnomADg_AF_nfe_filter = float(input('Enter AF to filter by gnomAD NFE (e.g. 0.05): '))
gnomADg_AF_filter = float(input('Enter AF to filter by gnomAD all population (e.g. 0.05): '))
CSVS_AF_filter = float(input('Enter AF to filter by CSVS (e.g. 0.05): '))
# Transform some columns to float (pathogenicity scores and allele frequencies used below).
colsToFloat = ['CADD_PHRED', 'CADD_RAW', 'gnomADg_AF_nfe', 'gnomADg_AF', 'CSVS_AF', 'AF']
for col in colsToFloat:
    inputTSV[col] = inputTSV[col].astype(float)
dtypes = dict(inputTSV.dtypes) # Check the types of each column
# Filter variants without symbol (gene name): un-annotated variants cannot be
# aggregated per gene, so they are dropped before any other filter.
inputTSV_SYMBOL = inputTSV[inputTSV['SYMBOL'].notnull()]
print('After filtering by those variants with gene name there are: ' + str(len(inputTSV_SYMBOL)) + ' variants')
# Filter by SNPs/indels: a SNP has a single-nucleotide REF and ALT; anything
# else (longer or symbolic alleles) is treated as an indel.
nt = ['A', 'T', 'C', 'G']
if snp_indel_filter == 'snp':
    inputTSV_nt = inputTSV_SYMBOL[inputTSV_SYMBOL['REF'].isin(nt) & inputTSV_SYMBOL['ALT'].isin(nt)]
elif snp_indel_filter == 'indel':
    inputTSV_nt = inputTSV_SYMBOL[~inputTSV_SYMBOL['REF'].isin(nt) | ~inputTSV_SYMBOL['ALT'].isin(nt)]
elif snp_indel_filter == 'snp|indel':
    inputTSV_nt = inputTSV_SYMBOL
else:
    # Fail fast with the valid options: the previous version only printed a
    # warning (listing a non-existent "both" option) and then crashed with a
    # NameError at the first use of inputTSV_nt.
    raise ValueError('Bad snp/indel filter introduced, the options are: "snp", "indel" or "snp|indel"')
print('After filtering by ' + snp_indel_filter + ' there are: ' + str(len(inputTSV_nt)) + ' variants')
# Filter by variant type (Consequence): 'all' keeps every consequence, any
# other value is used as a (possibly '|'-separated) pattern via str.contains.
if consequence_filter == 'all':
    consequenceDF = inputTSV_nt
else:
    consequenceDF = inputTSV_nt[inputTSV_nt['Consequence'].str.contains(consequence_filter)]
print('After filtering by the consequence(s) ' + consequence_filter + ' there are: ' + str(len(consequenceDF)) + ' variants')
# Filter by impact, with the same 'all' / '|'-separated convention.
if impact_filter == 'all':
    consequence_impactDF = consequenceDF
else:
    consequence_impactDF = consequenceDF[consequenceDF['IMPACT'].str.contains(impact_filter)]
print('After filtering by the impact(s) ' + impact_filter + ' there are: ' + str(len(consequence_impactDF)) + ' variants')
# Filter by AF
## SNVs without data in gnomADg / CSVS still need values so the GBA can use
## them. Variants without data will get: AC = 0, AF = 0, AN = mean(AN in gene).
### Build a table with the mean AN per gene (all SNVs, before any filter).
#### gnomAD
cols_meanAN_gnomAD = ['SYMBOL', 'gnomADg_AN_nfe', 'gnomADg_AN']
meanAN_DF_gnomAD = inputTSV[cols_meanAN_gnomAD] # DF with less columns
meanAN_DF_gnomAD = meanAN_DF_gnomAD[meanAN_DF_gnomAD['gnomADg_AN_nfe'].notnull()] # Variants with value
print('There are ' + str(len(meanAN_DF_gnomAD)) + ' variants with value in gnomAD')
colsToNumeric_gnomAD = ['gnomADg_AN_nfe', 'gnomADg_AN'] # Transform columns to numeric
for col in colsToNumeric_gnomAD:
    meanAN_DF_gnomAD[col] = meanAN_DF_gnomAD[col].astype('int32')
meanAN_DF_gnomAD = meanAN_DF_gnomAD.groupby(by = ['SYMBOL']).mean() # Calculate the mean for each gene
meanAN_DF_gnomAD = meanAN_DF_gnomAD.round(0).astype(int) # Number without decimals
meanAN_DF_gnomAD = meanAN_DF_gnomAD.reset_index() # Reset index to avoid errors
print('There are ' + str(len(meanAN_DF_gnomAD)) + ' genes with value in gnomAD')
#### CSVS (same steps as gnomAD, on its single AN column)
cols_meanAN_CSVS = ['SYMBOL', 'CSVS_AN']
meanAN_DF_CSVS = inputTSV[cols_meanAN_CSVS]
meanAN_DF_CSVS = meanAN_DF_CSVS[meanAN_DF_CSVS['CSVS_AN'].notnull()]
print('There are ' + str(len(meanAN_DF_CSVS)) + ' variants with value in CSVS')
colsToNumeric_CSVS = ['CSVS_AN']
for col in colsToNumeric_CSVS:
    meanAN_DF_CSVS[col] = meanAN_DF_CSVS[col].astype('int32')
meanAN_DF_CSVS = meanAN_DF_CSVS.groupby(by = ['SYMBOL']).mean()
meanAN_DF_CSVS = meanAN_DF_CSVS.round(0).astype(int)
meanAN_DF_CSVS = meanAN_DF_CSVS.reset_index()
print('There are ' + str(len(meanAN_DF_CSVS)) + ' genes with value in CSVS')
### Merge gnomAD and CSVS
meanAN_DF = pandas.merge(meanAN_DF_gnomAD, meanAN_DF_CSVS, how = 'left', left_on = 'SYMBOL', right_on = 'SYMBOL')
#### Genes without value in both databases: add SYMBOL-only rows so every gene
#### of the input is represented. DataFrame.append was removed in pandas 2.0,
#### so the rows are now added in one pandas.concat call instead of a loop.
symbol_diff = list(set(inputTSV.SYMBOL.unique().tolist()).difference(set(meanAN_DF.SYMBOL.unique().tolist())))
if symbol_diff:
    meanAN_DF = pandas.concat([meanAN_DF, pandas.DataFrame({'SYMBOL': symbol_diff})], ignore_index = True)
### If a gene has no value at all, use as AN the mean of all the genes for the
### same database (Series.mean skips NaN by default).
for c in meanAN_DF.columns[1:]:
    for r in meanAN_DF.index:
        if math.isnan(meanAN_DF.loc[r,c]):
            meanAN_col = numpy.mean(meanAN_DF[c])
            meanAN_DF.loc[r,c] = meanAN_col.round(0).astype(int)
colsToNumeric_meanAN_DF = ['gnomADg_AN_nfe', 'gnomADg_AN', 'CSVS_AN'] # Transform columns to numeric
for col in colsToNumeric_meanAN_DF:
    meanAN_DF[col] = meanAN_DF[col].astype('int32')
## Add values to DF: fill the database fields of variants absent from gnomAD /
## CSVS so the burden test can still use them (AC = 0, AF = 0 and the per-gene
## mean AN computed in meanAN_DF).
consequence_impact_AFnfe_AFallDF = consequence_impactDF
for r in consequence_impact_AFnfe_AFallDF.index:
    # Variant not present in gnomAD: zero its counts/frequencies and plug in
    # the mean AN of its gene (NFE and all-population columns).
    if pandas.isnull(consequence_impact_AFnfe_AFallDF['gnomADg'][r]) == True:
        # AF & AC
        consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AF_nfe'] = 0
        consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AC_nfe'] = 0
        consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AF'] = 0
        consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AC'] = 0
        # AN: look up the gene's mean allele number computed earlier.
        rGene = consequence_impact_AFnfe_AFallDF['SYMBOL'][r]
        ANGene_nfe = meanAN_DF.loc[meanAN_DF['SYMBOL'] == rGene, 'gnomADg_AN_nfe'].iloc[0]
        ANGene = meanAN_DF.loc[meanAN_DF['SYMBOL'] == rGene, 'gnomADg_AN'].iloc[0]
        consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AN_nfe'] = ANGene_nfe
        consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AN'] = ANGene
    # Same treatment for variants absent from CSVS.
    if pandas.isnull(consequence_impact_AFnfe_AFallDF['CSVS'][r]) == True:
        # AF & AC
        consequence_impact_AFnfe_AFallDF.at[r, 'CSVS_AF'] = 0
        consequence_impact_AFnfe_AFallDF.at[r, 'CSVS_AC'] = 0
        # AN
        rGene = consequence_impact_AFnfe_AFallDF['SYMBOL'][r]
        ANGene_CSVS = meanAN_DF.loc[meanAN_DF['SYMBOL'] == rGene, 'CSVS_AN'].iloc[0]
        consequence_impact_AFnfe_AFallDF.at[r, 'CSVS_AN'] = ANGene_CSVS
## Filter by AF of gnomAD nfe, gnomAD and CSVS (keep variants strictly below
## every one of the three user-supplied thresholds).
consequence_impact_AFnfe_AFallDF_AFCSVS = consequence_impact_AFnfe_AFallDF[(consequence_impact_AFnfe_AFallDF['gnomADg_AF_nfe'] < gnomADg_AF_nfe_filter) & (consequence_impact_AFnfe_AFallDF['gnomADg_AF'] < gnomADg_AF_filter) & (consequence_impact_AFnfe_AFallDF['CSVS_AF'] < CSVS_AF_filter)]
print('After filtering by the AF: ' + str(gnomADg_AF_nfe_filter) + ', ' + str(gnomADg_AF_filter) + ', ' + str(CSVS_AF_filter) + ' there are: ' + str(len(consequence_impact_AFnfe_AFallDF_AFCSVS)) + ' variants')
# Create DF as GBA input
## The necessary columns are the gene and the AC and AN for the cases and each
## database.
colsGBA = ['SYMBOL', 'AC', 'AN', 'gnomADg_AC_nfe', 'gnomADg_AN_nfe', 'gnomADg_AC', 'gnomADg_AN', 'CSVS_AC', 'CSVS_AN'] # AN are the total number of alleles
# .copy() detaches the subset from the filtered DataFrame so the rename and the
# column assignments below modify it reliably (avoids SettingWithCopyWarning /
# silently-lost writes on a view).
inputGBA_SNV = consequence_impact_AFnfe_AFallDF_AFCSVS[colsGBA].copy()
## Rename the case columns ('AC', 'AN') so they cannot be confused with the
## database columns.
inputGBA_SNV.rename(columns = {'AC': 'AC_cases', 'AN': 'AN_cases'}, inplace = True)
## Change to integer, except SYMBOL column
for col in inputGBA_SNV.columns[1:]:
    inputGBA_SNV[col] = inputGBA_SNV[col].astype(int)
dtypes = dict(inputGBA_SNV.dtypes) # Check the types of each column
## Calculate WT (wild type): WT = total alleles - allele count (variant)
inputGBA_SNV['WT_cases'] = inputGBA_SNV['AN_cases'] - inputGBA_SNV['AC_cases']
inputGBA_SNV['WT_gnomADg_nfe'] = inputGBA_SNV['gnomADg_AN_nfe'] - inputGBA_SNV['gnomADg_AC_nfe']
inputGBA_SNV['WT_gnomADg'] = inputGBA_SNV['gnomADg_AN'] - inputGBA_SNV['gnomADg_AC']
inputGBA_SNV['WT_CSVS'] = inputGBA_SNV['CSVS_AN'] - inputGBA_SNV['CSVS_AC']
## Remove the AN columns: they are no longer needed once WT is computed.
inputGBA_SNV = inputGBA_SNV[inputGBA_SNV.columns.drop(list(inputGBA_SNV.filter(regex = 'AN')))]
## Aggregate the allele counts per gene (sum of every column grouped by SYMBOL).
inputGBA = inputGBA_SNV.groupby(by = ['SYMBOL']).sum()
inputGBA = inputGBA.reset_index()
print('The number of genes for the GBA is: ' + str(len(inputGBA)))
# Extract genes whose SNVs are all novel in at least one database (AC == 0):
# a burden test cannot be performed with a zero count, so these genes are
# reported separately and studied apart.
novelSNV_genes = inputGBA[(inputGBA['gnomADg_AC_nfe'] == 0) | (inputGBA['gnomADg_AC'] == 0) | (inputGBA['CSVS_AC'] == 0)]
## Save genes with novel SNV
outPath = os.path.join(dirPath, 'GBA')
os.makedirs(outPath, exist_ok = True) # make sure the output folder exists before writing
outName = os.path.join(outPath, os.path.splitext(inputName)[0])
outName_novel = '.'.join([outName, 'GBA', snp_indel_filter.replace('|', '_'), consequence_filter.replace('|', '_'), impact_filter.replace('|', '_'), str(gnomADg_AF_nfe_filter).replace('.', ''), str(gnomADg_AF_filter).replace('.', ''), str(CSVS_AF_filter).replace('.', ''), 'novelSNV', 'tsv'])
novelSNV_genes.to_csv(outName_novel, header = True, index = None, sep = '\t', float_format = '%.16f')
# Odds Ratio per database: OR = (AC_cases * WT_controls) / (WT_cases * AC_controls).
inputGBA_OR = inputGBA
inputGBA_OR['OR_gnomADg_nfe'] = (inputGBA_OR['AC_cases']*inputGBA_OR['WT_gnomADg_nfe'])/(inputGBA_OR['WT_cases']*inputGBA_OR['gnomADg_AC_nfe'])
inputGBA_OR['OR_gnomADg'] = (inputGBA_OR['AC_cases']*inputGBA_OR['WT_gnomADg'])/(inputGBA_OR['WT_cases']*inputGBA_OR['gnomADg_AC'])
inputGBA_OR['OR_CSVS'] = (inputGBA_OR['AC_cases']*inputGBA_OR['WT_CSVS'])/(inputGBA_OR['WT_cases']*inputGBA_OR['CSVS_AC'])
# Standard error of log(OR): sqrt(1/a + 1/b + 1/c + 1/d).
# numpy.sqrt works element-wise on a whole Series (math.sqrt does not), which
# replaces the three previous per-element loops that also shadowed the builtin
# `sum`.
inputGBA_OR_SE = inputGBA_OR
inputGBA_OR_SE['SE_gnomADg_nfe'] = numpy.sqrt((1/inputGBA_OR_SE['AC_cases']) + (1/inputGBA_OR_SE['gnomADg_AC_nfe']) + (1/inputGBA_OR_SE['WT_cases']) + (1/inputGBA_OR_SE['WT_gnomADg_nfe']))
inputGBA_OR_SE['SE_gnomADg'] = numpy.sqrt((1/inputGBA_OR_SE['AC_cases']) + (1/inputGBA_OR_SE['gnomADg_AC']) + (1/inputGBA_OR_SE['WT_cases']) + (1/inputGBA_OR_SE['WT_gnomADg']))
inputGBA_OR_SE['SE_CSVS'] = numpy.sqrt((1/inputGBA_OR_SE['AC_cases']) + (1/inputGBA_OR_SE['CSVS_AC']) + (1/inputGBA_OR_SE['WT_cases']) + (1/inputGBA_OR_SE['WT_CSVS']))
# Z-Score: log(OR) / SE.
inputGBA_OR_SE_Z = inputGBA_OR_SE
inputGBA_OR_SE_Z['ZScore_gnomADg_nfe'] = numpy.log(inputGBA_OR_SE_Z['OR_gnomADg_nfe'])/inputGBA_OR_SE_Z['SE_gnomADg_nfe']
inputGBA_OR_SE_Z['ZScore_gnomADg'] = numpy.log(inputGBA_OR_SE_Z['OR_gnomADg'])/inputGBA_OR_SE_Z['SE_gnomADg']
inputGBA_OR_SE_Z['ZScore_CSVS'] = numpy.log(inputGBA_OR_SE_Z['OR_CSVS'])/inputGBA_OR_SE_Z['SE_CSVS']
# Two-sided p-value from the normal survival function (CCDF): sf(|z|) * 2.
# Equivalent to (1 - cdf(|z|)) * 2, but numerically more stable in the tail.
inputGBA_OR_SE_Z_pv = inputGBA_OR_SE_Z
inputGBA_OR_SE_Z_pv['pvalue_gnomADg_nfe'] = stats.norm.sf(abs(inputGBA_OR_SE_Z['ZScore_gnomADg_nfe']))*2 # using CCDF
inputGBA_OR_SE_Z_pv['pvalue_gnomADg'] = stats.norm.sf(abs(inputGBA_OR_SE_Z['ZScore_gnomADg']))*2
inputGBA_OR_SE_Z_pv['pvalue_CSVS'] = stats.norm.sf(abs(inputGBA_OR_SE_Z['ZScore_CSVS']))*2
# Multiple-testing correction (labelled FDR, computed Bonferroni-style):
# p-value * number of genes actually tested in that database, i.e. excluding
# genes with AC == 0 there.
inputGBA_OR_SE_Z_pv['FDR_gnomADg_nfe'] = inputGBA_OR_SE_Z_pv['pvalue_gnomADg_nfe'] * len(inputGBA_OR_SE_Z_pv[inputGBA_OR_SE_Z_pv['gnomADg_AC_nfe'] != 0]) # number of genes in the analysis
inputGBA_OR_SE_Z_pv['FDR_gnomADg'] = inputGBA_OR_SE_Z_pv['pvalue_gnomADg'] * len(inputGBA_OR_SE_Z_pv[inputGBA_OR_SE_Z_pv['gnomADg_AC'] != 0])
inputGBA_OR_SE_Z_pv['FDR_CSVS'] = inputGBA_OR_SE_Z_pv['pvalue_CSVS'] * len(inputGBA_OR_SE_Z_pv[inputGBA_OR_SE_Z_pv['CSVS_AC'] != 0])
# 95% confidence interval of the OR: exp(log(OR) -/+ 1.96 * SE).
inputGBA_OR_SE_Z_pv_CI = inputGBA_OR_SE_Z_pv
inputGBA_OR_SE_Z_pv_CI['lowCI_gnomADg_nfe'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_gnomADg_nfe']) - 1.96*inputGBA_OR_SE_Z_pv['SE_gnomADg_nfe'])
inputGBA_OR_SE_Z_pv_CI['highCI_gnomADg_nfe'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_gnomADg_nfe']) + 1.96*inputGBA_OR_SE_Z_pv['SE_gnomADg_nfe'])
inputGBA_OR_SE_Z_pv_CI['lowCI_gnomADg'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_gnomADg']) - 1.96*inputGBA_OR_SE_Z_pv['SE_gnomADg'])
inputGBA_OR_SE_Z_pv_CI['highCI_gnomADg'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_gnomADg']) + 1.96*inputGBA_OR_SE_Z_pv['SE_gnomADg'])
inputGBA_OR_SE_Z_pv_CI['lowCI_CSVS'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_CSVS']) - 1.96*inputGBA_OR_SE_Z_pv['SE_CSVS'])
inputGBA_OR_SE_Z_pv_CI['highCI_CSVS'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_CSVS']) + 1.96*inputGBA_OR_SE_Z_pv['SE_CSVS'])
# Reorder columns: gene, case counts, then the per-database statistics blocks.
colsOrderFinal = ['SYMBOL', 'AC_cases', 'WT_cases', 'gnomADg_AC_nfe', 'WT_gnomADg_nfe', 'OR_gnomADg_nfe', 'SE_gnomADg_nfe', 'ZScore_gnomADg_nfe', 'pvalue_gnomADg_nfe', 'FDR_gnomADg_nfe', 'lowCI_gnomADg_nfe', 'highCI_gnomADg_nfe', 'gnomADg_AC', 'WT_gnomADg', 'OR_gnomADg', 'SE_gnomADg', 'ZScore_gnomADg', 'pvalue_gnomADg', 'FDR_gnomADg', 'lowCI_gnomADg', 'highCI_gnomADg', 'CSVS_AC', 'WT_CSVS', 'OR_CSVS', 'SE_CSVS', 'ZScore_CSVS', 'pvalue_CSVS', 'FDR_CSVS', 'lowCI_CSVS', 'highCI_CSVS']
GBA = inputGBA_OR_SE_Z_pv_CI[colsOrderFinal]
# Filter by FDR < 0.05 in each database and combining them
GBA_nfe = GBA[GBA['FDR_gnomADg_nfe'] < 0.05]
GBA_gnomAD = GBA[GBA['FDR_gnomADg'] < 0.05]
GBA_CSVS = GBA[GBA['FDR_CSVS'] < 0.05]
GBA_gnomAD_all = GBA[(GBA['FDR_gnomADg_nfe'] < 0.05) & (GBA['FDR_gnomADg'] < 0.05)]
GBA_all = GBA[(GBA['FDR_gnomADg_nfe'] < 0.05) & (GBA['FDR_gnomADg'] < 0.05) & (GBA['FDR_CSVS'] < 0.05)]
# Print the results of the GBA
print('The GBA is done. Filtering by FDR < 0.05 for the following databases, the next number of genes were enriched:')
print('gnomAD NFE: ' + str(len(GBA_nfe)) + ' genes')
print('gnomAD all population: ' + str(len(GBA_gnomAD)) + ' genes')
print('gnomAD NFE + gnomAD all population: ' + str(len(GBA_gnomAD_all)) + ' genes')
print('CSVS: ' + str(len(GBA_CSVS)) + ' genes')
print('gnomAD NFE + gnomAD all population + CSVS: ' + str(len(GBA_all)) + ' genes')
# Write files, using input filters in the file name
outPath = os.path.join(dirPath, 'GBA')
os.makedirs(outPath, exist_ok = True) # make sure the output folder exists before writing
print('Files will be saved in ' + outPath)
outName = os.path.join(outPath, os.path.splitext(inputName)[0])
print(outName)
def _save_GBA(df, *suffixes):
    """Write df to '<outName>.GBA.<filters>[.<suffixes>].tsv' (tab-separated, no index).

    Centralizes the filename construction that was previously duplicated for
    every output file; the produced paths are identical to the originals.
    """
    fileName = '.'.join([outName, 'GBA', snp_indel_filter.replace('|', '_'), consequence_filter.replace('|', '_'), impact_filter.replace('|', '_'), str(gnomADg_AF_nfe_filter).replace('.', ''), str(gnomADg_AF_filter).replace('.', ''), str(CSVS_AF_filter).replace('.', '')] + list(suffixes) + ['tsv'])
    df.to_csv(fileName, header = True, index = None, sep = '\t', float_format = '%.16f')
_save_GBA(GBA)
# The per-database subsets are only written when they are not empty.
if len(GBA_nfe) != 0:
    _save_GBA(GBA_nfe, 'gnomAD_nfe')
if len(GBA_gnomAD) != 0:
    _save_GBA(GBA_gnomAD, 'gnomAD')
if len(GBA_CSVS) != 0:
    _save_GBA(GBA_CSVS, 'CSVS')
if len(GBA_gnomAD_all) != 0:
    _save_GBA(GBA_gnomAD_all, 'gnomAD_nfe', 'gnomAD')
if len(GBA_all) != 0:
    _save_GBA(GBA_all, 'gnomAD_nfe', 'gnomAD', 'CSVS')
print('All files saved')
print('PROCESS FINISHED')
| [
"pandas.isnull",
"numpy.mean",
"pandas.merge",
"numpy.log",
"math.sqrt",
"os.path.join",
"os.path.splitext",
"pandas.set_option",
"math.isnan"
] | [((74, 144), 'pandas.set_option', 'pandas.set_option', (['"""display.max_rows"""', '(200)', '"""display.max_columns"""', '(200)'], {}), "('display.max_rows', 200, 'display.max_columns', 200)\n", (91, 144), False, 'import pandas\n'), ((5100, 5199), 'pandas.merge', 'pandas.merge', (['meanAN_DF_gnomAD', 'meanAN_DF_CSVS'], {'how': '"""left"""', 'left_on': '"""SYMBOL"""', 'right_on': '"""SYMBOL"""'}), "(meanAN_DF_gnomAD, meanAN_DF_CSVS, how='left', left_on='SYMBOL',\n right_on='SYMBOL')\n", (5112, 5199), False, 'import pandas\n'), ((9739, 9767), 'os.path.join', 'os.path.join', (['dirPath', '"""GBA"""'], {}), "(dirPath, 'GBA')\n", (9751, 9767), False, 'import os\n'), ((15557, 15585), 'os.path.join', 'os.path.join', (['dirPath', '"""GBA"""'], {}), "(dirPath, 'GBA')\n", (15569, 15585), False, 'import os\n'), ((374, 406), 'os.path.join', 'os.path.join', (['dirPath', 'inputName'], {}), '(dirPath, inputName)\n', (386, 406), False, 'import os\n'), ((11287, 11301), 'math.sqrt', 'math.sqrt', (['sum'], {}), '(sum)\n', (11296, 11301), False, 'import math\n'), ((11384, 11398), 'math.sqrt', 'math.sqrt', (['sum'], {}), '(sum)\n', (11393, 11398), False, 'import math\n'), ((11480, 11494), 'math.sqrt', 'math.sqrt', (['sum'], {}), '(sum)\n', (11489, 11494), False, 'import math\n'), ((11920, 11965), 'numpy.log', 'numpy.log', (["inputGBA_OR_SE_Z['OR_gnomADg_nfe']"], {}), "(inputGBA_OR_SE_Z['OR_gnomADg_nfe'])\n", (11929, 11965), False, 'import numpy\n'), ((12038, 12079), 'numpy.log', 'numpy.log', (["inputGBA_OR_SE_Z['OR_gnomADg']"], {}), "(inputGBA_OR_SE_Z['OR_gnomADg'])\n", (12047, 12079), False, 'import numpy\n'), ((12145, 12183), 'numpy.log', 'numpy.log', (["inputGBA_OR_SE_Z['OR_CSVS']"], {}), "(inputGBA_OR_SE_Z['OR_CSVS'])\n", (12154, 12183), False, 'import numpy\n'), ((5627, 5658), 'math.isnan', 'math.isnan', (['meanAN_DF.loc[r, c]'], {}), '(meanAN_DF.loc[r, c])\n', (5637, 5658), False, 'import math\n'), ((6192, 6253), 'pandas.isnull', 'pandas.isnull', 
(["consequence_impact_AFnfe_AFallDF['gnomADg'][r]"], {}), "(consequence_impact_AFnfe_AFallDF['gnomADg'][r])\n", (6205, 6253), False, 'import pandas\n'), ((6953, 7011), 'pandas.isnull', 'pandas.isnull', (["consequence_impact_AFnfe_AFallDF['CSVS'][r]"], {}), "(consequence_impact_AFnfe_AFallDF['CSVS'][r])\n", (6966, 7011), False, 'import pandas\n'), ((9800, 9827), 'os.path.splitext', 'os.path.splitext', (['inputName'], {}), '(inputName)\n', (9816, 9827), False, 'import os\n'), ((13291, 13339), 'numpy.log', 'numpy.log', (["inputGBA_OR_SE_Z_pv['OR_gnomADg_nfe']"], {}), "(inputGBA_OR_SE_Z_pv['OR_gnomADg_nfe'])\n", (13300, 13339), False, 'import numpy\n'), ((13443, 13491), 'numpy.log', 'numpy.log', (["inputGBA_OR_SE_Z_pv['OR_gnomADg_nfe']"], {}), "(inputGBA_OR_SE_Z_pv['OR_gnomADg_nfe'])\n", (13452, 13491), False, 'import numpy\n'), ((13590, 13634), 'numpy.log', 'numpy.log', (["inputGBA_OR_SE_Z_pv['OR_gnomADg']"], {}), "(inputGBA_OR_SE_Z_pv['OR_gnomADg'])\n", (13599, 13634), False, 'import numpy\n'), ((13730, 13774), 'numpy.log', 'numpy.log', (["inputGBA_OR_SE_Z_pv['OR_gnomADg']"], {}), "(inputGBA_OR_SE_Z_pv['OR_gnomADg'])\n", (13739, 13774), False, 'import numpy\n'), ((13866, 13907), 'numpy.log', 'numpy.log', (["inputGBA_OR_SE_Z_pv['OR_CSVS']"], {}), "(inputGBA_OR_SE_Z_pv['OR_CSVS'])\n", (13875, 13907), False, 'import numpy\n'), ((13997, 14038), 'numpy.log', 'numpy.log', (["inputGBA_OR_SE_Z_pv['OR_CSVS']"], {}), "(inputGBA_OR_SE_Z_pv['OR_CSVS'])\n", (14006, 14038), False, 'import numpy\n'), ((15661, 15688), 'os.path.splitext', 'os.path.splitext', (['inputName'], {}), '(inputName)\n', (15677, 15688), False, 'import os\n'), ((5778, 5802), 'numpy.mean', 'numpy.mean', (['meanAN_DF[c]'], {}), '(meanAN_DF[c])\n', (5788, 5802), False, 'import numpy\n')] |
import numpy as np
import pandas as pd
import qrcode
import os
import sys
import time
#图像类别:emoji,动图,商品图片,文字图片,二维码,小程序码
#图像分类
from cv2 import cv2
from PIL import Image,ImageDraw
from datetime import datetime
import time
from pytesseract import image_to_string
class Detect():
    """Image detectors built on OpenCV Haar cascades, plus a pytesseract OCR helper.

    The Haar cascade classifiers (loaded from ./haar/haarcascades/) must be fed
    grayscale images, so every detector converts its input first. Rectangles
    are returned as (x1, y1, x2, y2) tuples: top-left and bottom-right corners.
    """
    def __init__(self):
        pass
    def detectFaces(self,image_name):
        """Return the bounding rectangle (x1, y1, x2, y2) of every face in the image file."""
        img = cv2.imread(image_name)
        face_cascade = cv2.CascadeClassifier("./haar/haarcascades/haarcascade_frontalface_default.xml")
        # ndim == 3 means a BGR color image that still needs conversion;
        # ndim == 2 means the image is already grayscale.
        if img.ndim == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img
        # 1.1 is the scale step between detection windows and 5 the minimum
        # neighbour count; changing either changes the detection results.
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        result = []
        for (x,y,width,height) in faces:
            result.append((x,y,x+width,y+height))
        return result
    def saveFaces(self,image_name):
        """Crop every detected face and write it to the original image path.

        The source image is now opened once before the loop: each save
        overwrites image_name, so re-opening it per iteration (as the previous
        version did) cropped later faces out of an already-cropped file.
        NOTE: every crop still targets the same path, so only the last face
        persists on disk.
        """
        faces = self.detectFaces(image_name)
        if faces:
            img = Image.open(image_name) # keep the original pixels before any overwrite
            for (x1,y1,x2,y2) in faces:
                img.crop((x1,y1,x2,y2)).save(image_name)
    def drawFaces(self,image_name):
        """Draw a red rectangle around every detected face, saving the image in place.

        The outline color (B, G, R style tuple) only applies to color images;
        for grayscale input the outline argument should be dropped.
        """
        faces = self.detectFaces(image_name)
        if faces:
            img = Image.open(image_name)
            draw_instance = ImageDraw.Draw(img)
            for (x1,y1,x2,y2) in faces:
                draw_instance.rectangle((x1,y1,x2,y2), outline=(255, 0,0))
            img.save(image_name)
    def detectEyes(self,image_name):
        """Return eye rectangles (x1, y1, x2, y2) in absolute image coordinates.

        Eyes are searched only inside the face regions found by detectFaces(),
        so the cascade hits (relative to each face ROI) are shifted back to
        whole-image coordinates before being collected.
        """
        eye_cascade = cv2.CascadeClassifier('./haar/haarcascades/haarcascade_eye.xml')
        faces = self.detectFaces(image_name)
        img = cv2.imread(image_name)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        result = []
        for (x1,y1,x2,y2) in faces:
            roi_gray = gray[y1:y2, x1:x2]
            eyes = eye_cascade.detectMultiScale(roi_gray,1.3,2)
            for (ex,ey,ew,eh) in eyes:
                result.append((x1+ex,y1+ey,x1+ex+ew,y1+ey+eh))
        return result
    def drawEyes(self,image_name):
        """Draw a blue rectangle around every detected eye, saving the image in place."""
        eyes = self.detectEyes(image_name)
        if eyes:
            img = Image.open(image_name)
            draw_instance = ImageDraw.Draw(img)
            for (x1,y1,x2,y2) in eyes:
                draw_instance.rectangle((x1,y1,x2,y2), outline=(0, 0,255))
            img.save(image_name)
    def detectSmiles(self,image_name):
        """Return smile rectangles (x1, y1, x2, y2) found in the whole image."""
        img = cv2.imread(image_name)
        smiles_cascade = cv2.CascadeClassifier("./haar/haarcascades/haarcascade_smile.xml")
        if img.ndim == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img
        # A coarser scale factor (4) than the face detector; tune (4, 5) to
        # trade detection rate against false positives.
        smiles = smiles_cascade.detectMultiScale(gray,4,5)
        result = []
        for (x,y,width,height) in smiles:
            result.append((x,y,x+width,y+height))
        return result
    def drawSmiles(self,image_name):
        """Draw a dark-yellow rectangle around every detected smile, saving in place."""
        smiles = self.detectSmiles(image_name)
        if smiles:
            img = Image.open(image_name)
            draw_instance = ImageDraw.Draw(img)
            for (x1,y1,x2,y2) in smiles:
                draw_instance.rectangle((x1,y1,x2,y2), outline=(100, 100,0))
            img.save(image_name)
    def img_to_str(self,url):
        """OCR the image at *url* with Tesseract (simplified-Chinese model) and return the text."""
        image = Image.open(url)
        # Recognition step: requires the 'chi_sim' traineddata to be installed.
        text = image_to_string(image,lang='chi_sim')
        return text
def dect_face(path):
time1=datetime.now()
detect_obj=Detect()
status=[]
#path=r"./image" #文件路径
filelist = sorted(os.listdir(path),key=lambda x: int(x[:-4])) #该文件夹下的所有文件
for i in filelist:
if i=='.DS_Store':
continue
else:
result=detect_obj.detectFaces(path+'/'+i)
time2=datetime.now()
print("耗时:"+str(time2-time1))
if len(result)>0:
print(i,"人数:"+str(len(result)))
detect_obj.drawFaces(path+'/'+i)
#drawEyes('./resources/pic/slx.jpg')
# drawSmiles('obama.jpg')
#detect_obj.saveFaces(path+'/'+i)
status.append(1)
else:
print(i,'无人')
status.append(0)
status=np.array(status)/len(filelist)
return len(filelist),status
if __name__ == '__main__':
print(dect_face('./image'))
# cut_point=[]
# change=0
# for i in range(len(status)-2):
# if status[i]==status[i+1]:
# change=0
# else:
# change=1
# if status[i]==0 and change==1:
# # 判断图片相似度,判断模特进出
# cut_point.append(status[i])
# else:
# pass
#detect_obj.drawFaces(path+'/'+i)
#drawEyes('./resources/pic/slx.jpg')
# drawSmiles('obama.jpg')
#detect_obj.saveFaces(path+'/'+i)
#print("图片中的文字",detect_obj.img_to_str('./resource/pic/WechatIMG231.png').replace(' ',''))
"""
上面的代码将眼睛、人脸、笑脸在不同的图像上框出,如果需要在同一张图像上框出,改一下代码就可以了。
总之,利用opencv里训练好的haar特征的xml文件,在图片上检测出人脸的坐标,利用这个坐标,我们可以将人脸区域剪切保存,也可以在原图上将人脸框出。剪切保存人脸以及用矩形工具框出人脸,本程序使用的是PIL里的Image、ImageDraw模块。
此外,opencv里面也有画矩形的模块,同样可以用来框出人脸。
"""
# opencv_createsamples -vec pos.vec -info pos.txt -num 15 -w 60 -h 60 pause
# opencv_traincascade -data cascades -vec pos.vec -bg neg.txt -numPos 15 -numNeg 15 -numStages 5 -w 60 -h 60 -minHitRate 0.9999 -maxFalseAlarmRate 0.5 -mem 2048 -mode ALL
| [
"PIL.Image.open",
"os.listdir",
"cv2.cv2.imread",
"cv2.cv2.CascadeClassifier",
"datetime.datetime.now",
"numpy.array",
"PIL.ImageDraw.Draw",
"pytesseract.image_to_string",
"cv2.cv2.cvtColor"
] | [((4069, 4083), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4081, 4083), False, 'from datetime import datetime\n'), ((545, 567), 'cv2.cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (555, 567), False, 'from cv2 import cv2\n'), ((591, 676), 'cv2.cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""./haar/haarcascades/haarcascade_frontalface_default.xml"""'], {}), "('./haar/haarcascades/haarcascade_frontalface_default.xml'\n )\n", (612, 676), False, 'from cv2 import cv2\n'), ((2109, 2173), 'cv2.cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""./haar/haarcascades/haarcascade_eye.xml"""'], {}), "('./haar/haarcascades/haarcascade_eye.xml')\n", (2130, 2173), False, 'from cv2 import cv2\n'), ((2238, 2260), 'cv2.cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (2248, 2260), False, 'from cv2 import cv2\n'), ((2276, 2313), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2288, 2313), False, 'from cv2 import cv2\n'), ((3027, 3049), 'cv2.cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (3037, 3049), False, 'from cv2 import cv2\n'), ((3075, 3141), 'cv2.cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""./haar/haarcascades/haarcascade_smile.xml"""'], {}), "('./haar/haarcascades/haarcascade_smile.xml')\n", (3096, 3141), False, 'from cv2 import cv2\n'), ((3930, 3945), 'PIL.Image.open', 'Image.open', (['url'], {}), '(url)\n', (3940, 3945), False, 'from PIL import Image, ImageDraw\n'), ((3975, 4013), 'pytesseract.image_to_string', 'image_to_string', (['image'], {'lang': '"""chi_sim"""'}), "(image, lang='chi_sim')\n", (3990, 4013), False, 'from pytesseract import image_to_string\n'), ((4171, 4187), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4181, 4187), False, 'import os\n'), ((4839, 4855), 'numpy.array', 'np.array', (['status'], {}), '(status)\n', (4847, 4855), True, 'import numpy as np\n'), ((717, 754), 'cv2.cv2.cvtColor', 
'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (729, 754), False, 'from cv2 import cv2\n'), ((1682, 1704), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (1692, 1704), False, 'from PIL import Image, ImageDraw\n'), ((1733, 1752), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (1747, 1752), False, 'from PIL import Image, ImageDraw\n'), ((2739, 2761), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (2749, 2761), False, 'from PIL import Image, ImageDraw\n'), ((2790, 2809), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (2804, 2809), False, 'from PIL import Image, ImageDraw\n'), ((3187, 3224), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3199, 3224), False, 'from cv2 import cv2\n'), ((3661, 3683), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (3671, 3683), False, 'from PIL import Image, ImageDraw\n'), ((3712, 3731), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (3726, 3731), False, 'from PIL import Image, ImageDraw\n'), ((4384, 4398), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4396, 4398), False, 'from datetime import datetime\n'), ((1252, 1274), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (1262, 1274), False, 'from PIL import Image, ImageDraw\n')] |
import os, sys
import numpy as np
from math import sqrt
# testing without install
#sys.path.insert(0, '../build/lib.macosx-10.9-x86_64-3.8')
import poppunk_refine
# Original PopPUNK function (with some improvements)
def withinBoundary(dists, x_max, y_max, slope=2):
boundary_test = np.ones((dists.shape[0]))
for row in range(boundary_test.size):
if slope == 2:
in_tri = dists[row, 1] * x_max + dists[row, 0] * y_max - x_max * y_max
elif slope == 0:
in_tri = dists[row, 0] - x_max
elif slope == 1:
in_tri = dists[row, 1] - y_max
if abs(in_tri) < np.finfo(np.float32).eps:
boundary_test[row] = 0
elif in_tri < 0:
boundary_test[row] = -1
return(boundary_test)
def check_tuples(t1, t2):
for t in t1:
if t not in t2:
raise RuntimeError("Results don't match")
def iter_tuples(assign_results, n_samples):
tuple_list = []
idx = 0
for i in range(n_samples):
for j in range(i + 1, n_samples):
if assign_results[idx] == -1:
tuple_list.append((i, j))
idx += 1
return tuple_list
def check_res(res, expected):
if (not np.all(res == expected)):
print(res)
print(expected)
raise RuntimeError("Results don't match")
# assigning
x = np.arange(0, 1, 0.1, dtype=np.float32)
y = np.arange(0, 1, 0.1, dtype=np.float32)
xv, yv = np.meshgrid(x, y)
distMat = np.hstack((xv.reshape(-1,1), yv.reshape(-1,1)))
assign0 = poppunk_refine.assignThreshold(distMat, 0, 0.5, 0.5, 2)
assign1 = poppunk_refine.assignThreshold(distMat, 1, 0.5, 0.5, 2)
assign2 = poppunk_refine.assignThreshold(distMat, 2, 0.5, 0.5, 2)
assign0_res = withinBoundary(distMat, 0.5, 0.5, 0)
assign1_res = withinBoundary(distMat, 0.5, 0.5, 1)
assign2_res = withinBoundary(distMat, 0.5, 0.5, 2)
check_res(assign0, assign0_res)
check_res(assign1, assign1_res)
check_res(assign2, assign2_res)
# Check results when returned as tuple
samples = 100
distMat = np.random.rand(int(0.5 * samples * (samples - 1)), 2)
distMat = np.array(distMat, dtype = np.float32)
assign0_res = withinBoundary(distMat, 0.5, 0.5, 0)
assign0_edge_res = iter_tuples(assign0_res, samples)
check_tuples(assign0_edge_res,
poppunk_refine.generateTuples([int(x) for x in assign0_res], -1))
assign1_edge_res = iter_tuples(withinBoundary(distMat, 0.5, 0.5, 1), samples)
assign2_edge_res = iter_tuples(withinBoundary(distMat, 0.5, 0.5, 2), samples)
assign0_edges = poppunk_refine.edgeThreshold(distMat, 0, 0.5, 0.5)
assign1_edges = poppunk_refine.edgeThreshold(distMat, 1, 0.5, 0.5)
assign2_edges = poppunk_refine.edgeThreshold(distMat, 2, 0.5, 0.5)
check_tuples(assign0_edges, assign0_edge_res)
check_tuples(assign1_edges, assign1_edge_res)
check_tuples(assign2_edges, assign2_edge_res)
# move boundary 1D
# example is symmetrical at points (0.1, 0.1); (0.2, 0.2); (0.3, 0.3)
offsets = [x * sqrt(2) for x in [-0.1, 0.0, 0.1]]
i_vec, j_vec, idx_vec = poppunk_refine.thresholdIterate1D(distMat, offsets, 2, 0.2, 0.2, 0.3, 0.3)
sketchlib_i = []
sketchlib_j = []
for offset_idx, offset in enumerate(offsets):
for i, j, idx in zip(i_vec, j_vec, idx_vec):
if idx > offset_idx:
break
elif idx == offset_idx:
sketchlib_i.append(i)
sketchlib_j.append(j)
py_i = []
py_j = []
xmax = 0.4 + (2 * (offset/sqrt(2)))
assign = poppunk_refine.assignThreshold(distMat, 2, xmax, xmax, 1)
dist_idx = 0
for i in range(samples):
for j in range(i + 1, samples):
if assign[dist_idx] <= 0:
py_i.append(i)
py_j.append(j)
dist_idx += 1
if set(zip(py_i, py_j)) != set(zip(sketchlib_i, sketchlib_j)):
raise RuntimeError("Threshold 1D iterate mismatch at offset " + str(offset))
# move boundary 2D
# example is for boundaries (0.1, 0.2); (0.2, 0.2); (0.3, 0.2)
offsets = [0.1, 0.2, 0.3]
y_max = 0.2
i_vec, j_vec, idx_vec = poppunk_refine.thresholdIterate2D(distMat, offsets, y_max)
sketchlib_i = []
sketchlib_j = []
for offset_idx, offset in enumerate(offsets):
for i, j, idx in zip(i_vec, j_vec, idx_vec):
if idx > offset_idx:
break
elif idx == offset_idx:
sketchlib_i.append(i)
sketchlib_j.append(j)
py_i = []
py_j = []
assign = poppunk_refine.assignThreshold(distMat, 2, offset, y_max, 1)
dist_idx = 0
for i in range(samples):
for j in range(i + 1, samples):
if assign[dist_idx] <= 0:
py_i.append(i)
py_j.append(j)
dist_idx += 1
if set(zip(py_i, py_j)) != set(zip(sketchlib_i, sketchlib_j)):
raise RuntimeError("Threshold 2D iterate mismatch at offset " + str(offset))
| [
"poppunk_refine.assignThreshold",
"numpy.ones",
"math.sqrt",
"poppunk_refine.edgeThreshold",
"poppunk_refine.thresholdIterate2D",
"numpy.array",
"poppunk_refine.thresholdIterate1D",
"numpy.finfo",
"numpy.meshgrid",
"numpy.all",
"numpy.arange"
] | [((1345, 1383), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {'dtype': 'np.float32'}), '(0, 1, 0.1, dtype=np.float32)\n', (1354, 1383), True, 'import numpy as np\n'), ((1388, 1426), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {'dtype': 'np.float32'}), '(0, 1, 0.1, dtype=np.float32)\n', (1397, 1426), True, 'import numpy as np\n'), ((1436, 1453), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1447, 1453), True, 'import numpy as np\n'), ((1522, 1577), 'poppunk_refine.assignThreshold', 'poppunk_refine.assignThreshold', (['distMat', '(0)', '(0.5)', '(0.5)', '(2)'], {}), '(distMat, 0, 0.5, 0.5, 2)\n', (1552, 1577), False, 'import poppunk_refine\n'), ((1588, 1643), 'poppunk_refine.assignThreshold', 'poppunk_refine.assignThreshold', (['distMat', '(1)', '(0.5)', '(0.5)', '(2)'], {}), '(distMat, 1, 0.5, 0.5, 2)\n', (1618, 1643), False, 'import poppunk_refine\n'), ((1654, 1709), 'poppunk_refine.assignThreshold', 'poppunk_refine.assignThreshold', (['distMat', '(2)', '(0.5)', '(0.5)', '(2)'], {}), '(distMat, 2, 0.5, 0.5, 2)\n', (1684, 1709), False, 'import poppunk_refine\n'), ((2089, 2124), 'numpy.array', 'np.array', (['distMat'], {'dtype': 'np.float32'}), '(distMat, dtype=np.float32)\n', (2097, 2124), True, 'import numpy as np\n'), ((2516, 2566), 'poppunk_refine.edgeThreshold', 'poppunk_refine.edgeThreshold', (['distMat', '(0)', '(0.5)', '(0.5)'], {}), '(distMat, 0, 0.5, 0.5)\n', (2544, 2566), False, 'import poppunk_refine\n'), ((2583, 2633), 'poppunk_refine.edgeThreshold', 'poppunk_refine.edgeThreshold', (['distMat', '(1)', '(0.5)', '(0.5)'], {}), '(distMat, 1, 0.5, 0.5)\n', (2611, 2633), False, 'import poppunk_refine\n'), ((2650, 2700), 'poppunk_refine.edgeThreshold', 'poppunk_refine.edgeThreshold', (['distMat', '(2)', '(0.5)', '(0.5)'], {}), '(distMat, 2, 0.5, 0.5)\n', (2678, 2700), False, 'import poppunk_refine\n'), ((3004, 3078), 'poppunk_refine.thresholdIterate1D', 'poppunk_refine.thresholdIterate1D', (['distMat', 'offsets', 
'(2)', '(0.2)', '(0.2)', '(0.3)', '(0.3)'], {}), '(distMat, offsets, 2, 0.2, 0.2, 0.3, 0.3)\n', (3037, 3078), False, 'import poppunk_refine\n'), ((3926, 3984), 'poppunk_refine.thresholdIterate2D', 'poppunk_refine.thresholdIterate2D', (['distMat', 'offsets', 'y_max'], {}), '(distMat, offsets, y_max)\n', (3959, 3984), False, 'import poppunk_refine\n'), ((288, 311), 'numpy.ones', 'np.ones', (['dists.shape[0]'], {}), '(dists.shape[0])\n', (295, 311), True, 'import numpy as np\n'), ((3401, 3458), 'poppunk_refine.assignThreshold', 'poppunk_refine.assignThreshold', (['distMat', '(2)', 'xmax', 'xmax', '(1)'], {}), '(distMat, 2, xmax, xmax, 1)\n', (3431, 3458), False, 'import poppunk_refine\n'), ((4269, 4329), 'poppunk_refine.assignThreshold', 'poppunk_refine.assignThreshold', (['distMat', '(2)', 'offset', 'y_max', '(1)'], {}), '(distMat, 2, offset, y_max, 1)\n', (4299, 4329), False, 'import poppunk_refine\n'), ((1209, 1232), 'numpy.all', 'np.all', (['(res == expected)'], {}), '(res == expected)\n', (1215, 1232), True, 'import numpy as np\n'), ((2945, 2952), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (2949, 2952), False, 'from math import sqrt\n'), ((623, 643), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (631, 643), True, 'import numpy as np\n'), ((3380, 3387), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (3384, 3387), False, 'from math import sqrt\n')] |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
"""
This module implements various equation of states.
Note: Most of the code were initially adapted from ASE and deltafactor by
@gmatteo but has since undergone major refactoring.
"""
from copy import deepcopy
import six
from abc import ABCMeta, abstractmethod
import logging
import warnings
import numpy as np
from scipy.optimize import leastsq, minimize
from pymatgen.core.units import FloatWithUnit
from pymatgen.util.plotting import pretty_plot
__author__ = "<NAME>, <NAME>"
__credits__ = "Cormac Toher"
logger = logging.getLogger(__file__)
class EOSBase(six.with_metaclass(ABCMeta)):
"""
Abstract class that must be subcalssed by all equation of state
implementations.
"""
def __init__(self, volumes, energies):
"""
Args:
volumes (list/numpy.array): volumes in Ang^3
energies (list/numpy.array): energy in eV
"""
self.volumes = np.array(volumes)
self.energies = np.array(energies)
# minimum energy(e0), buk modulus(b0),
# derivative of bulk modulus wrt pressure(b1), minimum volume(v0)
self._params = None
# the eos function parameters. It is the same as _params except for
# equation of states that uses polynomial fits(deltafactor and
# numerical_eos)
self.eos_params = None
def _initial_guess(self):
"""
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
"""
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.eos_params = [a, b, c]
v0 = -b/(2*a)
e0 = a*(v0**2) + b*v0 + c
b0 = 2 * a * v0
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = min(self.volumes), max(self.volumes)
if not vmin < v0 and v0 < vmax:
raise EOSError('The minimum volume of a fitted parabola is '
'not in the input volumes\n.')
return e0, b0, b1, v0
def fit(self):
"""
Do the fitting. Does least square fitting. If you want to use custom
fitting, must override this.
"""
# the objective function that will be minimized in the least square
# fitting
objective_func = lambda pars, x, y: y - self._func(x, pars)
self._params = self._initial_guess()
self.eos_params, ierr = leastsq(
objective_func, self._params, args=(self.volumes, self.energies))
# e0, b0, b1, v0
self._params = self.eos_params
if ierr not in [1, 2, 3, 4]:
raise EOSError("Optimal parameters not found")
@abstractmethod
def _func(self, volume, params):
"""
The equation of state function. This must be implemented by all classes
that derive from this abstract class.
Args:
volume (float/numpy.array)
params (list/tuple): values for the parameters other than the
volume used by the eos.
"""
pass
def func(self, volume):
"""
The equation of state function with the paramters other than volume set
to the ones obtained from fitting.
Args:
volume (list/numpy.array)
Returns:
numpy.array
"""
return self._func(np.array(volume), self.eos_params)
def __call__(self, volume):
return self.func(volume)
@property
def e0(self):
"""
Returns the min energy.
"""
return self._params[0]
@property
def b0(self):
"""
Returns the bulk modulus.
Note: the units for the bulk modulus: unit of energy/unit of volume^3.
"""
return self._params[1]
@property
def b0_GPa(self):
"""
Returns the bulk modulus in GPa.
Note: This assumes that the energy and volumes are in eV and Ang^3
respectively
"""
return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")
@property
def b1(self):
"""
Returns the derivative of bulk modulus wrt pressure(dimensionless)
"""
return self._params[2]
@property
def v0(self):
"""
Returns the minimum or the reference volume in Ang^3.
"""
return self._params[3]
@property
def results(self):
"""
Returns a summary dict.
Returns:
dict
"""
return dict(e0=self.e0, b0=self.b0, b1=self.b1, v0=self.v0)
def plot(self, width=8, height=None, plt=None, dpi=None, **kwargs):
"""
Plot the equation of state.
Args:
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width *
golden ratio.
plt (matplotlib.pyplot): If plt is supplied, changes will be made
to an existing plot. Otherwise, a new plot will be created.
dpi:
kwargs (dict): additional args fed to pyplot.plot.
supported keys: style, color, text, label
Returns:
Matplotlib plot object.
"""
plt = pretty_plot(width=width, height=height, plt=plt, dpi=dpi)
color = kwargs.get("color", "r")
label = kwargs.get("label", "{} fit".format(self.__class__.__name__))
lines = ["Equation of State: %s" % self.__class__.__name__,
"Minimum energy = %1.2f eV" % self.e0,
"Minimum or reference volume = %1.2f Ang^3" % self.v0,
"Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa" %
(self.b0, self.b0_GPa),
"Derivative of bulk modulus wrt pressure = %1.2f" % self.b1]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
plt.plot(self.volumes, self.energies, linestyle="None", marker="o",
color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
plt.plot(vfit, self.func(vfit), linestyle="dashed", color=color,
label=label)
plt.grid(True)
plt.xlabel("Volume $\\AA^3$")
plt.ylabel("Energy (eV)")
plt.legend(loc="best", shadow=True)
# Add text with fit parameters.
plt.text(0.4, 0.5, text, transform=plt.gca().transAxes)
return plt
class Murnaghan(EOSBase):
def _func(self, volume, params):
"""
From PRB 28,5480 (1983)
"""
e0, b0, b1, v0 = tuple(params)
return (e0 +
b0 * volume / b1 * (((v0 / volume)**b1) / (b1 - 1.0) + 1.0) -
v0 * b0 / (b1 - 1.0))
class Birch(EOSBase):
def _func(self, volume, params):
"""
From Intermetallic compounds: Principles and Practice, Vol. I:
Principles Chapter 9 pages 195-210 by <NAME>. <NAME>,
<NAME>los.
case where n=0
"""
e0, b0, b1, v0 = tuple(params)
return (e0
+ 9.0 / 8.0 * b0 * v0 * ((v0 / volume)**(2.0/3.0) - 1.0) ** 2
+ 9.0 / 16.0 * b0 * v0 * (b1 - 4.) *
((v0 / volume)**(2.0/3.0) - 1.0) ** 3)
class BirchMurnaghan(EOSBase):
def _func(self, volume, params):
"""
BirchMurnaghan equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1. / 3.)
return (e0 +
9. * b0 * v0 / 16. * (eta ** 2 - 1)**2 *
(6 + b1 * (eta ** 2 - 1.) - 4. * eta ** 2))
class PourierTarantola(EOSBase):
def _func(self, volume, params):
"""
Pourier-Tarantola equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1. / 3.)
squiggle = -3.*np.log(eta)
return e0 + b0 * v0 * squiggle ** 2 / 6. * (3. + squiggle * (b1 - 2))
class Vinet(EOSBase):
def _func(self, volume, params):
"""
Vinet equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1. / 3.)
return (e0 + 2. * b0 * v0 / (b1 - 1.) ** 2
* (2. - (5. + 3. * b1 * (eta - 1.) - 3. * eta)
* np.exp(-3. * (b1 - 1.) * (eta - 1.) / 2.)))
class PolynomialEOS(EOSBase):
"""
Derives from EOSBase. Polynomial based equations of states must subclass
this.
"""
def _func(self, volume, params):
return np.poly1d(list(params))(volume)
def fit(self, order):
"""
Do polynomial fitting and set the parameters. Uses numpy polyfit.
Args:
order (int): order of the fit polynomial
"""
self.eos_params = np.polyfit(self.volumes, self.energies, order)
self._set_params()
def _set_params(self):
"""
Use the fit polynomial to compute the parameter e0, b0, b1 and v0
and set to the _params attribute.
"""
fit_poly = np.poly1d(self.eos_params)
# the volume at min energy, used as the intial guess for the
# optimization wrt volume.
v_e_min = self.volumes[np.argmin(self.energies)]
# evaluate e0, v0, b0 and b1
min_wrt_v = minimize(fit_poly, v_e_min)
e0, v0 = min_wrt_v.fun, min_wrt_v.x[0]
pderiv2 = np.polyder(fit_poly, 2)
pderiv3 = np.polyder(fit_poly, 3)
b0 = v0 * np.poly1d(pderiv2)(v0)
db0dv = np.poly1d(pderiv2)(v0) + v0 * np.poly1d(pderiv3)(v0)
# db/dp
b1 = - v0 * db0dv / b0
self._params = [e0, b0, b1, v0]
class DeltaFactor(PolynomialEOS):
def _func(self, volume, params):
x = volume**(-2. / 3.)
return np.poly1d(list(params))(x)
def fit(self, order=3):
"""
Overriden since this eos works with volume**(2/3) instead of volume.
"""
x = self.volumes**(-2./3.)
self.eos_params = np.polyfit(x, self.energies, order)
self._set_params()
def _set_params(self):
"""
Overriden to account for the fact the fit with volume**(2/3) instead
of volume.
"""
deriv0 = np.poly1d(self.eos_params)
deriv1 = np.polyder(deriv0, 1)
deriv2 = np.polyder(deriv1, 1)
deriv3 = np.polyder(deriv2, 1)
for x in np.roots(deriv1):
if x > 0 and deriv2(x) > 0:
v0 = x**(-3./2.)
break
else:
raise EOSError("No minimum could be found")
derivV2 = 4./9. * x**5. * deriv2(x)
derivV3 = (-20./9. * x**(13./2.) * deriv2(x) - 8./27. *
x**(15./2.) * deriv3(x))
b0 = derivV2 / x**(3./2.)
b1 = -1 - x**(-3./2.) * derivV3 / derivV2
# e0, b0, b1, v0
self._params = [deriv0(v0**(-2./3.)), b0, b1, v0]
class NumericalEOS(PolynomialEOS):
def fit(self, min_ndata_factor=3, max_poly_order_factor=5, min_poly_order=2):
"""
Fit the input data to the 'numerical eos', the equation of state employed
in the quasiharmonic Debye model described in the paper:
10.1103/PhysRevB.90.174107.
credits: <NAME>
Args:
min_ndata_factor (int): parameter that controls the minimum number
of data points that will be used for fitting.
minimum number of data points =
total data points-2*min_ndata_factor
max_poly_order_factor (int): parameter that limits the max order
of the polynomial used for fitting.
max_poly_order = number of data points used for fitting -
max_poly_order_factor
min_poly_order (int): minimum order of the polynomial to be
considered for fitting.
"""
warnings.simplefilter('ignore', np.RankWarning)
get_rms = lambda x, y: np.sqrt(np.sum((np.array(x)-np.array(y))**2)/len(x))
# list of (energy, volume) tuples
e_v = [(i, j) for i, j in zip(self.energies, self.volumes)]
ndata = len(e_v)
# minimum number of data points used for fitting
ndata_min = max(ndata - 2 * min_ndata_factor, min_poly_order + 1)
rms_min = np.inf
# number of data points available for fit in each iteration
ndata_fit = ndata
# store the fit polynomial coefficients and the rms in a dict,
# where the key=(polynomial order, number of data points used for
# fitting)
all_coeffs = {}
# sort by energy
e_v = sorted(e_v, key=lambda x: x[0])
# minimum energy tuple
e_min = e_v[0]
# sort by volume
e_v = sorted(e_v, key=lambda x: x[1])
# index of minimum energy tuple in the volume sorted list
emin_idx = e_v.index(e_min)
# the volume lower than the volume corresponding to minimum energy
v_before = e_v[emin_idx - 1][1]
# the volume higher than the volume corresponding to minimum energy
v_after = e_v[emin_idx + 1][1]
e_v_work = deepcopy(e_v)
# loop over the data points.
while (ndata_fit >= ndata_min) and (e_min in e_v_work):
max_poly_order = ndata_fit - max_poly_order_factor
e = [ei[0] for ei in e_v_work]
v = [ei[1] for ei in e_v_work]
# loop over polynomial order
for i in range(min_poly_order, max_poly_order + 1):
coeffs = np.polyfit(v, e, i)
pder = np.polyder(coeffs)
a = np.poly1d(pder)(v_before)
b = np.poly1d(pder)(v_after)
if a * b < 0:
rms = get_rms(e, np.poly1d(coeffs)(v))
rms_min = min(rms_min, rms * i / ndata_fit)
all_coeffs[(i, ndata_fit)] = [coeffs.tolist(), rms]
# store the fit coefficients small to large,
# i.e a0, a1, .. an
all_coeffs[(i, ndata_fit)][0].reverse()
# remove 1 data point from each end.
e_v_work.pop()
e_v_work.pop(0)
ndata_fit = len(e_v_work)
logger.info("total number of polynomials: {}".format(len(all_coeffs)))
norm = 0.
fit_poly_order = ndata
# weight average polynomial coefficients.
weighted_avg_coeffs = np.zeros((fit_poly_order,))
# combine all the filtered polynomial candidates to get the final fit.
for k, v in all_coeffs.items():
# weighted rms = rms * polynomial order / rms_min / ndata_fit
weighted_rms = v[1] * k[0] / rms_min / k[1]
weight = np.exp(-(weighted_rms ** 2))
norm += weight
coeffs = np.array(v[0])
# pad the coefficient array with zeros
coeffs = np.lib.pad(coeffs,
(0, max(fit_poly_order-len(coeffs), 0)),
'constant')
weighted_avg_coeffs += weight * coeffs
# normalization
weighted_avg_coeffs /= norm
weighted_avg_coeffs = weighted_avg_coeffs.tolist()
# large to small(an, an-1, ..., a1, a0) as expected by np.poly1d
weighted_avg_coeffs.reverse()
self.eos_params = weighted_avg_coeffs
self._set_params()
class EOS(object):
"""
Convenient wrapper. Retained in its original state to ensure backward
compatibility.
Fit equation of state for bulk systems.
The following equations are supported::
murnaghan: PRB 28, 5480 (1983)
birch: Intermetallic compounds: Principles and Practice, Vol I:
Principles. pages 195-210
birch_murnaghan: PRB 70, 224107
pourier_tarantola: PRB 70, 224107
vinet: PRB 70, 224107
deltafactor
numerical_eos: 10.1103/PhysRevB.90.174107.
Usage::
eos = EOS(eos_name='murnaghan')
eos_fit = eos.fit(volumes, energies)
eos_fit.plot()
"""
MODELS = {
"murnaghan": Murnaghan,
"birch": Birch,
"birch_murnaghan": BirchMurnaghan,
"pourier_tarantola": PourierTarantola,
"vinet": Vinet,
"deltafactor": DeltaFactor,
"numerical_eos": NumericalEOS
}
def __init__(self, eos_name='murnaghan'):
if eos_name not in self.MODELS:
raise EOSError("The equation of state '{}' is not supported. "
"Please choose one from the following list: {}".
format(eos_name, list(self.MODELS.keys())))
self._eos_name = eos_name
self.model = self.MODELS[eos_name]
def fit(self, volumes, energies):
"""
Fit energies as function of volumes.
Args:
volumes (list/np.array)
energies (list/np.array)
Returns:
EOSBase: EOSBase object
"""
eos_fit = self.model(np.array(volumes), np.array(energies))
eos_fit.fit()
return eos_fit
class EOSError(Exception): pass
| [
"logging.getLogger",
"numpy.polyfit",
"pymatgen.util.plotting.pretty_plot",
"scipy.optimize.minimize",
"numpy.log",
"numpy.roots",
"numpy.exp",
"numpy.array",
"scipy.optimize.leastsq",
"numpy.linspace",
"numpy.polyder",
"warnings.simplefilter",
"six.with_metaclass",
"copy.deepcopy",
"num... | [((701, 728), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (718, 728), False, 'import logging\n'), ((745, 772), 'six.with_metaclass', 'six.with_metaclass', (['ABCMeta'], {}), '(ABCMeta)\n', (763, 772), False, 'import six\n'), ((1099, 1116), 'numpy.array', 'np.array', (['volumes'], {}), '(volumes)\n', (1107, 1116), True, 'import numpy as np\n'), ((1141, 1159), 'numpy.array', 'np.array', (['energies'], {}), '(energies)\n', (1149, 1159), True, 'import numpy as np\n'), ((1705, 1747), 'numpy.polyfit', 'np.polyfit', (['self.volumes', 'self.energies', '(2)'], {}), '(self.volumes, self.energies, 2)\n', (1715, 1747), True, 'import numpy as np\n'), ((2578, 2651), 'scipy.optimize.leastsq', 'leastsq', (['objective_func', 'self._params'], {'args': '(self.volumes, self.energies)'}), '(objective_func, self._params, args=(self.volumes, self.energies))\n', (2585, 2651), False, 'from scipy.optimize import leastsq, minimize\n'), ((5389, 5446), 'pymatgen.util.plotting.pretty_plot', 'pretty_plot', ([], {'width': 'width', 'height': 'height', 'plt': 'plt', 'dpi': 'dpi'}), '(width=width, height=height, plt=plt, dpi=dpi)\n', (5400, 5446), False, 'from pymatgen.util.plotting import pretty_plot\n'), ((6321, 6349), 'numpy.linspace', 'np.linspace', (['vmin', 'vmax', '(100)'], {}), '(vmin, vmax, 100)\n', (6332, 6349), True, 'import numpy as np\n'), ((9059, 9105), 'numpy.polyfit', 'np.polyfit', (['self.volumes', 'self.energies', 'order'], {}), '(self.volumes, self.energies, order)\n', (9069, 9105), True, 'import numpy as np\n'), ((9320, 9346), 'numpy.poly1d', 'np.poly1d', (['self.eos_params'], {}), '(self.eos_params)\n', (9329, 9346), True, 'import numpy as np\n'), ((9565, 9592), 'scipy.optimize.minimize', 'minimize', (['fit_poly', 'v_e_min'], {}), '(fit_poly, v_e_min)\n', (9573, 9592), False, 'from scipy.optimize import leastsq, minimize\n'), ((9658, 9681), 'numpy.polyder', 'np.polyder', (['fit_poly', '(2)'], {}), '(fit_poly, 2)\n', (9668, 9681), True, 
'import numpy as np\n'), ((9700, 9723), 'numpy.polyder', 'np.polyder', (['fit_poly', '(3)'], {}), '(fit_poly, 3)\n', (9710, 9723), True, 'import numpy as np\n'), ((10259, 10294), 'numpy.polyfit', 'np.polyfit', (['x', 'self.energies', 'order'], {}), '(x, self.energies, order)\n', (10269, 10294), True, 'import numpy as np\n'), ((10487, 10513), 'numpy.poly1d', 'np.poly1d', (['self.eos_params'], {}), '(self.eos_params)\n', (10496, 10513), True, 'import numpy as np\n'), ((10531, 10552), 'numpy.polyder', 'np.polyder', (['deriv0', '(1)'], {}), '(deriv0, 1)\n', (10541, 10552), True, 'import numpy as np\n'), ((10570, 10591), 'numpy.polyder', 'np.polyder', (['deriv1', '(1)'], {}), '(deriv1, 1)\n', (10580, 10591), True, 'import numpy as np\n'), ((10609, 10630), 'numpy.polyder', 'np.polyder', (['deriv2', '(1)'], {}), '(deriv2, 1)\n', (10619, 10630), True, 'import numpy as np\n'), ((10649, 10665), 'numpy.roots', 'np.roots', (['deriv1'], {}), '(deriv1)\n', (10657, 10665), True, 'import numpy as np\n'), ((12144, 12191), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'np.RankWarning'], {}), "('ignore', np.RankWarning)\n", (12165, 12191), False, 'import warnings\n'), ((13399, 13412), 'copy.deepcopy', 'deepcopy', (['e_v'], {}), '(e_v)\n', (13407, 13412), False, 'from copy import deepcopy\n'), ((14689, 14716), 'numpy.zeros', 'np.zeros', (['(fit_poly_order,)'], {}), '((fit_poly_order,))\n', (14697, 14716), True, 'import numpy as np\n'), ((3513, 3529), 'numpy.array', 'np.array', (['volume'], {}), '(volume)\n', (3521, 3529), True, 'import numpy as np\n'), ((8141, 8152), 'numpy.log', 'np.log', (['eta'], {}), '(eta)\n', (8147, 8152), True, 'import numpy as np\n'), ((9482, 9506), 'numpy.argmin', 'np.argmin', (['self.energies'], {}), '(self.energies)\n', (9491, 9506), True, 'import numpy as np\n'), ((14988, 15014), 'numpy.exp', 'np.exp', (['(-weighted_rms ** 2)'], {}), '(-weighted_rms ** 2)\n', (14994, 15014), True, 'import numpy as np\n'), ((15065, 15079), 
'numpy.array', 'np.array', (['v[0]'], {}), '(v[0])\n', (15073, 15079), True, 'import numpy as np\n'), ((17242, 17259), 'numpy.array', 'np.array', (['volumes'], {}), '(volumes)\n', (17250, 17259), True, 'import numpy as np\n'), ((17261, 17279), 'numpy.array', 'np.array', (['energies'], {}), '(energies)\n', (17269, 17279), True, 'import numpy as np\n'), ((4152, 4187), 'pymatgen.core.units.FloatWithUnit', 'FloatWithUnit', (['self.b0', '"""eV ang^-3"""'], {}), "(self.b0, 'eV ang^-3')\n", (4165, 4187), False, 'from pymatgen.core.units import FloatWithUnit\n'), ((9742, 9760), 'numpy.poly1d', 'np.poly1d', (['pderiv2'], {}), '(pderiv2)\n', (9751, 9760), True, 'import numpy as np\n'), ((9781, 9799), 'numpy.poly1d', 'np.poly1d', (['pderiv2'], {}), '(pderiv2)\n', (9790, 9799), True, 'import numpy as np\n'), ((13794, 13813), 'numpy.polyfit', 'np.polyfit', (['v', 'e', 'i'], {}), '(v, e, i)\n', (13804, 13813), True, 'import numpy as np\n'), ((13837, 13855), 'numpy.polyder', 'np.polyder', (['coeffs'], {}), '(coeffs)\n', (13847, 13855), True, 'import numpy as np\n'), ((9811, 9829), 'numpy.poly1d', 'np.poly1d', (['pderiv3'], {}), '(pderiv3)\n', (9820, 9829), True, 'import numpy as np\n'), ((13876, 13891), 'numpy.poly1d', 'np.poly1d', (['pder'], {}), '(pder)\n', (13885, 13891), True, 'import numpy as np\n'), ((13922, 13937), 'numpy.poly1d', 'np.poly1d', (['pder'], {}), '(pder)\n', (13931, 13937), True, 'import numpy as np\n'), ((8575, 8620), 'numpy.exp', 'np.exp', (['(-3.0 * (b1 - 1.0) * (eta - 1.0) / 2.0)'], {}), '(-3.0 * (b1 - 1.0) * (eta - 1.0) / 2.0)\n', (8581, 8620), True, 'import numpy as np\n'), ((14014, 14031), 'numpy.poly1d', 'np.poly1d', (['coeffs'], {}), '(coeffs)\n', (14023, 14031), True, 'import numpy as np\n'), ((12240, 12251), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (12248, 12251), True, 'import numpy as np\n'), ((12252, 12263), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (12260, 12263), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import tensorflow as tf
import sys
from demographics_architecture import image_size, cnn_architecture
tf.logging.set_verbosity(tf.logging.INFO)
# Set default flags for the output directories
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
flag_name='checkpoint_path', default_value='',
docstring='Checkpoint path')
tf.app.flags.DEFINE_string(flag_name="network_name",
default_value="inception_v4", docstring="Network architecture to use")
# MNIST sample images
IMAGE_URLS = [
'../dataset/faces/v0.2_Angry_1.jpg',
'../dataset/faces/v0.2_Angry_2.jpg',
'../dataset/faces/v0.2_Angry_3.jpg'
]
def predict_from_list():
# Create placeholders
X = tf.placeholder(dtype=tf.float32, shape=(
None, image_size, image_size, 3), name='X')
# Load net architecture
Y_age_pred, Y_eth_pred, Y_gender_pred = cnn_architecture(
X, is_training=False, network_name=FLAGS.network_name)
saver = tf.train.Saver()
# Open session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Restore weights
# if tf.gfile.Exists(FLAGS.checkpoint_path):
# saver.restore(sess, FLAGS.checkpoint_path)
# else:
# tf.logging.error("Checkpoint file {} not found".format(FLAGS.checkpoint_path))
# sys.exit(0)
saver.restore(sess, FLAGS.checkpoint_path)
# Make predictions
for img_path in IMAGE_URLS:
tf.logging.info("Predict {}".format(img_path))
# Read and preprocess image
img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB).astype(np.float)
img /= 255.0
img = np.expand_dims(cv2.resize(img, (image_size, image_size)),axis=0)
Y_age_pred_v, Y_gender_pred_v, Y_eth_v = sess.run(
[Y_age_pred, Y_gender_pred, Y_eth_pred], feed_dict={X: img})
Y_age_pred_v = np.argmax(Y_age_pred_v,axis=1)[0]
Y_gender_pred_v = np.argmax(Y_gender_pred_v,axis=1)[0]
Y_eth_v = np.argmax(Y_eth_v,axis=1)[0]
tf.logging.info("{} - Age: {} Gender: {} Ethniticy: {}".format(
img_path, Y_age_pred_v, Y_gender_pred_v, Y_eth_v))
if __name__ == "__main__":
predict_from_list() | [
"demographics_architecture.cnn_architecture",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"tensorflow.app.flags.DEFINE_string",
"numpy.argmax",
"tensorflow.global_variables_initializer",
"cv2.resize",
"cv2.imread"
] | [((159, 200), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (183, 200), True, 'import tensorflow as tf\n'), ((276, 382), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', ([], {'flag_name': '"""checkpoint_path"""', 'default_value': '""""""', 'docstring': '"""Checkpoint path"""'}), "(flag_name='checkpoint_path', default_value='',\n docstring='Checkpoint path')\n", (302, 382), True, 'import tensorflow as tf\n'), ((388, 516), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', ([], {'flag_name': '"""network_name"""', 'default_value': '"""inception_v4"""', 'docstring': '"""Network architecture to use"""'}), "(flag_name='network_name', default_value=\n 'inception_v4', docstring='Network architecture to use')\n", (414, 516), True, 'import tensorflow as tf\n'), ((764, 851), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, image_size, image_size, 3)', 'name': '"""X"""'}), "(dtype=tf.float32, shape=(None, image_size, image_size, 3),\n name='X')\n", (778, 851), True, 'import tensorflow as tf\n'), ((930, 1001), 'demographics_architecture.cnn_architecture', 'cnn_architecture', (['X'], {'is_training': '(False)', 'network_name': 'FLAGS.network_name'}), '(X, is_training=False, network_name=FLAGS.network_name)\n', (946, 1001), False, 'from demographics_architecture import image_size, cnn_architecture\n'), ((1024, 1040), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1038, 1040), True, 'import tensorflow as tf\n'), ((1070, 1082), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1080, 1082), True, 'import tensorflow as tf\n'), ((1109, 1142), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1140, 1142), True, 'import tensorflow as tf\n'), ((1777, 1818), 'cv2.resize', 'cv2.resize', (['img', '(image_size, image_size)'], {}), '(img, (image_size, image_size))\n', (1787, 
1818), False, 'import cv2\n'), ((1996, 2027), 'numpy.argmax', 'np.argmax', (['Y_age_pred_v'], {'axis': '(1)'}), '(Y_age_pred_v, axis=1)\n', (2005, 2027), True, 'import numpy as np\n'), ((2060, 2094), 'numpy.argmax', 'np.argmax', (['Y_gender_pred_v'], {'axis': '(1)'}), '(Y_gender_pred_v, axis=1)\n', (2069, 2094), True, 'import numpy as np\n'), ((2119, 2145), 'numpy.argmax', 'np.argmax', (['Y_eth_v'], {'axis': '(1)'}), '(Y_eth_v, axis=1)\n', (2128, 2145), True, 'import numpy as np\n'), ((1661, 1681), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1671, 1681), False, 'import cv2\n')] |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi time series forecasting problem."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import timeseries_data_generator
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
class TimeseriesProblem(problem.Problem):
"""Base Problem for multi timeseries datasets."""
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.RealEncoder(),
"targets": text_encoder.RealEncoder()
}
@property
def is_generate_per_split(self):
# generate_data will shard the data into TRAIN and EVAL for us.
return False
@property
def dataset_splits(self):
"""Splits of data to produce and number the output shards for each."""
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": self.num_train_shards,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": self.num_eval_shards,
}, {
"split": problem.DatasetSplit.TEST,
"shards": self.num_test_shards,
}]
@property
def num_train_shards(self):
"""Number of training shards."""
return 9
@property
def num_eval_shards(self):
"""Number of eval shards."""
return 1
@property
def num_test_shards(self):
"""Number of test shards."""
return 1
@property
def num_series(self):
"""Number of timeseries."""
raise NotImplementedError()
@property
def num_input_timestamps(self):
"""Number of timestamps to include in the input."""
raise NotImplementedError()
@property
def num_target_timestamps(self):
"""Number of timestamps to include in the target."""
raise NotImplementedError()
def timeseries_dataset(self):
"""Multi-timeseries data [ timestamps , self.num_series ] ."""
raise NotImplementedError()
def eval_metrics(self):
eval_metrics = [metrics.Metrics.RMSE]
return eval_metrics
@property
def normalizing_constant(self):
"""Constant by which all data will be multiplied to be more normalized."""
return 1.0 # Adjust so that your loss is around 1 or 10 or 100, not 1e+9.
def preprocess_example(self, example, unused_mode, unused_hparams):
# Time series are flat on disk, we un-flatten them back here.
flat_inputs = example["inputs"]
flat_targets = example["targets"]
c = self.normalizing_constant
# Tensor2Tensor models expect [height, width, depth] examples, here we
# use height for time and set width to 1 and num_series is our depth.
example["inputs"] = tf.reshape(
flat_inputs, [self.num_input_timestamps, 1, self.num_series]) * c
example["targets"] = tf.reshape(
flat_targets, [self.num_target_timestamps, 1, self.num_series]) * c
return example
def generate_samples(self, data_dir, tmp_dir, dataset_split):
del data_dir
del tmp_dir
del dataset_split
series = self.timeseries_dataset()
num_timestamps = len(series)
# Generate samples with num_input_timestamps for "inputs" and
# num_target_timestamps in the "targets".
for split_index in range(self.num_input_timestamps,
num_timestamps - self.num_target_timestamps + 1):
inputs = series[split_index -
self.num_input_timestamps:split_index, :].tolist()
targets = series[split_index:split_index +
self.num_target_timestamps, :].tolist()
# We need to flatten the lists on disk for tf,Example to work.
flat_inputs = [item for sublist in inputs for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
example_keys = ["inputs", "targets"]
ex_dict = dict(zip(example_keys, [flat_inputs, flat_targets]))
yield ex_dict
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.input_modality = {"inputs": (registry.Modalities.REAL, self.num_series)}
p.target_modality = (registry.Modalities.REAL, self.num_series)
p.input_space_id = problem.SpaceID.REAL
p.target_space_id = problem.SpaceID.REAL
def generate_data(self, data_dir, tmp_dir, task_id=-1):
filepath_fns = {
problem.DatasetSplit.TRAIN: self.training_filepaths,
problem.DatasetSplit.EVAL: self.dev_filepaths,
problem.DatasetSplit.TEST: self.test_filepaths,
}
split_paths = [(split["split"], filepath_fns[split["split"]](
data_dir, split["shards"], shuffled=False))
for split in self.dataset_splits]
all_paths = []
for _, paths in split_paths:
all_paths.extend(paths)
if self.is_generate_per_split:
for split, paths in split_paths:
generator_utils.generate_files(
self.generate_samples(data_dir, tmp_dir, split), paths)
else:
generator_utils.generate_files(
self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN),
all_paths)
generator_utils.shuffle_dataset(all_paths)
def example_reading_spec(self):
data_fields = {
"inputs": tf.VarLenFeature(tf.float32),
"targets": tf.VarLenFeature(tf.float32),
}
data_items_to_decoders = None
return (data_fields, data_items_to_decoders)
@registry.register_problem
class TimeseriesToyProblem(TimeseriesProblem):
"""Timeseries problem with a toy dataset."""
@property
def num_train_shards(self):
"""Number of training shards."""
return 1
@property
def num_eval_shards(self):
"""Number of eval shards."""
return 1
@property
def num_test_shards(self):
"""Number of eval shards."""
return 0
@property
def num_series(self):
"""Number of timeseries."""
return 2
@property
def num_input_timestamps(self):
"""Number of timestamps to include in the input."""
return 2
@property
def num_target_timestamps(self):
"""Number of timestamps to include in the target."""
return 2
def timeseries_dataset(self):
series = [[float(i + n) for n in range(self.num_series)] for i in range(10)]
return np.array(series)
@registry.register_problem
class TimeseriesSyntheticDataSeries10Samples100k(TimeseriesProblem):
"""10 synthetic timeseries with 100K samples/timestamps."""
@property
def num_train_shards(self):
"""Number of training shards."""
return 9
@property
def num_eval_shards(self):
"""Number of eval shards."""
return 1
@property
def num_series(self):
"""Number of timeseries."""
return 10
@property
def num_input_timestamps(self):
"""Number of timestamps to include in the input."""
return 250
@property
def num_target_timestamps(self):
"""Number of timestamps to include in the target."""
return 100
@property
def normalizing_constant(self):
return 0.01
@property
def timeseries_params(self):
"""Parameters for each timeseries."""
timeseries_params = [{
"m": 0.006,
"b": 300.0,
"A": 50.0,
"freqcoeff": 1500.0,
"rndA": 15.0,
"fn": np.sin
}, {
"m": 0.000,
"b": 500.0,
"A": 35.0,
"freqcoeff": 3500.0,
"rndA": 25.0,
"fn": np.cos
}, {
"m": -0.003,
"b": 800.0,
"A": 65.0,
"freqcoeff": 2500.0,
"rndA": 5.0,
"fn": np.sin
}, {
"m": 0.009,
"b": 600.0,
"A": 20.0,
"freqcoeff": 1000.0,
"rndA": 1.0,
"fn": np.cos
}, {
"m": 0.002,
"b": 700.0,
"A": 40.0,
"freqcoeff": 2000.0,
"rndA": 35.0,
"fn": np.sin
}, {
"m": -0.008,
"b": 1000.0,
"A": 70.0,
"freqcoeff": 3000.0,
"rndA": 25.0,
"fn": np.cos
}, {
"m": 0.000,
"b": 100.0,
"A": 25.0,
"freqcoeff": 1500.0,
"rndA": 10.0,
"fn": np.sin
}, {
"m": 0.004,
"b": 1500.0,
"A": 54.0,
"freqcoeff": 900.0,
"rndA": 55.0,
"fn": np.cos
}, {
"m": 0.005,
"b": 2000.0,
"A": 32.0,
"freqcoeff": 1100.0,
"rndA": 43.0,
"fn": np.sin
}, {
"m": 0.010,
"b": 2500.0,
"A": 43.0,
"freqcoeff": 1900.0,
"rndA": 53.0,
"fn": np.cos
}]
return timeseries_params
def timeseries_dataset(self):
series = np.array(
timeseries_data_generator.generate_data(100000, self.timeseries_params))
series = series.transpose()
return series
| [
"tensorflow.VarLenFeature",
"tensor2tensor.data_generators.timeseries_data_generator.generate_data",
"tensor2tensor.data_generators.text_encoder.RealEncoder",
"numpy.array",
"tensor2tensor.data_generators.generator_utils.shuffle_dataset",
"tensorflow.reshape"
] | [((5791, 5833), 'tensor2tensor.data_generators.generator_utils.shuffle_dataset', 'generator_utils.shuffle_dataset', (['all_paths'], {}), '(all_paths)\n', (5822, 5833), False, 'from tensor2tensor.data_generators import generator_utils\n'), ((6909, 6925), 'numpy.array', 'np.array', (['series'], {}), '(series)\n', (6917, 6925), True, 'import numpy as np\n'), ((1301, 1327), 'tensor2tensor.data_generators.text_encoder.RealEncoder', 'text_encoder.RealEncoder', ([], {}), '()\n', (1325, 1327), False, 'from tensor2tensor.data_generators import text_encoder\n'), ((1348, 1374), 'tensor2tensor.data_generators.text_encoder.RealEncoder', 'text_encoder.RealEncoder', ([], {}), '()\n', (1372, 1374), False, 'from tensor2tensor.data_generators import text_encoder\n'), ((3413, 3485), 'tensorflow.reshape', 'tf.reshape', (['flat_inputs', '[self.num_input_timestamps, 1, self.num_series]'], {}), '(flat_inputs, [self.num_input_timestamps, 1, self.num_series])\n', (3423, 3485), True, 'import tensorflow as tf\n'), ((3524, 3598), 'tensorflow.reshape', 'tf.reshape', (['flat_targets', '[self.num_target_timestamps, 1, self.num_series]'], {}), '(flat_targets, [self.num_target_timestamps, 1, self.num_series])\n', (3534, 3598), True, 'import tensorflow as tf\n'), ((5907, 5935), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (5923, 5935), True, 'import tensorflow as tf\n'), ((5956, 5984), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (5972, 5984), True, 'import tensorflow as tf\n'), ((9259, 9330), 'tensor2tensor.data_generators.timeseries_data_generator.generate_data', 'timeseries_data_generator.generate_data', (['(100000)', 'self.timeseries_params'], {}), '(100000, self.timeseries_params)\n', (9298, 9330), False, 'from tensor2tensor.data_generators import timeseries_data_generator\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pickle
import os
import argparse
import torch
from torch import distributions as td
#%%
def example1(e=1, N=10000):
#independent causes
beta = np.array([3.0, 2, 0])
x2_mean = np.random.uniform(0,1)
x2_e = torch.normal(x2_mean, e,[N,1])
x1_mean = np.random.uniform(0,1)
x1_e = torch.normal(x1_mean, e,[N,1])
y_e = beta[0] * x1_e + beta[1] * x2_e + torch.randn(N,1)
z = e*y_e + torch.normal(0, e, [N,1])
return (torch.cat((x1_e, x2_e, z), 1), y_e, beta)
def example2(e=1, N=10000):
#correlated causes
beta = np.array([2.0, 1.5, 0, 0])
x2_e = torch.normal(1, 0.5,[N,1])
x1_e = td.Uniform(-1,1).sample([N,1]) + x2_e
x3_e = torch.sin(x1_e) + torch.normal(0, 0.5,[N,1])
y_e = beta[0] * x1_e + beta[1] * x3_e + torch.randn(N,1)
z = e*y_e + torch.randn(N,1)
return (torch.cat((x1_e, x3_e, x2_e, z), 1), y_e, beta)
def example3(e=1, N=10000):
#mediator
beta = np.array([2.0, 1.5, 1.0, 0])
x2_e = torch.normal(1, 1/2.,[N,1])
x1_e = td.Uniform(-1,1).sample([N,1]) + x2_e
x3_e = torch.sin(x1_e) + torch.normal(0, 0.5,[N,1])
y_e = beta[0] * x1_e + beta[1] * x3_e + beta[2] * x2_e + torch.normal(0, e,[N,1])
z = e*y_e + torch.randn(N,1)
return (torch.cat((x1_e, x3_e, x2_e, z), 1), y_e, beta)
def example4(e=1, N=10000):
#unobserved mediator, observe ancestor
beta = np.array([2.0, 1.0, 0])
x2_e = torch.normal(1, 1/2.,[N,1])
x1_range = np.random.uniform(1,2)
x1_e = x2_e + td.Uniform(0,x1_range).sample([N,1])
u1 = x1_e + x2_e + torch.normal(0, 0.5,[N,1])
y_e = beta[0] * u1 + beta[1] * x2_e + torch.randn(N,1)
z = e*y_e + torch.randn(N,1)
beta[1] += beta[0]
return (torch.cat((x1_e, x2_e, z), 1), y_e, beta)
def example5(e=1, N=10000):
#collider
beta = np.array([2.0, 0])
x1_e = torch.normal(1, 1/2.,[N,1])
y_e = beta[0] * x1_e + torch.randn(N,1)
z = e*y_e/2 + 0.5*x1_e + torch.randn(N,1)
return (torch.cat((x1_e, z), 1), y_e, beta)
def IRMv1(environments, args, lmbd):
estimate_r = []
phi = torch.nn.Parameter(torch.normal(1,0.2,[environments[0][0].shape[1],1]))
dummy_w = torch.nn.Parameter(torch.Tensor([1.0]))
opt1 = torch.optim.Adam([phi], lr=args.lrs)
phi_old = 0
for iteration in range(args.max_iter):
error = 0
penalty = 0
for i in range(len(environments)):
x_e, y_e, beta = environments[i]
error_e = 0.5*mse(x_e @ phi * dummy_w, y_e).mean()
error += error_e
phi_grad_out = torch.autograd.grad(error_e, dummy_w, create_graph=True)
penalty += torch.square(phi_grad_out[0])
opt1.zero_grad()
total_loss = ((1/lmbd)*error +penalty)*100
total_loss.backward()
opt1.step()
estimate = phi.view(-1).detach().numpy()
estimate_r.append(estimate)
if iteration % 2000 == 0:
phi_new = np.mean(estimate_r[-100:],axis=0)
print(phi_new)
if ((np.sum(np.abs(phi_new - phi_old))<0.001) & (iteration>=10000)):
break
else:
phi_old = phi_new
return np.mean(estimate_r[-100:],axis=0)
def Naive_CoCo(environments, args):
estimate_r = []
phi = torch.nn.Parameter(torch.normal(1,0.2,[environments[0][0].shape[1],1]))
opt1 = torch.optim.Adam([phi], lr=args.lrs)
phi_old = 0
for iteration in range(args.max_iter):
error = 0
penalty = 0
for i in range(len(environments)):
x_e, y_e, beta = environments[i]
error_e = 0.5*mse(x_e @ phi, y_e).mean()
error += error_e
phi_grad_out = torch.autograd.grad(error_e, phi, create_graph=True)
penalty += torch.square(phi_grad_out[0][0] + \
torch.sum(phi_grad_out[0][1:]*phi[1:]))
opt1.zero_grad()
total_loss = (penalty)*100
total_loss.backward()
opt1.step()
estimate = phi.view(-1).detach().numpy()
estimate_r.append(estimate)
if iteration % 2000 == 0:
phi_new = np.mean(estimate_r[-100:],axis=0)
print(phi_new)
if ((np.sum(np.abs(phi_new - phi_old))<0.001) & (iteration>=10000)):
break
else:
phi_old = phi_new
return np.mean(estimate_r[-100:],axis=0)
def CoCo(environments, args):
estimate_r = []
phi = torch.nn.Parameter(torch.normal(1,0.2,[environments[0][0].shape[1],1]))
opt1 = torch.optim.Adam([phi], lr=args.lrs)
phi_old = 0
for iteration in range(args.max_iter):
error = 0
penalty = 0
for i in range(len(environments)):
x_e, y_e, beta = environments[i]
error_e = 0.5*mse(x_e @ phi, y_e).mean()
error += error_e
phi_grad_out = torch.autograd.grad(error_e, phi,create_graph=True)
penalty += torch.square(phi_grad_out[0][0]) + \
torch.sum(torch.square(phi_grad_out[0][1:]*phi[1:]))
opt1.zero_grad()
total_loss = torch.sqrt(penalty)
total_loss.backward()
opt1.step()
estimate = phi.view(-1).detach().numpy()
estimate_r.append(estimate)
if iteration % 2000 == 0:
phi_new = np.mean(estimate_r[-100:],axis=0)
print(phi_new)
if ((np.sum(np.abs(phi_new - phi_old))<0.001) & (iteration>=10000)):
break
else:
phi_old = phi_new
return np.mean(estimate_r[-100:],axis=0)
def ERM(environments, args):
estimate_r = []
phi = torch.nn.Parameter(torch.normal(1,0.2,[environments[0][0].shape[1],1]))
opt1 = torch.optim.SGD([phi], lr=0.002)
phi_old = 0
for iteration in range(args.max_iter):
error = 0
for i in range(len(environments)):
x_e, y_e, beta = environments[i]
error_e = 0.5*mse(x_e @ phi , y_e).mean()
error += error_e
opt1.zero_grad()
error.backward()
opt1.step()
estimate = phi.view(-1).detach().numpy()
estimate_r.append(estimate)
if iteration % 2000 == 0:
phi_new = np.mean(estimate_r[-100:],axis=0)
print(phi_new)
if ((np.sum(np.abs(phi_new - phi_old))<0.001) & (iteration>=10000)):
break
else:
phi_old = phi_new
return np.mean(estimate_r[-100:],axis=0)
def Rex(environments, args, lmbd):
estimate_r = []
phi = torch.nn.Parameter(torch.normal(1,0.2,[environments[0][0].shape[1],1]))
opt1 = torch.optim.Adam([phi], lr=args.lrs)
phi_old = 0
for iteration in range(args.max_iter):
error = []
for i in range(len(environments)):
x_e, y_e, beta = environments[i]
error_e = 0.5*mse(x_e @ phi, y_e).mean()
error.append(error_e)
losses = torch.stack(error)
opt1.zero_grad()
total_loss = losses.sum() + lmbd * losses.var()
total_loss.backward()
opt1.step()
estimate = phi.view(-1).detach().numpy()
estimate_r.append(estimate)
if iteration % 2000 == 0:
phi_new = np.mean(estimate_r[-100:],axis=0)
print(phi_new)
if ((np.sum(np.abs(phi_new - phi_old))<0.001) & (iteration>=10000)):
break
else:
phi_old = phi_new
return np.mean(estimate_r[-100:],axis=0)
def RVP(environments, args, lmbd):
estimate_r = []
phi = torch.nn.Parameter(torch.normal(1,0.2,[environments[0][0].shape[1],1]))
opt1 = torch.optim.Adam([phi], lr=args.lrs)
phi_old = 0
for iteration in range(args.max_iter):
error = []
for i in range(len(environments)):
x_e, y_e, beta = environments[i]
error_e = 0.5*mse(x_e @ phi, y_e).mean()
error.append(error_e)
losses = torch.stack(error)
opt1.zero_grad()
total_loss = (losses.sum() + lmbd * torch.sqrt(losses.var()+1e-8))*10
total_loss.backward()
opt1.step()
estimate = phi.view(-1).detach().numpy()
estimate_r.append(estimate)
if iteration % 2000 == 0:
phi_new = np.mean(estimate_r[-100:],axis=0)
print(phi_new)
if ((np.sum(np.abs(phi_new - phi_old))<0.001) & (iteration>=10000)):
break
else:
phi_old = phi_new
return np.mean(estimate_r[-100:],axis=0)
def danzig(environments, args):
Gs = []
Zs = []
for i in range(len(environments)):
x_e, y_e, beta = environments[i]
Gs.append(np.matmul(np.transpose(x_e),x_e)/len(x_e))
Zs.append(np.matmul(np.transpose(x_e),y_e)/len(x_e))
phi = torch.matmul(torch.inverse(Gs[0]-Gs[1]), Zs[0]-Zs[1])
return torch.squeeze(phi)
def run(methods, environments, args):
beta = environments[0][-1]
if 'IRMv1' in methods:
result1 = []
for lmbd in [2,20,200]:
print('IRMv1,', 'causal coef=', beta, 'lmbd=', lmbd)
result1.append(IRMv1(environments, args, lmbd=lmbd))
if 'CoCo' in methods:
print('CoCo,', 'causal coef=', beta)
result2 = CoCo(environments, args)
if 'ERM' in methods:
print('ERM,', 'causal coef=', beta)
result3 = ERM(environments, args)
if 'Naive_CoCo' in methods:
print('Naive_CoCo,', 'causal coef=', beta)
result4 = Naive_CoCo(environments, args)
if 'Rex' in methods:
result5 = []
for lmbd in [100,1000,10000]:
print('Rex,', 'causal coef=', beta)
result5.append(Rex(environments, args, lmbd=lmbd))
if 'RVP' in methods:
result6 = []
for lmbd in [10,100,1000]:
print('RVP,', 'causal coef=', beta)
result6.append(RVP(environments, args, lmbd=lmbd))
if 'danzig' in methods:
print('danzig,', 'causal coef=', beta)
result7 = danzig(environments, args)
return [result1, result2, result3, result4, result5, result6, result7, beta]
#%%
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=2, help='Random seed')
parser.add_argument('--max_iter', type=int, default=100000, help='max iteration.')
parser.add_argument('--N', type=int, default=10000, help='number of data per env.')
parser.add_argument('--path', default='results/', help='The path results to be saved.')
args = parser.parse_args()
np.set_printoptions(suppress=True, precision=2, linewidth=300)
path = os.path.join(args.path, f'dag_{args.seed}')
if not os.path.exists(path):
os.makedirs(path)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
examples = [example1, example2, example3, example4, example5]
methods = ['Naive_CoCo', 'IRMv1', 'CoCo','ERM','Rex','RVP', 'danzig']
results = []
for data_gen in examples:
print('#######################')
print(data_gen)
print('#######################')
mse = torch.nn.MSELoss(reduction="none")
environments = [data_gen(e = 0.5, N=args.N),
data_gen(e = 2, N=args.N)]
args.lrs = 0.1 if data_gen == example5 else 0.01
results.append(run(methods, environments, args))
pickle.dump(results,open(os.path.join(path, 'DAG.pkl'),'wb'))
| [
"torch.sin",
"torch.sqrt",
"torch.square",
"numpy.array",
"torch.nn.MSELoss",
"torch.normal",
"torch.sum",
"torch.squeeze",
"numpy.mean",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.random.seed",
"torch.randn",
"torch.distributions.Uniform",
"numpy.abs",
"torch.optim.SGD",
"t... | [((290, 311), 'numpy.array', 'np.array', (['[3.0, 2, 0]'], {}), '([3.0, 2, 0])\n', (298, 311), True, 'import numpy as np\n'), ((326, 349), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (343, 349), True, 'import numpy as np\n'), ((360, 392), 'torch.normal', 'torch.normal', (['x2_mean', 'e', '[N, 1]'], {}), '(x2_mean, e, [N, 1])\n', (372, 392), False, 'import torch\n'), ((405, 428), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (422, 428), True, 'import numpy as np\n'), ((439, 471), 'torch.normal', 'torch.normal', (['x1_mean', 'e', '[N, 1]'], {}), '(x1_mean, e, [N, 1])\n', (451, 471), False, 'import torch\n'), ((692, 718), 'numpy.array', 'np.array', (['[2.0, 1.5, 0, 0]'], {}), '([2.0, 1.5, 0, 0])\n', (700, 718), True, 'import numpy as np\n'), ((730, 758), 'torch.normal', 'torch.normal', (['(1)', '(0.5)', '[N, 1]'], {}), '(1, 0.5, [N, 1])\n', (742, 758), False, 'import torch\n'), ((1074, 1102), 'numpy.array', 'np.array', (['[2.0, 1.5, 1.0, 0]'], {}), '([2.0, 1.5, 1.0, 0])\n', (1082, 1102), True, 'import numpy as np\n'), ((1114, 1146), 'torch.normal', 'torch.normal', (['(1)', '(1 / 2.0)', '[N, 1]'], {}), '(1, 1 / 2.0, [N, 1])\n', (1126, 1146), False, 'import torch\n'), ((1511, 1534), 'numpy.array', 'np.array', (['[2.0, 1.0, 0]'], {}), '([2.0, 1.0, 0])\n', (1519, 1534), True, 'import numpy as np\n'), ((1546, 1578), 'torch.normal', 'torch.normal', (['(1)', '(1 / 2.0)', '[N, 1]'], {}), '(1, 1 / 2.0, [N, 1])\n', (1558, 1578), False, 'import torch\n'), ((1589, 1612), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2)'], {}), '(1, 2)\n', (1606, 1612), True, 'import numpy as np\n'), ((1943, 1961), 'numpy.array', 'np.array', (['[2.0, 0]'], {}), '([2.0, 0])\n', (1951, 1961), True, 'import numpy as np\n'), ((1973, 2005), 'torch.normal', 'torch.normal', (['(1)', '(1 / 2.0)', '[N, 1]'], {}), '(1, 1 / 2.0, [N, 1])\n', (1985, 2005), False, 'import torch\n'), ((2347, 2383), 'torch.optim.Adam', 
'torch.optim.Adam', (['[phi]'], {'lr': 'args.lrs'}), '([phi], lr=args.lrs)\n', (2363, 2383), False, 'import torch\n'), ((3343, 3377), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (3350, 3377), True, 'import numpy as np\n'), ((3527, 3563), 'torch.optim.Adam', 'torch.optim.Adam', (['[phi]'], {'lr': 'args.lrs'}), '([phi], lr=args.lrs)\n', (3543, 3563), False, 'import torch\n'), ((4578, 4612), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (4585, 4612), True, 'import numpy as np\n'), ((4756, 4792), 'torch.optim.Adam', 'torch.optim.Adam', (['[phi]'], {'lr': 'args.lrs'}), '([phi], lr=args.lrs)\n', (4772, 4792), False, 'import torch\n'), ((5807, 5841), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (5814, 5841), True, 'import numpy as np\n'), ((5984, 6016), 'torch.optim.SGD', 'torch.optim.SGD', (['[phi]'], {'lr': '(0.002)'}), '([phi], lr=0.002)\n', (5999, 6016), False, 'import torch\n'), ((6773, 6807), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (6780, 6807), True, 'import numpy as np\n'), ((6958, 6994), 'torch.optim.Adam', 'torch.optim.Adam', (['[phi]'], {'lr': 'args.lrs'}), '([phi], lr=args.lrs)\n', (6974, 6994), False, 'import torch\n'), ((7833, 7867), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (7840, 7867), True, 'import numpy as np\n'), ((8016, 8052), 'torch.optim.Adam', 'torch.optim.Adam', (['[phi]'], {'lr': 'args.lrs'}), '([phi], lr=args.lrs)\n', (8032, 8052), False, 'import torch\n'), ((8913, 8947), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (8920, 8947), True, 'import numpy as np\n'), ((9281, 9299), 'torch.squeeze', 'torch.squeeze', (['phi'], {}), '(phi)\n', (9294, 9299), False, 'import torch\n'), ((10604, 10629), 
'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10627, 10629), False, 'import argparse\n'), ((11012, 11074), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)', 'precision': '(2)', 'linewidth': '(300)'}), '(suppress=True, precision=2, linewidth=300)\n', (11031, 11074), True, 'import numpy as np\n'), ((11086, 11129), 'os.path.join', 'os.path.join', (['args.path', 'f"""dag_{args.seed}"""'], {}), "(args.path, f'dag_{args.seed}')\n", (11098, 11129), False, 'import os\n'), ((11194, 11219), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (11208, 11219), True, 'import numpy as np\n'), ((11224, 11252), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (11241, 11252), False, 'import torch\n'), ((11257, 11290), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (11279, 11290), False, 'import torch\n'), ((514, 531), 'torch.randn', 'torch.randn', (['N', '(1)'], {}), '(N, 1)\n', (525, 531), False, 'import torch\n'), ((548, 574), 'torch.normal', 'torch.normal', (['(0)', 'e', '[N, 1]'], {}), '(0, e, [N, 1])\n', (560, 574), False, 'import torch\n'), ((586, 615), 'torch.cat', 'torch.cat', (['(x1_e, x2_e, z)', '(1)'], {}), '((x1_e, x2_e, z), 1)\n', (595, 615), False, 'import torch\n'), ((818, 833), 'torch.sin', 'torch.sin', (['x1_e'], {}), '(x1_e)\n', (827, 833), False, 'import torch\n'), ((836, 864), 'torch.normal', 'torch.normal', (['(0)', '(0.5)', '[N, 1]'], {}), '(0, 0.5, [N, 1])\n', (848, 864), False, 'import torch\n'), ((907, 924), 'torch.randn', 'torch.randn', (['N', '(1)'], {}), '(N, 1)\n', (918, 924), False, 'import torch\n'), ((941, 958), 'torch.randn', 'torch.randn', (['N', '(1)'], {}), '(N, 1)\n', (952, 958), False, 'import torch\n'), ((972, 1007), 'torch.cat', 'torch.cat', (['(x1_e, x3_e, x2_e, z)', '(1)'], {}), '((x1_e, x3_e, x2_e, z), 1)\n', (981, 1007), False, 'import torch\n'), ((1203, 1218), 'torch.sin', 'torch.sin', 
(['x1_e'], {}), '(x1_e)\n', (1212, 1218), False, 'import torch\n'), ((1221, 1249), 'torch.normal', 'torch.normal', (['(0)', '(0.5)', '[N, 1]'], {}), '(0, 0.5, [N, 1])\n', (1233, 1249), False, 'import torch\n'), ((1309, 1335), 'torch.normal', 'torch.normal', (['(0)', 'e', '[N, 1]'], {}), '(0, e, [N, 1])\n', (1321, 1335), False, 'import torch\n'), ((1351, 1368), 'torch.randn', 'torch.randn', (['N', '(1)'], {}), '(N, 1)\n', (1362, 1368), False, 'import torch\n'), ((1380, 1415), 'torch.cat', 'torch.cat', (['(x1_e, x3_e, x2_e, z)', '(1)'], {}), '((x1_e, x3_e, x2_e, z), 1)\n', (1389, 1415), False, 'import torch\n'), ((1690, 1718), 'torch.normal', 'torch.normal', (['(0)', '(0.5)', '[N, 1]'], {}), '(0, 0.5, [N, 1])\n', (1702, 1718), False, 'import torch\n'), ((1759, 1776), 'torch.randn', 'torch.randn', (['N', '(1)'], {}), '(N, 1)\n', (1770, 1776), False, 'import torch\n'), ((1793, 1810), 'torch.randn', 'torch.randn', (['N', '(1)'], {}), '(N, 1)\n', (1804, 1810), False, 'import torch\n'), ((1847, 1876), 'torch.cat', 'torch.cat', (['(x1_e, x2_e, z)', '(1)'], {}), '((x1_e, x2_e, z), 1)\n', (1856, 1876), False, 'import torch\n'), ((2028, 2045), 'torch.randn', 'torch.randn', (['N', '(1)'], {}), '(N, 1)\n', (2039, 2045), False, 'import torch\n'), ((2074, 2091), 'torch.randn', 'torch.randn', (['N', '(1)'], {}), '(N, 1)\n', (2085, 2091), False, 'import torch\n'), ((2104, 2127), 'torch.cat', 'torch.cat', (['(x1_e, z)', '(1)'], {}), '((x1_e, z), 1)\n', (2113, 2127), False, 'import torch\n'), ((2228, 2282), 'torch.normal', 'torch.normal', (['(1)', '(0.2)', '[environments[0][0].shape[1], 1]'], {}), '(1, 0.2, [environments[0][0].shape[1], 1])\n', (2240, 2282), False, 'import torch\n'), ((2314, 2333), 'torch.Tensor', 'torch.Tensor', (['[1.0]'], {}), '([1.0])\n', (2326, 2333), False, 'import torch\n'), ((3463, 3517), 'torch.normal', 'torch.normal', (['(1)', '(0.2)', '[environments[0][0].shape[1], 1]'], {}), '(1, 0.2, [environments[0][0].shape[1], 1])\n', (3475, 3517), False, 'import 
torch\n'), ((4692, 4746), 'torch.normal', 'torch.normal', (['(1)', '(0.2)', '[environments[0][0].shape[1], 1]'], {}), '(1, 0.2, [environments[0][0].shape[1], 1])\n', (4704, 4746), False, 'import torch\n'), ((5338, 5357), 'torch.sqrt', 'torch.sqrt', (['penalty'], {}), '(penalty)\n', (5348, 5357), False, 'import torch\n'), ((5920, 5974), 'torch.normal', 'torch.normal', (['(1)', '(0.2)', '[environments[0][0].shape[1], 1]'], {}), '(1, 0.2, [environments[0][0].shape[1], 1])\n', (5932, 5974), False, 'import torch\n'), ((6894, 6948), 'torch.normal', 'torch.normal', (['(1)', '(0.2)', '[environments[0][0].shape[1], 1]'], {}), '(1, 0.2, [environments[0][0].shape[1], 1])\n', (6906, 6948), False, 'import torch\n'), ((7282, 7300), 'torch.stack', 'torch.stack', (['error'], {}), '(error)\n', (7293, 7300), False, 'import torch\n'), ((7952, 8006), 'torch.normal', 'torch.normal', (['(1)', '(0.2)', '[environments[0][0].shape[1], 1]'], {}), '(1, 0.2, [environments[0][0].shape[1], 1])\n', (7964, 8006), False, 'import torch\n'), ((8340, 8358), 'torch.stack', 'torch.stack', (['error'], {}), '(error)\n', (8351, 8358), False, 'import torch\n'), ((9229, 9257), 'torch.inverse', 'torch.inverse', (['(Gs[0] - Gs[1])'], {}), '(Gs[0] - Gs[1])\n', (9242, 9257), False, 'import torch\n'), ((11141, 11161), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (11155, 11161), False, 'import os\n'), ((11171, 11188), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (11182, 11188), False, 'import os\n'), ((11598, 11632), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (11614, 11632), False, 'import torch\n'), ((2705, 2761), 'torch.autograd.grad', 'torch.autograd.grad', (['error_e', 'dummy_w'], {'create_graph': '(True)'}), '(error_e, dummy_w, create_graph=True)\n', (2724, 2761), False, 'import torch\n'), ((2785, 2814), 'torch.square', 'torch.square', (['phi_grad_out[0]'], {}), '(phi_grad_out[0])\n', (2797, 2814), False, 'import 
torch\n'), ((3111, 3145), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (3118, 3145), True, 'import numpy as np\n'), ((3875, 3927), 'torch.autograd.grad', 'torch.autograd.grad', (['error_e', 'phi'], {'create_graph': '(True)'}), '(error_e, phi, create_graph=True)\n', (3894, 3927), False, 'import torch\n'), ((4331, 4365), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (4338, 4365), True, 'import numpy as np\n'), ((5103, 5155), 'torch.autograd.grad', 'torch.autograd.grad', (['error_e', 'phi'], {'create_graph': '(True)'}), '(error_e, phi, create_graph=True)\n', (5122, 5155), False, 'import torch\n'), ((5564, 5598), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (5571, 5598), True, 'import numpy as np\n'), ((6530, 6564), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (6537, 6564), True, 'import numpy as np\n'), ((7601, 7635), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (7608, 7635), True, 'import numpy as np\n'), ((8681, 8715), 'numpy.mean', 'np.mean', (['estimate_r[-100:]'], {'axis': '(0)'}), '(estimate_r[-100:], axis=0)\n', (8688, 8715), True, 'import numpy as np\n'), ((11885, 11914), 'os.path.join', 'os.path.join', (['path', '"""DAG.pkl"""'], {}), "(path, 'DAG.pkl')\n", (11897, 11914), False, 'import os\n'), ((768, 785), 'torch.distributions.Uniform', 'td.Uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (778, 785), True, 'from torch import distributions as td\n'), ((1153, 1170), 'torch.distributions.Uniform', 'td.Uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1163, 1170), True, 'from torch import distributions as td\n'), ((1630, 1653), 'torch.distributions.Uniform', 'td.Uniform', (['(0)', 'x1_range'], {}), '(0, x1_range)\n', (1640, 1653), True, 'from torch import distributions as td\n'), ((5178, 5210), 
'torch.square', 'torch.square', (['phi_grad_out[0][0]'], {}), '(phi_grad_out[0][0])\n', (5190, 5210), False, 'import torch\n'), ((4011, 4051), 'torch.sum', 'torch.sum', (['(phi_grad_out[0][1:] * phi[1:])'], {}), '(phi_grad_out[0][1:] * phi[1:])\n', (4020, 4051), False, 'import torch\n'), ((5241, 5284), 'torch.square', 'torch.square', (['(phi_grad_out[0][1:] * phi[1:])'], {}), '(phi_grad_out[0][1:] * phi[1:])\n', (5253, 5284), False, 'import torch\n'), ((9112, 9129), 'numpy.transpose', 'np.transpose', (['x_e'], {}), '(x_e)\n', (9124, 9129), True, 'import numpy as np\n'), ((9173, 9190), 'numpy.transpose', 'np.transpose', (['x_e'], {}), '(x_e)\n', (9185, 9190), True, 'import numpy as np\n'), ((3196, 3221), 'numpy.abs', 'np.abs', (['(phi_new - phi_old)'], {}), '(phi_new - phi_old)\n', (3202, 3221), True, 'import numpy as np\n'), ((4416, 4441), 'numpy.abs', 'np.abs', (['(phi_new - phi_old)'], {}), '(phi_new - phi_old)\n', (4422, 4441), True, 'import numpy as np\n'), ((5649, 5674), 'numpy.abs', 'np.abs', (['(phi_new - phi_old)'], {}), '(phi_new - phi_old)\n', (5655, 5674), True, 'import numpy as np\n'), ((6615, 6640), 'numpy.abs', 'np.abs', (['(phi_new - phi_old)'], {}), '(phi_new - phi_old)\n', (6621, 6640), True, 'import numpy as np\n'), ((7686, 7711), 'numpy.abs', 'np.abs', (['(phi_new - phi_old)'], {}), '(phi_new - phi_old)\n', (7692, 7711), True, 'import numpy as np\n'), ((8766, 8791), 'numpy.abs', 'np.abs', (['(phi_new - phi_old)'], {}), '(phi_new - phi_old)\n', (8772, 8791), True, 'import numpy as np\n')] |
import numpy as np
import pickle
import matplotlib.pyplot as plt

"""
The first part of this file tests whether data.py prepared the data correctly.
The second part tests whether data_FLIC_plus.py prepared the data correctly.
"""

N_JOINT = 9  # the number of joints to display


def _inspect_split(x_path, y_path, n_joint=N_JOINT):
    """Load one data split, pick a random sample and locate its joint peaks.

    Args:
        x_path: path to the .npy image tensor (N, H, W, C).
        y_path: path to the .npy heat-map tensor (N, h, w, n_joint).
        n_joint: number of joint heat maps to inspect.

    Returns:
        (img, hmap, coords): the sampled image, the heat map of the last
        joint, and the per-joint (row, col) peak coordinates scaled back
        to image resolution (heat maps are 8x downsampled).
    """
    y_test = np.load(y_path)
    x_test = np.load(x_path)
    print('x_test shape is', x_test.shape)
    i = np.random.randint(0, high=x_test.shape[0])
    print('Show the %dth image and the heat map for n_joint:' % i)
    y_test = y_test.astype(np.float32) / 256
    coords = np.zeros([2, n_joint])
    img = x_test[i].reshape(x_test.shape[1], x_test.shape[2], x_test.shape[3])
    hmap = None
    for joint in range(n_joint):
        print(joint)
        hmap = y_test[i, :, :, joint].reshape(y_test.shape[1], y_test.shape[2])
        print(hmap.shape)
        # argmax + unravel_index yields a single (row, col) even when the
        # maximum is attained at several pixels; the previous
        # np.where(hmap == np.max(hmap)) broke on such ties.
        x, y = np.unravel_index(np.argmax(hmap), hmap.shape)
        print(x, y)
        coords[:, joint] = [x, y]
    coords = coords * 8  # heat maps are 8x smaller than the input image
    print('coords:', coords)
    return img, hmap, coords


### The first part
img, hmap, coords = _inspect_split('x_test_flic.npy', 'y_test_flic.npy')
with open('pairwise_distribution.pickle', 'rb') as handle:
    pairwise_distribution = pickle.load(handle)
# Save selected pairwise spatial distributions as images.
for name in ['nose_torso', 'rsho_torso', 'relb_torso', 'rwri_torso', 'rhip_torso']:
    plt.imshow(pairwise_distribution[name])
    plt.savefig('img/0epoch_' + name + '.png', dpi=300)
    plt.clf()

### The second part
img, hmap, coords = _inspect_split('x_test_flic_plus.npy', 'y_test_flic_plus.npy')
with open('pairwise_distribution_plus.pickle', 'rb') as handle:
    pairwise_distribution = pickle.load(handle)
plt.figure(1)
plt.imshow(img)
plt.figure(2)
plt.imshow(hmap)
plt.figure(3)
plt.imshow(pairwise_distribution['lwri_torso'])
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.reshape",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"pickle.load",
"numpy.max",
"numpy.random.randint",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.load",
"matplotlib.pyplot.show"
] | [((302, 328), 'numpy.load', 'np.load', (['"""y_test_flic.npy"""'], {}), "('y_test_flic.npy')\n", (309, 328), True, 'import numpy as np\n'), ((338, 364), 'numpy.load', 'np.load', (['"""x_test_flic.npy"""'], {}), "('x_test_flic.npy')\n", (345, 364), True, 'import numpy as np\n'), ((408, 450), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': 'x_test.shape[0]'}), '(0, high=x_test.shape[0])\n', (425, 450), True, 'import numpy as np\n'), ((582, 604), 'numpy.zeros', 'np.zeros', (['[2, n_joint]'], {}), '([2, n_joint])\n', (590, 604), True, 'import numpy as np\n'), ((636, 704), 'numpy.reshape', 'np.reshape', (['img', '(x_test.shape[1], x_test.shape[2], x_test.shape[3])'], {}), '(img, (x_test.shape[1], x_test.shape[2], x_test.shape[3]))\n', (646, 704), True, 'import numpy as np\n'), ((1507, 1538), 'numpy.load', 'np.load', (['"""y_test_flic_plus.npy"""'], {}), "('y_test_flic_plus.npy')\n", (1514, 1538), True, 'import numpy as np\n'), ((1548, 1579), 'numpy.load', 'np.load', (['"""x_test_flic_plus.npy"""'], {}), "('x_test_flic_plus.npy')\n", (1555, 1579), True, 'import numpy as np\n'), ((1623, 1665), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': 'x_test.shape[0]'}), '(0, high=x_test.shape[0])\n', (1640, 1665), True, 'import numpy as np\n'), ((1797, 1819), 'numpy.zeros', 'np.zeros', (['[2, n_joint]'], {}), '([2, n_joint])\n', (1805, 1819), True, 'import numpy as np\n'), ((1851, 1919), 'numpy.reshape', 'np.reshape', (['img', '(x_test.shape[1], x_test.shape[2], x_test.shape[3])'], {}), '(img, (x_test.shape[1], x_test.shape[2], x_test.shape[3]))\n', (1861, 1919), True, 'import numpy as np\n'), ((2366, 2379), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2376, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2380, 2395), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2390, 2395), True, 'import matplotlib.pyplot as plt\n'), ((2398, 2411), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), 
'(2)\n', (2408, 2411), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2428), 'matplotlib.pyplot.imshow', 'plt.imshow', (['hmap'], {}), '(hmap)\n', (2422, 2428), True, 'import matplotlib.pyplot as plt\n'), ((2431, 2444), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (2441, 2444), True, 'import matplotlib.pyplot as plt\n'), ((2445, 2492), 'matplotlib.pyplot.imshow', 'plt.imshow', (["pairwise_distribution['lwri_torso']"], {}), "(pairwise_distribution['lwri_torso'])\n", (2455, 2492), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2505), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2503, 2505), True, 'import matplotlib.pyplot as plt\n'), ((797, 849), 'numpy.reshape', 'np.reshape', (['hmap', '(y_test.shape[1], y_test.shape[2])'], {}), '(hmap, (y_test.shape[1], y_test.shape[2]))\n', (807, 849), True, 'import numpy as np\n'), ((1093, 1112), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1104, 1112), False, 'import pickle\n'), ((1307, 1346), 'matplotlib.pyplot.imshow', 'plt.imshow', (['pairwise_distribution[name]'], {}), '(pairwise_distribution[name])\n', (1317, 1346), True, 'import matplotlib.pyplot as plt\n'), ((1351, 1402), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('img/0epoch_' + name + '.png')"], {'dpi': '(300)'}), "('img/0epoch_' + name + '.png', dpi=300)\n", (1362, 1402), True, 'import matplotlib.pyplot as plt\n'), ((1407, 1416), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1414, 1416), True, 'import matplotlib.pyplot as plt\n'), ((2012, 2064), 'numpy.reshape', 'np.reshape', (['hmap', '(y_test.shape[1], y_test.shape[2])'], {}), '(hmap, (y_test.shape[1], y_test.shape[2]))\n', (2022, 2064), True, 'import numpy as np\n'), ((2313, 2332), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (2324, 2332), False, 'import pickle\n'), ((900, 912), 'numpy.max', 'np.max', (['hmap'], {}), '(hmap)\n', (906, 912), True, 'import numpy as np\n'), ((2115, 2127), 'numpy.max', 'np.max', (['hmap'], {}), 
'(hmap)\n', (2121, 2127), True, 'import numpy as np\n')] |
import torch
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import argparse
import utils
import dataloader
from lpgnn_wrapper import GNNWrapper, SemiSupGNNWrapper
#
# # fix random seeds for reproducibility
# SEED = 123
# torch.manual_seed(SEED)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# np.random.seed(SEED)
def main():
    """Train a semi-supervised LPGNN on Zachary's Karate-club graph and
    save an animation of the evolving 2-D node states to learning.mp4.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs', type=int, default=300, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--cuda_dev', type=int, default=0,
                        help='select specific CUDA device for training')
    parser.add_argument('--n_gpu_use', type=int, default=1,
                        help='select number of CUDA device for training')
    # parser.add_argument('--seed', type=int, default=1, metavar='S',
    #                     help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=50, metavar='N',
                        help='logging training status cadency')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    parser.add_argument('--tensorboard', action='store_true', default=True,
                        help='For logging the model in tensorboard')
    args = parser.parse_args()
    # Fall back to CPU when CUDA is unavailable or explicitly disabled.
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    if not use_cuda:
        args.n_gpu_use = 0
    device = utils.prepare_device(n_gpu_use=args.n_gpu_use, gpu_id=args.cuda_dev)
    # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    # torch.manual_seed(args.seed)
    # # fix random seeds for reproducibility
    # SEED = 123
    # torch.manual_seed(SEED)
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # np.random.seed(SEED)
    # configugations
    cfg = GNNWrapper.Config()
    cfg.use_cuda = use_cuda
    cfg.device = device
    cfg.log_interval = args.log_interval
    cfg.tensorboard = args.tensorboard
    # cfg.batch_size = args.batch_size
    # cfg.test_batch_size = args.test_batch_size
    # cfg.momentum = args.momentum
    cfg.dataset_path = './data'
    cfg.epochs = args.epochs
    cfg.lrw = args.lr
    cfg.activation = nn.Tanh()
    cfg.state_transition_hidden_dims = [4]
    cfg.output_function_hidden_dims = []
    cfg.state_dim = 2
    cfg.max_iterations = 50
    cfg.convergence_threshold = 0.001
    cfg.graph_based = False
    # NOTE(review): the assignments below override cfg.log_interval and
    # cfg.lrw that were just set from the CLI arguments above.
    cfg.log_interval = 10
    cfg.task_type = "semisupervised"
    cfg.lrw = 0.01
    # model creation
    model = SemiSupGNNWrapper(cfg)
    # dataset creation
    dset = dataloader.get_karate(aggregation_type="sum", sparse_matrix=True)  # generate the dataset
    #dset = dataloader.get_twochainsSSE(aggregation_type="sum", percentage=0.1, sparse_matrix=True) # generate the dataset
    model(dset)  # dataset initialization into the GNN
    # training code
    # plotting utilities: per-epoch converged node states and outputs,
    # moved to CPU so the animation below can read them.
    all_states = []
    all_outs = []
    for epoch in range(1, args.epochs + 1):
        out = model.train_step(epoch)
        all_states.append(model.gnn.converged_states.detach().to("cpu"))
        all_outs.append(out.detach().to("cpu"))
        if epoch % 10 == 0:
            model.test_step(epoch)
    # model.test_step()
    # if args.save_model:
    #     torch.save(model.gnn.state_dict(), "mnist_cnn.pt")
    # Function-local imports: plotting dependencies are only needed from
    # here on.
    import matplotlib.animation as animation
    import matplotlib.pyplot as plt
    import networkx as nx
    nx_G = nx.karate_club_graph().to_directed()
    def draw(i):
        # Render epoch i: node positions are the learned 2-D states,
        # node colours are the ground-truth classes.
        clscolor = ['#FF0000', '#0000FF', '#FF00FF', '#00FF00']
        pos = {}
        colors = []
        for v in range(34):
            pos[v] = all_states[i][v].numpy()
            cls = all_outs[i][v].argmax(axis=-1)
            # colors.append(clscolor[cls])
            # print(clscolor[targets[v]])
            colors.append(clscolor[dset.targets[v]])
        ax.cla()
        ax.axis('off')
        ax.set_title('Epoch: %d' % i)
        # node_sha = ["o" for i in range(34)]
        # for j in idx_train:
        #     node_sha[j] = "s"
        # Training nodes are drawn slightly larger (350 vs 200).
        node_sizes = np.full((34), 200)
        node_sizes[dset.idx_train.detach().to("cpu").numpy()] = 350
        nx.draw_networkx(nx_G.to_undirected(), pos, node_color=colors,
                         with_labels=True, node_size=node_sizes, ax=ax)
        # nx.draw_networkx(nx_G.to_undirected().subgraph(idx_train), pos, node_color=[colors[k] for k in idx_train], node_shape='s',
        #                  with_labels=True, node_size=300, ax=ax)
    fig = plt.figure(dpi=150)
    fig.clf()
    ax = fig.subplots()
    draw(0)  # draw the prediction of the first epoch
    plt.close()
    ani = animation.FuncAnimation(fig, draw, frames=len(all_states), interval=200)
    ani.save('learning.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
if __name__ == '__main__':
    main()
| [
"dataloader.get_karate",
"utils.prepare_device",
"lpgnn_wrapper.GNNWrapper.Config",
"torch.nn.Tanh",
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"torch.cuda.is_available",
"networkx.karate_club_graph",
"numpy.full",
"lpgnn_wrapper.SemiSupGNNWrapper"
] | [((529, 575), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch"""'}), "(description='PyTorch')\n", (552, 575), False, 'import argparse\n'), ((2405, 2473), 'utils.prepare_device', 'utils.prepare_device', ([], {'n_gpu_use': 'args.n_gpu_use', 'gpu_id': 'args.cuda_dev'}), '(n_gpu_use=args.n_gpu_use, gpu_id=args.cuda_dev)\n', (2425, 2473), False, 'import utils\n'), ((2828, 2847), 'lpgnn_wrapper.GNNWrapper.Config', 'GNNWrapper.Config', ([], {}), '()\n', (2845, 2847), False, 'from lpgnn_wrapper import GNNWrapper, SemiSupGNNWrapper\n'), ((3210, 3219), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3217, 3219), True, 'import torch.nn as nn\n'), ((3537, 3559), 'lpgnn_wrapper.SemiSupGNNWrapper', 'SemiSupGNNWrapper', (['cfg'], {}), '(cfg)\n', (3554, 3559), False, 'from lpgnn_wrapper import GNNWrapper, SemiSupGNNWrapper\n'), ((3594, 3659), 'dataloader.get_karate', 'dataloader.get_karate', ([], {'aggregation_type': '"""sum"""', 'sparse_matrix': '(True)'}), "(aggregation_type='sum', sparse_matrix=True)\n", (3615, 3659), False, 'import dataloader\n'), ((5514, 5533), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(150)'}), '(dpi=150)\n', (5524, 5533), True, 'import matplotlib.pyplot as plt\n'), ((5630, 5641), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5639, 5641), True, 'import matplotlib.pyplot as plt\n'), ((2317, 2342), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2340, 2342), False, 'import torch\n'), ((5081, 5097), 'numpy.full', 'np.full', (['(34)', '(200)'], {}), '(34, 200)\n', (5088, 5097), True, 'import numpy as np\n'), ((4445, 4467), 'networkx.karate_club_graph', 'nx.karate_club_graph', ([], {}), '()\n', (4465, 4467), True, 'import networkx as nx\n')] |
# first-order finite-difference implicit method for linear advection
#
# We are solving a_t + u a_x = 0
#
# The upwinded implicit update appears as:
#
# n+1 n+1 n
# -C a + (1 - C) a = a
# i-1 i i
#
# where C is the CFL number
#
# We use a periodic grid with N points, 0, ..., N-1, with the data
# located at those points. This means that since 0 and N-1 are on
# the boundary, they are the same point. Therefore, we only need
# to update points 1, ..., N-1
#
# No ghost points are used here.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
# Use the Computer Modern math fonts so axis labels match LaTeX documents.
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
class FDgrid:
    """A periodic finite-difference grid on [xmin, xmax] with nx nodes.

    Nodes 0 and nx-1 coincide because of periodicity, so only the points
    ilo..ihi = 1..nx-1 are evolved; node 0 is refreshed by fillBCs().
    """

    def __init__(self, nx, xmin=0.0, xmax=1.0):
        self.xmin = xmin
        self.xmax = xmax
        self.nx = nx

        # Index range of the points that are explicitly updated; node 0
        # mirrors node nx-1 and is filled by fillBCs().
        self.ilo, self.ihi = 1, nx - 1

        # Uniform node spacing and the node coordinates themselves.
        self.dx = (xmax - xmin) / (nx - 1)
        self.x = xmin + self.dx * np.arange(nx)

        # Current solution and a copy reserved for the initial condition.
        self.a = np.zeros(nx, dtype=np.float64)
        self.ainit = np.zeros(nx, dtype=np.float64)

    def scratchArray(self):
        """Return a fresh zeroed array with one slot per grid node."""
        return np.zeros(self.nx, dtype=np.float64)

    def fillBCs(self):
        """Apply the periodic BC: copy node ihi into node 0."""
        self.a[0] = self.a[self.ihi]
def evolve(nx, C, u, tmax):
    """Advect a tophat profile with the first-order implicit upwind method.

    Solves a_t + u a_x = 0 on a periodic grid of nx points up to time
    tmax, taking steps of size dt = C dx / u (C is the CFL number).

    Returns the FDgrid holding both the final solution (g.a) and the
    initial condition (g.ainit).
    """
    # create the grid
    g = FDgrid(nx)

    # time info
    dt = C*g.dx/u
    t = 0.0

    # initialize the data -- tophat
    g.a[np.logical_and(g.x >= 0.333, g.x <= 0.666)] = 1.0
    g.ainit = g.a.copy()

    # The implicit update matrix is constant in time, so build it once
    # instead of once per step (it used to be rebuilt inside the loop).
    # It is almost bidiagonal: for row i = 0 the index i-1 wraps to the
    # last column, which supplies the periodic coupling.
    A = np.zeros((g.nx-1, g.nx-1), dtype=np.float64)
    for i in range(g.nx-1):
        A[i, i] = 1.0 + C
        A[i, i-1] = -C   # i = 0 wraps to the upper-right corner

    # fill the boundary conditions
    g.fillBCs()

    # evolution loop
    while t < tmax:
        # the RHS holds all entries except for a[0]
        b = g.a[g.ilo:g.ihi+1]

        # solve the implicit system for the new time level
        anew = np.linalg.solve(A, b)

        g.a[g.ilo:g.ihi+1] = anew[:]
        g.fillBCs()

        t += dt

    return g
# Driver: advect the tophat for one full period at several CFL numbers
# and compare against the (unchanged) initial profile.
u = 1.0
tmax = 1.0/u

nx = 65

CFL = [0.5, 1.0, 10.0]

for n, C in enumerate(CFL):
    g = evolve(nx, C, u, tmax)
    pts = slice(g.ilo, g.ihi+1)
    if n == 0:
        # after one period the exact solution equals the initial data
        plt.plot(g.x[pts], g.ainit[pts], ls=":", label="exact")
    plt.plot(g.x[pts], g.a[pts], label="$C = %3.1f$" % C)

plt.xlabel("$x$", fontsize=16)
plt.ylabel("$a$", fontsize=16)

plt.legend(frameon=False, loc="best")
plt.tight_layout()
plt.savefig("fdadvect-implicit.pdf")
| [
"numpy.linalg.solve",
"matplotlib.pyplot.savefig",
"numpy.logical_and",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.legend"
] | [((3002, 3032), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {'fontsize': '(16)'}), "('$x$', fontsize=16)\n", (3012, 3032), True, 'import matplotlib.pyplot as plt\n'), ((3033, 3063), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$a$"""'], {'fontsize': '(16)'}), "('$a$', fontsize=16)\n", (3043, 3063), True, 'import matplotlib.pyplot as plt\n'), ((3066, 3103), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)', 'loc': '"""best"""'}), "(frameon=False, loc='best')\n", (3076, 3103), True, 'import matplotlib.pyplot as plt\n'), ((3105, 3123), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3121, 3123), True, 'import matplotlib.pyplot as plt\n'), ((3125, 3161), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fdadvect-implicit.pdf"""'], {}), "('fdadvect-implicit.pdf')\n", (3136, 3161), True, 'import matplotlib.pyplot as plt\n'), ((2014, 2062), 'numpy.zeros', 'np.zeros', (['(g.nx - 1, g.nx - 1)'], {'dtype': 'np.float64'}), '((g.nx - 1, g.nx - 1), dtype=np.float64)\n', (2022, 2062), True, 'import numpy as np\n'), ((2895, 2972), 'matplotlib.pyplot.plot', 'plt.plot', (['g.x[g.ilo:g.ihi + 1]', 'g.a[g.ilo:g.ihi + 1]'], {'label': "('$C = %3.1f$' % C)"}), "(g.x[g.ilo:g.ihi + 1], g.a[g.ilo:g.ihi + 1], label='$C = %3.1f$' % C)\n", (2903, 2972), True, 'import matplotlib.pyplot as plt\n'), ((1340, 1370), 'numpy.zeros', 'np.zeros', (['nx'], {'dtype': 'np.float64'}), '(nx, dtype=np.float64)\n', (1348, 1370), True, 'import numpy as np\n'), ((1394, 1424), 'numpy.zeros', 'np.zeros', (['nx'], {'dtype': 'np.float64'}), '(nx, dtype=np.float64)\n', (1402, 1424), True, 'import numpy as np\n'), ((1535, 1570), 'numpy.zeros', 'np.zeros', (['self.nx'], {'dtype': 'np.float64'}), '(self.nx, dtype=np.float64)\n', (1543, 1570), True, 'import numpy as np\n'), ((1908, 1950), 'numpy.logical_and', 'np.logical_and', (['(g.x >= 0.333)', '(g.x <= 0.666)'], {}), '(g.x >= 0.333, g.x <= 0.666)\n', (1922, 1950), True, 'import numpy as np\n'), ((2555, 
2576), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (2570, 2576), True, 'import numpy as np\n'), ((2814, 2893), 'matplotlib.pyplot.plot', 'plt.plot', (['g.x[g.ilo:g.ihi + 1]', 'g.ainit[g.ilo:g.ihi + 1]'], {'ls': '""":"""', 'label': '"""exact"""'}), "(g.x[g.ilo:g.ihi + 1], g.ainit[g.ilo:g.ihi + 1], ls=':', label='exact')\n", (2822, 2893), True, 'import matplotlib.pyplot as plt\n'), ((1265, 1278), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (1274, 1278), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os
image_size = (224, 224, 3)  # (H, W, C) of every generated image


def create_white_noise_dataset(n=1, output_dir="white_noise_images"):
    """Generate *n* uniform white-noise RGB images of shape `image_size`.

    Each image is produced by a RandomState seeded with its own index,
    so the dataset is fully reproducible across runs.

    Args:
        n: number of images to create.
        output_dir: directory the JPEGs are written to (created if
            missing); defaults to the historical "white_noise_images".
    """
    # Create the target directory up front instead of crashing on save.
    os.makedirs(output_dir, exist_ok=True)
    for s in range(n):
        rs = np.random.RandomState(seed=s)
        image = rs.uniform(low=0, high=255, size=image_size).astype(np.uint8)
        image = Image.fromarray(image, mode="RGB")
        image.save(os.path.join(output_dir, "wn_%d.jpg" % s))
def create_anime_dataset(n=1):
    """Center-crop up to *n* 512x512 Danbooru2020 images to 224x224.

    Source images come from the 512px Danbooru2020 dump
    (https://www.gwern.net/Danbooru2020#kaggle); crops are written to
    ./danbooru2020/ as anime_<basename>.jpg.
    """
    source = "/home/gabi/anime_dataset/danbooru2020/512px/250"
    n = min(n, 250)  # the source folder holds at most 250 usable images

    # Center 224x224 crop box of a 512x512 image — loop invariant, so
    # compute it once instead of per image.
    left = (512 - 224) // 2
    box = (left, left, left + 224, left + 224)

    os.makedirs("danbooru2020", exist_ok=True)
    # sorted() makes the selected subset deterministic across runs
    # (os.listdir order is arbitrary).
    for name in sorted(os.listdir(source))[:n]:
        im = Image.open(os.path.join(source, name))
        if im.size != (512, 512):
            print("image %s is not 512x512" % name)
        im = im.crop(box)
        # splitext avoids the double extension ("anime_123.jpg.jpg")
        # the old '%s.jpg' % name formatting produced.
        base, _ = os.path.splitext(name)
        im.save("danbooru2020/anime_%s.jpg" % base)
# create_white_noise_dataset(250)
# create_anime_dataset(250) | [
"PIL.Image.fromarray",
"os.listdir",
"PIL.Image.open",
"matplotlib.use",
"os.path.join",
"numpy.random.RandomState"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (32, 39), False, 'import matplotlib\n'), ((293, 322), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 's'}), '(seed=s)\n', (314, 322), True, 'import numpy as np\n'), ((417, 451), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {'mode': '"""RGB"""'}), "(image, mode='RGB')\n", (432, 451), False, 'from PIL import Image\n'), ((714, 732), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (724, 732), False, 'import os\n'), ((773, 799), 'os.path.join', 'os.path.join', (['source', 'name'], {}), '(source, name)\n', (785, 799), False, 'import os\n'), ((813, 826), 'PIL.Image.open', 'Image.open', (['p'], {}), '(p)\n', (823, 826), False, 'from PIL import Image\n')] |
import keras
from keras import layers
from keras import datasets
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
import numpy as np
from dnpy.layers import *
from dnpy.net import *
from dnpy.optimizers import *
from dnpy.regularizers import *
from dnpy import metrics, losses
from dnpy import utils
# For debugging: pin NumPy's global RNG so every run draws the same samples.
np.random.seed(42)
def vectorize(samples, length, dimension):
    """Encode index lists as a dense float tensor.

    Returns an array of shape (len(samples), length, dimension) where,
    for sample i, every position listed in samples[i] has its whole
    `dimension`-sized row set to 1 and all other entries are 0.
    """
    encoded = np.zeros((len(samples), length, dimension))
    for row, word_indices in enumerate(samples):
        encoded[row, word_indices] = 1
    return encoded
def main():
    """Train a small dnpy feed-forward classifier on a truncated IMDB
    sentiment subset (Embedding -> Flatten -> Dense(64)+ReLU ->
    Dense(1)+Sigmoid).
    """
    max_size = 500     # samples kept per split
    max_length = 150   # reviews longer than this are dropped; shorter ones padded
    max_words = 1000   # vocabulary size (most frequent words)

    # Get dataset: each review is a list of word indices.
    (x_train, y_train), (x_test, y_test) = datasets.imdb.load_data(maxlen=max_length, num_words=max_words)
    x_train, y_train, x_test, y_test = x_train[:max_size], y_train[:max_size], x_test[:max_size], y_test[:max_size]

    # Pad sequences to a fixed length; labels become (N, 1) column vectors.
    x_train = pad_sequences(x_train, maxlen=max_length, padding='post')
    x_test = pad_sequences(x_test, maxlen=max_length, padding='post')
    y_train = np.expand_dims(y_train, axis=1)
    y_test = np.expand_dims(y_test, axis=1)

    # Params *********************************
    batch_size = int(len(x_train) / 8)  # 8 batches per epoch
    epochs = 10

    # Define architecture
    l_in = Input(shape=x_train.shape[1:])
    l = l_in
    l = Embedding(l, input_dim=max_words, output_dim=8, input_length=max_length)
    l = Flatten(l)
    l = Dense(l, units=64)
    l = Relu(l)
    l = Dense(l, units=1)
    l_out = Sigmoid(l)

    # Build network
    mymodel = Net()
    mymodel.build(
        l_in=[l_in],
        l_out=[l_out],
        optimizer=Adam(lr=10e-3),
        losses=[losses.BinaryCrossEntropy()],
        metrics=[[metrics.BinaryAccuracy()]],
        debug=False,
        smart_derivatives=True,
    )

    # Print model
    mymodel.summary()

    # Train (no held-out evaluation during training).
    mymodel.fit([x_train], [y_train],
                x_test=None, y_test=None,
                batch_size=batch_size, epochs=epochs,
                evaluate_epoch=False,
                print_rate=1)


if __name__ == "__main__":
    main()
| [
"keras.datasets.imdb.load_data",
"dnpy.losses.BinaryCrossEntropy",
"numpy.random.seed",
"numpy.expand_dims",
"dnpy.metrics.BinaryAccuracy",
"keras.preprocessing.sequence.pad_sequences"
] | [((369, 387), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (383, 387), True, 'import numpy as np\n'), ((727, 790), 'keras.datasets.imdb.load_data', 'datasets.imdb.load_data', ([], {'maxlen': 'max_length', 'num_words': 'max_words'}), '(maxlen=max_length, num_words=max_words)\n', (750, 790), False, 'from keras import datasets\n'), ((942, 999), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_train'], {'maxlen': 'max_length', 'padding': '"""post"""'}), "(x_train, maxlen=max_length, padding='post')\n", (955, 999), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1013, 1069), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_test'], {'maxlen': 'max_length', 'padding': '"""post"""'}), "(x_test, maxlen=max_length, padding='post')\n", (1026, 1069), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1179, 1210), 'numpy.expand_dims', 'np.expand_dims', (['y_train'], {'axis': '(1)'}), '(y_train, axis=1)\n', (1193, 1210), True, 'import numpy as np\n'), ((1224, 1254), 'numpy.expand_dims', 'np.expand_dims', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (1238, 1254), True, 'import numpy as np\n'), ((2327, 2354), 'dnpy.losses.BinaryCrossEntropy', 'losses.BinaryCrossEntropy', ([], {}), '()\n', (2352, 2354), False, 'from dnpy import metrics, losses\n'), ((2375, 2399), 'dnpy.metrics.BinaryAccuracy', 'metrics.BinaryAccuracy', ([], {}), '()\n', (2397, 2399), False, 'from dnpy import metrics, losses\n')] |
from __future__ import print_function
from __future__ import division
import random
import time
import itertools as it
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)  # silence TF INFO/WARN spam during tuning runs
from utils import load_data
from train_lstm import LSTM
from train_tdlstm import TDLSTM
from train_tclstm import TCLSTM
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval
from skopt import gp_minimize, forest_minimize, gbrt_minimize
from skopt.space import Categorical
def random_search(param_grid, sampsize=None):
    """Draw random hyper-parameter configurations from a grid.

    Args:
        param_grid: dict mapping parameter name -> list of candidate values.
        sampsize: number of configurations to draw without replacement;
            defaults to half of the full grid size (integer division).

    Returns:
        A list of parameter dicts sampled from the expanded grid.
    """
    expanded_param_grid = expand_grid(param_grid)
    if sampsize is None:  # 'is None', not '== None' (PEP 8)
        sampsize = len(expanded_param_grid) // 2
    samp = random.sample(expanded_param_grid, sampsize)
    return samp


def expand_grid(param_grid):
    """Return the full Cartesian product of *param_grid* as a list of
    dicts, with parameter names taken in sorted order."""
    varNames = sorted(param_grid)
    return [dict(zip(varNames, prod))
            for prod in it.product(*(param_grid[varName]
                                     for varName in varNames))]
def skopt_search(args, data, model, param_grid, skopt_method, n_calls):
    """Hyper-parameter search driven by a scikit-optimize minimizer.

    Args:
        args: argparse-style namespace; its hyper-parameter fields are
            overwritten in place for every trial.
        data: dataset object forwarded to run_network.
        model: model constructor/class forwarded to run_network.
        param_grid: dict mapping parameter name -> skopt search dimension.
        skopt_method: gp_minimize, forest_minimize or gbrt_minimize.
        n_calls: number of objective evaluations.

    Returns:
        A list of {'loss': ..., 'params': ...} dicts, one per trial.
    """
    # Freeze one (name, dimension) ordering so the positional vectors
    # skopt passes around can be mapped back to named parameters.
    param_keys, param_vecs = zip(*param_grid.items())
    param_keys = list(param_keys)
    param_vecs = list(param_vecs)
    def skopt_scorer(param_vec):
        # Objective to minimize: train once with this configuration and
        # return the negated evaluation score (skopt minimizes).
        params = dict(zip(param_keys, param_vec))
        # NOTE(review): mutates the shared `args` namespace in place.
        args.num_hidden = params['num_hidden']
        args.dropout_output = params['dropout_output']
        args.dropout_input = params['dropout_input']
        args.clip_norm = params['clip_norm']
        args.batch_size = params['batch_size']
        print(args)
        print()
        # run_network is presumably defined elsewhere in this module and
        # returns (test_score, eval_score) — TODO confirm.
        scores = run_network(args, data, model, tuning=args.tune)
        test_score, eval_score = scores
        # Reset the TF graph so consecutive trials don't accumulate ops.
        tf.reset_default_graph()
        eval_score = -eval_score[0]
        return eval_score
    outcome = skopt_method(skopt_scorer, list(param_vecs), n_calls=n_calls)
    # Pair each observed loss with the parameter vector that produced it.
    results = []
    for err, param_vec in zip(outcome.func_vals, outcome.x_iters):
        params = dict(zip(param_keys, param_vec))
        results.append({'loss': err, 'params': params})
    return results
def skoptTUNE(args, model, n_calls):
    """
    Hyper-parameter optimization using scikit-opt.
    It has 3 algorithms: forest_minimize (decision-tree regression search);
    gbrt_minimize (gradient-boosted-tree search);
    and hp_minimize (Gaussian process regression search).
    """
    search_space = {
        'batch_size': (40, 120),
        'num_hidden': (100, 500),
        'dropout_output': (0.3, 1.0),
        'dropout_input': (0.3, 1.0),
        'clip_norm': (0.5, 1.0),
    }
    dataset = load_data(args, args.data, saved=args.load_data)
    # Gaussian-process search over the continuous ranges above.
    results = skopt_search(args, dataset, model, search_space,
                           gp_minimize, n_calls=n_calls)
    print(results)
def hyperopt_search(args, data, model, param_grid, max_evals):
    """Tune hyper-parameters with hyperopt's TPE algorithm.

    Args:
        args: shared namespace mutated in place per candidate.
        data: dataset passed through to run_network.
        model: model name forwarded to run_network.
        param_grid: hyperopt search space (hp.* expressions).
        max_evals: number of objective evaluations.

    Returns:
        (best parameter setting, list of per-trial result records).
    """

    def objective(sample):
        # Copy the sampled values onto the shared args namespace.
        args.num_hidden = sample['num_hidden']
        args.dropout_output = sample['dropout_output']
        args.dropout_input = sample['dropout_input']
        args.clip_norm = sample['clip_norm']
        args.batch_size = sample['batch_size']
        # args.learning_rate = sample['learning_rate']
        print(args)
        print()
        test_score, eval_score = run_network(args, data, model, tuning=args.tune)
        tf.reset_default_graph()
        # hyperopt minimises, so negate the eval metric.
        return {'loss': -eval_score[0], 'params': args, 'status': STATUS_OK}

    trials = Trials()
    best = fmin(objective, param_grid, algo=tpe.suggest,
                trials=trials, max_evals=max_evals)
    return best, trials.results
def hyperoptTUNE(args, model, n_calls):
    """
    Search the hyper-parameter space according to the tree of Parzen estimators;
    a Bayesian approach.
    """
    space = {
        'batch_size': hp.choice('batch_size', range(40, 130, 20)),
        'num_hidden': hp.quniform('num_hidden', 100, 500, 1),
        # 'learning_rate': hp.choice('learning_rate', [0.0005]),
        'dropout_output': hp.quniform('dropout_output', 0.3, 1.0, 0.1),
        'dropout_input': hp.quniform('dropout_input', 0.3, 1.0, 0.1),
        'clip_norm': hp.quniform('clip_norm', 0.5, 1.0, 0.1),
    }
    dataset = load_data(args, args.data, saved=args.load_data)
    best_params, _ = hyperopt_search(args, dataset, model, space,
                                     max_evals=n_calls)
    print(best_params)
def TUNE(args, model, mode, n_calls=5):
    """Grid or random search over the hyper-parameter space.

    The best hyper-parameters found are written to a 'checkpoint' file next
    to args.checkpoint_file and the best test scores are printed.

    Args:
        args: argparse namespace; hyper-parameter attributes are overwritten
            in place for each candidate configuration.
        model: one of 'LSTM', 'TDLSTM', 'TCLSTM'.
        mode: 'rand' for random search; any other value runs the full grid.
        n_calls: number of random configurations when mode == 'rand'.
    """
    import os  # BUG FIX: os.path.dirname is used below but 'os' was never imported

    hyperparameters_all = {
        'batch_size': range(40, 130, 20),
        'seq_len': [42],
        'num_hidden': np.random.randint(100, 501, 10),
        'learning_rate': [0.0005],
        'dropout_output': np.arange(0.3, 1.1, 0.1),
        'dropout_input': np.arange(0.3, 1.1, 0.1),
        'clip_norm': np.arange(0.5, 1.01, 0.1),
    }
    # BUG FIX: start from -inf (was 0) so a best configuration is always
    # recorded even when every eval score is <= 0; previously best_score and
    # hyperparameters_best could be unbound after the loop.
    maxx = float('-inf')
    best_score = None
    hyperparameters_best = None
    data = load_data(args, args.data, saved=args.load_data)
    if mode == 'rand':
        samp = random_search(hyperparameters_all, n_calls)  # random search
    else:
        samp = expand_grid(hyperparameters_all)  # grid search
    for hyperparameters in samp:
        print("Evaluating hyperparameters:", hyperparameters)
        for attr, value in hyperparameters.items():
            setattr(args, attr, value)
        test_score, eval_score = run_network(args, data, model, tuning=args.tune)
        if eval_score[0] > maxx:
            maxx = eval_score[0]
            best_score = test_score
            hyperparameters_best = hyperparameters
        # Clear the TF graph between candidate trainings.
        tf.reset_default_graph()
        print()
    print("Optimisation finished..")
    print("Optimised hyperparameters:")
    with open(os.path.dirname(args.checkpoint_file) + '/checkpoint', 'w') as fp:
        fp.write('%s:"%s"\n' % ('model', args.model))
        for attr, value in sorted(hyperparameters_best.items()):
            print("{}={}".format(attr.upper(), value))
            fp.write('%s:"%s"\n' % (attr, value))
    print()
    print("Final Test Data Accuracy = {:.5f}; 3-class F1 = {:.5f}; 2-class F1 = {:.5f}"
          .format(best_score[0], best_score[1], best_score[2]))
def TRAIN(args, model):
    """Train a single model using the hyper-parameters already set on args."""
    start = time.time()
    print("\nParameters:")
    for attr, value in sorted(vars(args).items()):
        print("{}={}".format(attr.upper(), value))
    print()
    print("Graph initialized..")
    print("time taken:", time.time() - start)
    print()
    dataset = load_data(args, args.data, saved=args.load_data)
    run_network(args, dataset, model, tuning=args.tune)
def run_network(args, data, model, tuning=False):
    """Instantiate the requested network type and train it.

    Args:
        args: configuration namespace for the network.
        data: dataset produced by load_data.
        model: 'LSTM', 'TDLSTM' or 'TCLSTM'.
        tuning: forwarded to the network constructor.

    Returns:
        The scores tuple from training, or None (after printing a message)
        for an unknown model name.
    """
    # Guard-clause dispatch on the model name.
    if model == 'LSTM':
        net = LSTM(args, data, tuning=tuning)
        return net.train_lstm(args, data)
    if model == 'TDLSTM':
        net = TDLSTM(args, data, tuning=tuning)
        return net.train_tdlstm(args, data)
    if model == 'TCLSTM':
        net = TCLSTM(args, data, tuning=tuning)
        return net.train_tclstm(args, data)
    print("No such model; please select from LSTM, TDLSTM or TCLSTM")
| [
"hyperopt.fmin",
"random.sample",
"tensorflow.reset_default_graph",
"utils.load_data",
"train_tclstm.TCLSTM",
"train_lstm.LSTM",
"itertools.product",
"tensorflow.logging.set_verbosity",
"hyperopt.hp.quniform",
"train_tdlstm.TDLSTM",
"numpy.random.randint",
"time.time",
"numpy.arange",
"hyp... | [((162, 204), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (186, 204), True, 'import tensorflow as tf\n'), ((680, 724), 'random.sample', 'random.sample', (['expanded_param_grid', 'sampsize'], {}), '(expanded_param_grid, sampsize)\n', (693, 724), False, 'import random\n'), ((2531, 2579), 'utils.load_data', 'load_data', (['args', 'args.data'], {'saved': 'args.load_data'}), '(args, args.data, saved=args.load_data)\n', (2540, 2579), False, 'from utils import load_data\n'), ((3413, 3421), 'hyperopt.Trials', 'Trials', ([], {}), '()\n', (3419, 3421), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((3436, 3522), 'hyperopt.fmin', 'fmin', (['objective', 'param_grid'], {'algo': 'tpe.suggest', 'trials': 'trials', 'max_evals': 'max_evals'}), '(objective, param_grid, algo=tpe.suggest, trials=trials, max_evals=\n max_evals)\n', (3440, 3522), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((4179, 4227), 'utils.load_data', 'load_data', (['args', 'args.data'], {'saved': 'args.load_data'}), '(args, args.data, saved=args.load_data)\n', (4188, 4227), False, 'from utils import load_data\n'), ((4763, 4811), 'utils.load_data', 'load_data', (['args', 'args.data'], {'saved': 'args.load_data'}), '(args, args.data, saved=args.load_data)\n', (4772, 4811), False, 'from utils import load_data\n'), ((6064, 6075), 'time.time', 'time.time', ([], {}), '()\n', (6073, 6075), False, 'import time\n'), ((6259, 6270), 'time.time', 'time.time', ([], {}), '()\n', (6268, 6270), False, 'import time\n'), ((6326, 6374), 'utils.load_data', 'load_data', (['args', 'args.data'], {'saved': 'args.load_data'}), '(args, args.data, saved=args.load_data)\n', (6335, 6374), False, 'from utils import load_data\n'), ((1651, 1675), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1673, 1675), True, 'import tensorflow as tf\n'), ((3265, 3289), 
'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3287, 3289), True, 'import tensorflow as tf\n'), ((3852, 3890), 'hyperopt.hp.quniform', 'hp.quniform', (['"""num_hidden"""', '(100)', '(500)', '(1)'], {}), "('num_hidden', 100, 500, 1)\n", (3863, 3890), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((3983, 4027), 'hyperopt.hp.quniform', 'hp.quniform', (['"""dropout_output"""', '(0.3)', '(1.0)', '(0.1)'], {}), "('dropout_output', 0.3, 1.0, 0.1)\n", (3994, 4027), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((4054, 4097), 'hyperopt.hp.quniform', 'hp.quniform', (['"""dropout_input"""', '(0.3)', '(1.0)', '(0.1)'], {}), "('dropout_input', 0.3, 1.0, 0.1)\n", (4065, 4097), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((4120, 4159), 'hyperopt.hp.quniform', 'hp.quniform', (['"""clip_norm"""', '(0.5)', '(1.0)', '(0.1)'], {}), "('clip_norm', 0.5, 1.0, 0.1)\n", (4131, 4159), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((4509, 4540), 'numpy.random.randint', 'np.random.randint', (['(100)', '(501)', '(10)'], {}), '(100, 501, 10)\n', (4526, 4540), True, 'import numpy as np\n'), ((4603, 4627), 'numpy.arange', 'np.arange', (['(0.3)', '(1.1)', '(0.1)'], {}), '(0.3, 1.1, 0.1)\n', (4612, 4627), True, 'import numpy as np\n'), ((4654, 4678), 'numpy.arange', 'np.arange', (['(0.3)', '(1.1)', '(0.1)'], {}), '(0.3, 1.1, 0.1)\n', (4663, 4678), True, 'import numpy as np\n'), ((4701, 4726), 'numpy.arange', 'np.arange', (['(0.5)', '(1.01)', '(0.1)'], {}), '(0.5, 1.01, 0.1)\n', (4710, 4726), True, 'import numpy as np\n'), ((5433, 5457), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5455, 5457), True, 'import tensorflow as tf\n'), ((6517, 6548), 'train_lstm.LSTM', 'LSTM', (['args', 'data'], {'tuning': 'tuning'}), '(args, data, tuning=tuning)\n', (6521, 6548), False, 'from train_lstm import LSTM\n'), 
((875, 933), 'itertools.product', 'it.product', (['*(param_grid[varName] for varName in varNames)'], {}), '(*(param_grid[varName] for varName in varNames))\n', (885, 933), True, 'import itertools as it\n'), ((6654, 6687), 'train_tdlstm.TDLSTM', 'TDLSTM', (['args', 'data'], {'tuning': 'tuning'}), '(args, data, tuning=tuning)\n', (6660, 6687), False, 'from train_tdlstm import TDLSTM\n'), ((6795, 6828), 'train_tclstm.TCLSTM', 'TCLSTM', (['args', 'data'], {'tuning': 'tuning'}), '(args, data, tuning=tuning)\n', (6801, 6828), False, 'from train_tclstm import TCLSTM\n')] |
# -*- coding: utf-8 -*-
#==========================================
# Title: CoCaBO_Base.py
# Author: <NAME> and <NAME>
# Date: 20 August 2019
# Link: https://arxiv.org/abs/1906.08878
#==========================================
import collections
import pickle
import random
import numpy as np
from scipy.optimize import minimize
from methods.BaseBO import BaseBO
from utils.DepRound import DepRound
from utils.probability import distr, draw
class CoCaBO_Base(BaseBO):
    """Shared machinery for CoCaBO optimisers (Ru et al., arXiv:1906.08878).

    Combines an EXP3-style multi-armed-bandit choice over the categorical
    variables with Bayesian optimisation over the continuous ones; the
    actual optimisation loop is supplied by subclasses via `runOptim`.
    """

    def __init__(self, objfn, initN, bounds, acq_type, C,
                 kernel_mix=0.5, mix_lr=10,
                 model_update_interval=10,
                 ard=False, **kwargs):
        """Set up trial bookkeeping, bandit history and kernel-mix scheduling.

        Args:
            objfn: objective function (forwarded to BaseBO).
            initN: number of initial design points (forwarded to BaseBO).
            bounds: search-space bounds (forwarded to BaseBO).
            acq_type: acquisition-function identifier string.
            C: number of categories per categorical variable (forwarded to BaseBO).
            kernel_mix: kernel mix weight; a value in [0, 1] is used as a
                fixed mix, anything outside that range means "learn it online"
                (see get_mix).
            mix_lr: iterations between mix-learning steps.
            model_update_interval: iterations between GP hyper-parameter updates.
            ard: whether to use ARD lengthscales.
        """
        super().__init__(objfn, initN, bounds, C, **kwargs)
        self.acq_type = acq_type
        # Store the ht recommendations for each iteration
        # (note: 'recommedations' spelling is legacy but kept for API stability)
        self.ht_recommedations = []
        self.ht_hist_batch = []
        # Store the name of the algorithm
        self.policy = None
        self.X = []
        self.Y = []
        # To check the best vals
        self.gp_bestvals = []
        self.ARD = ard
        # Keeping track of current iteration helps control mix learning
        self.iteration = None
        self.model_hp = None
        self.default_cont_lengthscale = 0.2
        self.mix = kernel_mix
        # Force the two update intervals to be compatible: keep both only if
        # one divides the other, otherwise collapse both to the smaller value.
        if ((model_update_interval % mix_lr == 0) or
                (mix_lr % model_update_interval == 0)):
            self.mix_learn_rate = mix_lr
            self.model_update_interval = model_update_interval
        else:
            self.mix_learn_rate = min(mix_lr, model_update_interval)
            self.model_update_interval = min(mix_lr, model_update_interval)
        self.mix_used = 0.5
        self.name = None

    def estimate_alpha(self, batch_size, gamma, Wc, C):
        """Numerically estimate the EXP3 truncation threshold alpha.

        Solves equation for alpha by BFGS from the best of 100 random starts;
        used by compute_prob_dist_and_draw_hts to cap dominant weights.
        """

        def single_evaluation(alpha):
            # Residual of the threshold equation; weights above alpha are capped.
            denominator = sum([alpha if val > alpha else val for idx, val in enumerate(Wc)])
            rightside = (1 / batch_size - gamma / C) / (1 - gamma)
            output = np.abs(alpha / denominator - rightside)
            return output

        x_tries = np.random.uniform(0, np.max(Wc), size=(100, 1))
        y_tries = [single_evaluation(val) for val in x_tries]
        # find x optimal for init
        # print(f'ytry_len={len(y_tries)}')
        idx_min = np.argmin(y_tries)
        x_init_min = x_tries[idx_min]
        res = minimize(single_evaluation, x_init_min, method='BFGS', options={'gtol': 1e-6, 'disp': False})
        if isinstance(res, float):
            return res
        else:
            return res.x

    def runTrials(self, trials, budget, saving_path):
        """Run `trials` independent seeded optimisation runs and aggregate.

        Returns:
            (mean best values, standard error of best values, histogram of
            categorical recommendations).
        """
        # Initialize mean_bestvals, stderr, hist
        best_vals = []
        mix_values = []
        debug_values = []
        n_working = trials
        self.saving_path = saving_path

        for i in range(trials):
            print("Running trial: ", i)
            self.trial_num = i

            # Seed both RNGs with the trial index for reproducibility.
            np.random.seed(i)
            random.seed(i)

            df = self.runOptim(budget=budget, seed=i)
            best_vals.append(df['best_value'])
            mix_values.append(df['mix_val'])
            self.save_progress_to_disk(best_vals, debug_values, mix_values,
                                       saving_path, df)

        # Runoptim updates the ht_recommendation histogram
        self.best_vals = best_vals
        ht_hist = collections.Counter(np.array(self.ht_recommedations).ravel())
        self.ht_recommedations = []

        self.mean_best_vals = np.mean(best_vals, axis=0)
        self.err_best_vals = np.std(best_vals, axis=0) / np.sqrt(n_working)

        # For debugging
        self.gp_bestvals = best_vals
        self.ht_hist = ht_hist
        self.n_working = n_working
        return self.mean_best_vals, self.err_best_vals, ht_hist

    def save_progress_to_disk(self, best_vals, debug_values, mix_values,
                              saving_path, df):
        """Pickle best values (and optional mix/debug traces) plus the trial frame."""
        results_file_name = saving_path + self.name + \
                            f'_{self.batch_size}' + \
                            '_best_vals_' + \
                            self.acq_type + \
                            '_ARD_' + str(self.ARD) + '_mix_' + \
                            str(self.mix)
        with open(results_file_name, 'wb') as file:
            pickle.dump(best_vals, file)

        # A mix outside [0, 1] means it was learned online, so its trace is saved.
        if self.mix > 1 or self.mix < 0:
            mix_file_name = saving_path + self.name + \
                            f'_{self.batch_size}_' + \
                            self.acq_type + \
                            '_ARD_' + str(self.ARD) + '_mix_' + \
                            str(self.mix) + '_mix_values'
            with open(mix_file_name, 'wb') as file2:
                pickle.dump(mix_values, file2)

        if self.debug:
            debug_file_name = saving_path + self.name + \
                              f'_{self.batch_size}_' + \
                              self.acq_type + \
                              '_ARD_' + str(self.ARD) + '_mix_' + \
                              str(self.mix) + '_debug'
            with open(debug_file_name, 'wb') as file2:
                pickle.dump(debug_values, file2)

        df.to_pickle(f"{results_file_name}_df_s{self.trial_num}")

    def compute_reward_for_all_cat_variable(self, ht_next_batch_list, batch_size):
        """Best observed (negated) objective per chosen categorical value.

        NOTE(review): assumes self.data[0] rows begin with the categorical
        columns and self.result[0] holds objective values to be minimised
        (hence the * -1) -- confirm against BaseBO.
        """
        # Obtain the reward for each categorical variable: B x len(self.C_list)
        ht_batch_list_rewards = np.zeros((batch_size, len(self.C_list)))
        for b in range(batch_size):
            ht_next_list = ht_next_batch_list[b, :]
            for i in range(len(ht_next_list)):
                idices = np.where(self.data[0][:, i] == ht_next_list[i])
                ht_result = self.result[0][idices]
                ht_reward = np.max(ht_result * -1)
                ht_batch_list_rewards[b, i] = ht_reward
        return ht_batch_list_rewards

    def update_weights_for_all_cat_var(self, Gt_ht_list, ht_batch_list, Wc_list, gamma_list,
                                       probabilityDistribution_list, batch_size, S0=None):
        """EXP3 multiplicative weight update for each categorical variable.

        Weights of categories in the truncation set S0 are left unchanged in
        the batched case.
        """
        for j in range(len(self.C_list)):
            Wc = Wc_list[j]
            C = self.C_list[j]
            gamma = gamma_list[j]
            probabilityDistribution = probabilityDistribution_list[j]
            # print(f'cat_var={j}, prob={probabilityDistribution}')
            if batch_size > 1:
                ht_batch_list = ht_batch_list.astype(int)
                Gt_ht = Gt_ht_list[:, j]
                mybatch_ht = ht_batch_list[:, j] # 1xB
                for ii, ht in enumerate(mybatch_ht):
                    Gt_ht_b = Gt_ht[ii]
                    # Importance-weighted reward estimate for the played arm.
                    estimatedReward = 1.0 * Gt_ht_b / probabilityDistribution[ht]
                    if ht not in S0:
                        Wc[ht] *= np.exp(batch_size * estimatedReward * gamma / C)
            else:
                Gt_ht = Gt_ht_list[j]
                ht = ht_batch_list[j] # 1xB
                estimatedReward = 1.0 * Gt_ht / probabilityDistribution[ht]
                Wc[ht] *= np.exp(estimatedReward * gamma / C)
        return Wc_list

    def compute_prob_dist_and_draw_hts(self, Wc_list, gamma_list, batch_size):
        """Build per-variable category distributions and sample hts.

        Batched case returns (B x num_vars array, distributions, truncation
        set S0); single case returns (list of hts, distributions).
        """
        if batch_size > 1:
            ht_batch_list = np.zeros((batch_size, len(self.C_list)))
            probabilityDistribution_list = []
            for j in range(len(self.C_list)):
                Wc = Wc_list[j]
                gamma = gamma_list[j]
                C = self.C_list[j]
                # perform some truncation here
                maxW = np.max(Wc)
                temp = np.sum(Wc) * (1.0 / batch_size - gamma / C) / (1 - gamma)
                if gamma < 1 and maxW >= temp:
                    # find a threshold alpha
                    alpha = self.estimate_alpha(batch_size, gamma, Wc, C)
                    S0 = [idx for idx, val in enumerate(Wc) if val > alpha]
                else:
                    S0 = []
                # Compute the probability for each category
                probabilityDistribution = distr(Wc, gamma)
                # draw a batch here
                if batch_size < C:
                    # Dependent rounding: distinct categories without replacement.
                    mybatch_ht = DepRound(probabilityDistribution, k=batch_size)
                else:
                    mybatch_ht = np.random.choice(len(probabilityDistribution), batch_size, p=probabilityDistribution)
                # ht_batch_list size: len(self.C_list) x B
                ht_batch_list[:, j] = mybatch_ht[:]
                # ht_batch_list.append(mybatch_ht)
                probabilityDistribution_list.append(probabilityDistribution)
            return ht_batch_list, probabilityDistribution_list, S0
        else:
            ht_list = []
            probabilityDistribution_list = []
            for j in range(len(self.C_list)):
                Wc = Wc_list[j]
                gamma = gamma_list[j]
                # Compute the probability for each category
                probabilityDistribution = distr(Wc, gamma)
                # Choose a categorical variable at random
                ht = draw(probabilityDistribution)
                ht_list.append(ht)
                probabilityDistribution_list.append(probabilityDistribution)
            return ht_list, probabilityDistribution_list

    def compute_weights_for_init_data(self, Wc_list_init, gamma_list, batch_size):
        """Warm-start the bandit weights from the initial design data."""
        ht_next_batch_list = self.data[0][:, :len(self.C)]
        _, probabilityDistribution_list, S0 = self.compute_prob_dist_and_draw_hts(Wc_list_init, gamma_list,
                                                                                  ht_next_batch_list.shape[0])
        Gt_ht_list = self.compute_reward_for_all_cat_variable(ht_next_batch_list, ht_next_batch_list.shape[0])
        New_Wc_list = self.update_weights_for_all_cat_var(Gt_ht_list, ht_next_batch_list, Wc_list_init, gamma_list,
                                                          probabilityDistribution_list, ht_next_batch_list.shape[0],
                                                          S0=S0)
        return New_Wc_list

    def get_mix(self, hp_bounds):
        """Decide whether the kernel mix is fixed, learned, or reused this iteration.

        Returns:
            (fix_mix_in_this_iter, mix_value, hp_bounds) -- hp_bounds gains a
            leading row for the mix parameter when it is to be learned.
        """
        fix_mix_in_this_iter = True
        if (self.mix >= 0) and (self.mix <= 1): # mix param is fixed
            mix_value = self.mix
        elif ((self.iteration >= self.mix_learn_rate) and
                (self.iteration % self.mix_learn_rate == 0)):
            # learn mix
            hp_bounds = np.vstack(([1e-6, 1], hp_bounds))
            fix_mix_in_this_iter = False
            mix_value = 0.5
        else: # between learning iterations
            mix_value = self.mix_used
        return fix_mix_in_this_iter, mix_value, hp_bounds

    # ========================================
    # Over-ride this!
    # =============================================================================
    def runOptim(self, budget, seed):
        """Subclasses implement the actual optimisation loop."""
        raise NotImplementedError

    # =============================================================================
    # Get best value from nested list along with the index
    # =============================================================================
    def getBestVal2(self, my_list):
        """Return (max of negated entries, outer index, inner index)."""
        temp = [np.max(i * -1) for i in my_list]
        indx1 = [np.argmax(i * -1) for i in my_list]
        indx2 = np.argmax(temp)
        val = np.max(temp)
        list_indx = indx2
        val_indx = indx1[indx2]
        return val, list_indx, val_indx

    def set_model_params_and_opt_flag(self, model):
        """
        Returns opt_flag, model

        opt_flag is True on iterations where the GP hyper-parameters should
        be re-optimised; otherwise the cached self.model_hp is written back
        onto the model (dropping a learned-mix entry if present).
        """
        if ((self.iteration >= self.model_update_interval) and
                (self.iteration % self.model_update_interval == 0)):
            return True, model
        else:
            # No previous model_hp, so optimise
            if self.model_hp is None:
                self.model_hp = model.param_array
            else:
                # print(self.model_hp)
                # print(model.param_array)

                # previous iter learned mix, so remove mix before setting
                if len(model.param_array) < len(self.model_hp):
                    model.param_array = self.model_hp[1:]
                else:
                    model.param_array = self.model_hp

            return False, model
| [
"numpy.sqrt",
"numpy.array",
"numpy.mean",
"numpy.where",
"numpy.max",
"numpy.exp",
"numpy.random.seed",
"numpy.vstack",
"numpy.argmin",
"numpy.abs",
"utils.probability.distr",
"utils.probability.draw",
"scipy.optimize.minimize",
"numpy.argmax",
"numpy.std",
"pickle.dump",
"utils.Dep... | [((2286, 2304), 'numpy.argmin', 'np.argmin', (['y_tries'], {}), '(y_tries)\n', (2295, 2304), True, 'import numpy as np\n'), ((2358, 2457), 'scipy.optimize.minimize', 'minimize', (['single_evaluation', 'x_init_min'], {'method': '"""BFGS"""', 'options': "{'gtol': 1e-06, 'disp': False}"}), "(single_evaluation, x_init_min, method='BFGS', options={'gtol': \n 1e-06, 'disp': False})\n", (2366, 2457), False, 'from scipy.optimize import minimize\n'), ((3473, 3499), 'numpy.mean', 'np.mean', (['best_vals'], {'axis': '(0)'}), '(best_vals, axis=0)\n', (3480, 3499), True, 'import numpy as np\n'), ((11186, 11201), 'numpy.argmax', 'np.argmax', (['temp'], {}), '(temp)\n', (11195, 11201), True, 'import numpy as np\n'), ((11216, 11228), 'numpy.max', 'np.max', (['temp'], {}), '(temp)\n', (11222, 11228), True, 'import numpy as np\n'), ((1994, 2033), 'numpy.abs', 'np.abs', (['(alpha / denominator - rightside)'], {}), '(alpha / denominator - rightside)\n', (2000, 2033), True, 'import numpy as np\n'), ((2101, 2111), 'numpy.max', 'np.max', (['Wc'], {}), '(Wc)\n', (2107, 2111), True, 'import numpy as np\n'), ((2908, 2925), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (2922, 2925), True, 'import numpy as np\n'), ((2938, 2952), 'random.seed', 'random.seed', (['i'], {}), '(i)\n', (2949, 2952), False, 'import random\n'), ((3529, 3554), 'numpy.std', 'np.std', (['best_vals'], {'axis': '(0)'}), '(best_vals, axis=0)\n', (3535, 3554), True, 'import numpy as np\n'), ((3557, 3575), 'numpy.sqrt', 'np.sqrt', (['n_working'], {}), '(n_working)\n', (3564, 3575), True, 'import numpy as np\n'), ((4265, 4293), 'pickle.dump', 'pickle.dump', (['best_vals', 'file'], {}), '(best_vals, file)\n', (4276, 4293), False, 'import pickle\n'), ((11084, 11098), 'numpy.max', 'np.max', (['(i * -1)'], {}), '(i * -1)\n', (11090, 11098), True, 'import numpy as np\n'), ((11134, 11151), 'numpy.argmax', 'np.argmax', (['(i * -1)'], {}), '(i * -1)\n', (11143, 11151), True, 'import numpy as np\n'), 
((4685, 4715), 'pickle.dump', 'pickle.dump', (['mix_values', 'file2'], {}), '(mix_values, file2)\n', (4696, 4715), False, 'import pickle\n'), ((5096, 5128), 'pickle.dump', 'pickle.dump', (['debug_values', 'file2'], {}), '(debug_values, file2)\n', (5107, 5128), False, 'import pickle\n'), ((5594, 5641), 'numpy.where', 'np.where', (['(self.data[0][:, i] == ht_next_list[i])'], {}), '(self.data[0][:, i] == ht_next_list[i])\n', (5602, 5641), True, 'import numpy as np\n'), ((5721, 5743), 'numpy.max', 'np.max', (['(ht_result * -1)'], {}), '(ht_result * -1)\n', (5727, 5743), True, 'import numpy as np\n'), ((6980, 7015), 'numpy.exp', 'np.exp', (['(estimatedReward * gamma / C)'], {}), '(estimatedReward * gamma / C)\n', (6986, 7015), True, 'import numpy as np\n'), ((7485, 7495), 'numpy.max', 'np.max', (['Wc'], {}), '(Wc)\n', (7491, 7495), True, 'import numpy as np\n'), ((7971, 7987), 'utils.probability.distr', 'distr', (['Wc', 'gamma'], {}), '(Wc, gamma)\n', (7976, 7987), False, 'from utils.probability import distr, draw\n'), ((8895, 8911), 'utils.probability.distr', 'distr', (['Wc', 'gamma'], {}), '(Wc, gamma)\n', (8900, 8911), False, 'from utils.probability import distr, draw\n'), ((8991, 9020), 'utils.probability.draw', 'draw', (['probabilityDistribution'], {}), '(probabilityDistribution)\n', (8995, 9020), False, 'from utils.probability import distr, draw\n'), ((10330, 10364), 'numpy.vstack', 'np.vstack', (['([1e-06, 1], hp_bounds)'], {}), '(([1e-06, 1], hp_bounds))\n', (10339, 10364), True, 'import numpy as np\n'), ((3365, 3397), 'numpy.array', 'np.array', (['self.ht_recommedations'], {}), '(self.ht_recommedations)\n', (3373, 3397), True, 'import numpy as np\n'), ((8093, 8140), 'utils.DepRound.DepRound', 'DepRound', (['probabilityDistribution'], {'k': 'batch_size'}), '(probabilityDistribution, k=batch_size)\n', (8101, 8140), False, 'from utils.DepRound import DepRound\n'), ((6728, 6776), 'numpy.exp', 'np.exp', (['(batch_size * estimatedReward * gamma / C)'], {}), 
'(batch_size * estimatedReward * gamma / C)\n', (6734, 6776), True, 'import numpy as np\n'), ((7519, 7529), 'numpy.sum', 'np.sum', (['Wc'], {}), '(Wc)\n', (7525, 7529), True, 'import numpy as np\n')] |
import argparse
import os
import random
# import sys
# sys.path.insert(0, "")
import numpy as np
import habitat
from habitat.core.challenge import Challenge
class RandomWalker(habitat.Agent):
    """Baseline agent that ignores observations and acts uniformly at random."""

    def __init__(self):
        # The four discrete actions available in the challenge.
        self._POSSIBLE_ACTIONS = np.array([0, 1, 2, 3])

    def reset(self):
        # Stateless agent: nothing to reset between episodes.
        pass

    def act(self, observations):
        # Sample one action uniformly, regardless of the observation.
        action = np.random.choice(self._POSSIBLE_ACTIONS)
        return {"action": action}
def main():
    """Parse the challenge phase and submit a RandomWalker agent."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--phase", type=str, required=False, choices=["dev", "standard", "challenge"]
    )
    parsed = parser.parse_args()

    walker = RandomWalker()
    challenge = Challenge(phase=parsed.phase)
    challenge.submit(walker)
# Submit the random agent when this module is run as a script.
if __name__ == "__main__":
    main()
"numpy.random.choice",
"numpy.array",
"habitat.core.challenge.Challenge",
"argparse.ArgumentParser"
] | [((434, 459), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (457, 459), False, 'import argparse\n'), ((674, 696), 'habitat.core.challenge.Challenge', 'Challenge', ([], {'phase': 'phase'}), '(phase=phase)\n', (683, 696), False, 'from habitat.core.challenge import Challenge\n'), ((250, 272), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (258, 272), True, 'import numpy as np\n'), ((365, 405), 'numpy.random.choice', 'np.random.choice', (['self._POSSIBLE_ACTIONS'], {}), '(self._POSSIBLE_ACTIONS)\n', (381, 405), True, 'import numpy as np\n')] |
from keras import backend as K
import numpy as np
def Active_Contour_Loss(y_true, y_pred):
    """Active Contour loss for segmentation (length term + region term).

    NOTE(review): assumes channels-first tensors with a fixed 256x256
    spatial size, i.e. shape (batch, channels, 256, 256), and that channel 0
    holds the foreground map -- confirm against callers.
    """
    #y_pred = K.cast(y_pred, dtype = 'float64')

    """
    length term (contour regulariser on the prediction)
    """
    x = y_pred[:,:,1:,:] - y_pred[:,:,:-1,:] # horizontal and vertical directions
    y = y_pred[:,:,:,1:] - y_pred[:,:,:,:-1]

    delta_x = x[:,:,1:,:-2]**2
    delta_y = y[:,:,:-2,1:]**2
    delta_u = K.abs(delta_x + delta_y)

    epsilon = 0.00000001 # where is a parameter to avoid square root is zero in practice.
    w = 1
    lenth = w * K.sum(K.sqrt(delta_u + epsilon)) # equ.(11) in the paper

    """
    region term (compares prediction against all-ones / all-zeros references)
    """
    C_1 = np.ones((256, 256))
    C_2 = np.zeros((256, 256))

    region_in = K.abs(K.sum( y_pred[:,0,:,:] * ((y_true[:,0,:,:] - C_1)**2) ) ) # equ.(12) in the paper
    region_out = K.abs(K.sum( (1-y_pred[:,0,:,:]) * ((y_true[:,0,:,:] - C_2)**2) )) # equ.(12) in the paper

    lambdaP = 1 # lambda parameter could be various.

    loss = lenth + lambdaP * (region_in + region_out)

    return loss
| [
"numpy.ones",
"keras.backend.sum",
"keras.backend.sqrt",
"numpy.zeros",
"keras.backend.abs"
] | [((355, 379), 'keras.backend.abs', 'K.abs', (['(delta_x + delta_y)'], {}), '(delta_x + delta_y)\n', (360, 379), True, 'from keras import backend as K\n'), ((578, 597), 'numpy.ones', 'np.ones', (['(256, 256)'], {}), '((256, 256))\n', (585, 597), True, 'import numpy as np\n'), ((605, 625), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {}), '((256, 256))\n', (613, 625), True, 'import numpy as np\n'), ((646, 705), 'keras.backend.sum', 'K.sum', (['(y_pred[:, 0, :, :] * (y_true[:, 0, :, :] - C_1) ** 2)'], {}), '(y_pred[:, 0, :, :] * (y_true[:, 0, :, :] - C_1) ** 2)\n', (651, 705), True, 'from keras import backend as K\n'), ((748, 813), 'keras.backend.sum', 'K.sum', (['((1 - y_pred[:, 0, :, :]) * (y_true[:, 0, :, :] - C_2) ** 2)'], {}), '((1 - y_pred[:, 0, :, :]) * (y_true[:, 0, :, :] - C_2) ** 2)\n', (753, 813), True, 'from keras import backend as K\n'), ((495, 520), 'keras.backend.sqrt', 'K.sqrt', (['(delta_u + epsilon)'], {}), '(delta_u + epsilon)\n', (501, 520), True, 'from keras import backend as K\n')] |
import unittest
from transition_sampling.engines import ShootingResult
from transition_sampling.algo.aimless_shooting import AsyncAimlessShooting, \
generate_velocities
import numpy as np
import tempfile
class NextPositionTest(unittest.TestCase):
    """Test that picking the next position works"""

    def test_pick_next_position(self):
        """Test some configurations of +1/0/-1 offset"""
        # Seed for reproducible results.
        np.random.seed(1)
        with tempfile.TemporaryDirectory() as temp_dir:
            algo = AsyncAimlessShooting(None, None, 300, None)
            algo.current_start = np.zeros((2, 3))

            fwd_frames = np.array([np.zeros((2, 3)) + 1, np.zeros((2, 3)) + 2])
            rev_frames = np.array([np.zeros((2, 3)) - 1, np.zeros((2, 3)) - 2])
            result = ShootingResult({"commit": 1, "frames": fwd_frames},
                                    {"commit": 2, "frames": rev_frames})

            # Expected picked value for each (iteration, offset) combination;
            # the offset cycles through -1, 0, +1 as i advances.
            expected = [1, -1, -2, 1, 0, -2, 0, 0, -2, 1]
            for i, want in enumerate(expected):
                algo.current_offset = (i % 3) - 1
                picked = algo.pick_starting(result)
                self.assertEqual(want, picked[0, 0])
class VelocityGenerationTest(unittest.TestCase):
    """Test that velocity generation works"""

    def test_velocities_are_arrays(self):
        # Seed for reproducible results.
        np.random.seed(1)
        atoms = ['Ar'] * 1000
        velocities = generate_velocities(atoms, 300)
        # The generator must hand back a numpy array.
        self.assertTrue(isinstance(velocities, np.ndarray))

    def test_velocity_shape(self):
        # Seed for reproducible results.
        np.random.seed(1)
        atoms = ['Ar'] * 1000
        velocities = generate_velocities(atoms, 300)
        # One 3-component velocity per atom.
        self.assertEqual(velocities.shape, (len(atoms), 3))

    def test_velocity_distribution_peak_location(self):
        # Seed for reproducible results.
        np.random.seed(1)
        atoms = ['Ar'] * 1000
        cold = generate_velocities(atoms, 300)   # K
        hot = generate_velocities(atoms, 1000)   # K
        # Maxwell-Boltzmann: the colder distribution should peak higher and
        # at a lower speed than the hotter one.
        cold_speeds = np.linalg.norm(cold, axis=1)
        hot_speeds = np.linalg.norm(hot, axis=1)
        cold_counts, _ = np.histogram(cold_speeds, bins=20, range=(1e2, 1e4))
        hot_counts, _ = np.histogram(hot_speeds, bins=20, range=(1e2, 1e4))
        self.assertTrue(np.max(cold_counts) > np.max(hot_counts))
        self.assertTrue(np.argmax(cold_counts) < np.argmax(hot_counts))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"tempfile.TemporaryDirectory",
"numpy.histogram",
"transition_sampling.algo.aimless_shooting.generate_velocities",
"numpy.argmax",
"numpy.max",
"numpy.zeros",
"numpy.random.seed",
"numpy.linalg.norm",
"unittest.main",
"transition_sampling.algo.aimless_shooting.AsyncAimlessShooting",
"transition_... | [((3102, 3117), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3115, 3117), False, 'import unittest\n'), ((461, 478), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (475, 478), True, 'import numpy as np\n'), ((1422, 1439), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1436, 1439), True, 'import numpy as np\n'), ((1527, 1570), 'transition_sampling.algo.aimless_shooting.generate_velocities', 'generate_velocities', (['test_atoms', 'test_temp1'], {}), '(test_atoms, test_temp1)\n', (1546, 1570), False, 'from transition_sampling.algo.aimless_shooting import AsyncAimlessShooting, generate_velocities\n'), ((1774, 1791), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1788, 1791), True, 'import numpy as np\n'), ((1879, 1922), 'transition_sampling.algo.aimless_shooting.generate_velocities', 'generate_velocities', (['test_atoms', 'test_temp1'], {}), '(test_atoms, test_temp1)\n', (1898, 1922), False, 'from transition_sampling.algo.aimless_shooting import AsyncAimlessShooting, generate_velocities\n'), ((2164, 2181), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2178, 2181), True, 'import numpy as np\n'), ((2300, 2343), 'transition_sampling.algo.aimless_shooting.generate_velocities', 'generate_velocities', (['test_atoms', 'test_temp1'], {}), '(test_atoms, test_temp1)\n', (2319, 2343), False, 'from transition_sampling.algo.aimless_shooting import AsyncAimlessShooting, generate_velocities\n'), ((2364, 2407), 'transition_sampling.algo.aimless_shooting.generate_velocities', 'generate_velocities', (['test_atoms', 'test_temp2'], {}), '(test_atoms, test_temp2)\n', (2383, 2407), False, 'from transition_sampling.algo.aimless_shooting import AsyncAimlessShooting, generate_velocities\n'), ((2601, 2634), 'numpy.linalg.norm', 'np.linalg.norm', (['test_vel1'], {'axis': '(1)'}), '(test_vel1, axis=1)\n', (2615, 2634), True, 'import numpy as np\n'), ((2659, 2692), 'numpy.linalg.norm', 'np.linalg.norm', 
(['test_vel2'], {'axis': '(1)'}), '(test_vel2, axis=1)\n', (2673, 2692), True, 'import numpy as np\n'), ((2715, 2775), 'numpy.histogram', 'np.histogram', (['test_vel_mag1'], {'bins': '(20)', 'range': '(100.0, 10000.0)'}), '(test_vel_mag1, bins=20, range=(100.0, 10000.0))\n', (2727, 2775), True, 'import numpy as np\n'), ((2791, 2851), 'numpy.histogram', 'np.histogram', (['test_vel_mag2'], {'bins': '(20)', 'range': '(100.0, 10000.0)'}), '(test_vel_mag2, bins=20, range=(100.0, 10000.0))\n', (2803, 2851), True, 'import numpy as np\n'), ((2862, 2877), 'numpy.max', 'np.max', (['counts1'], {}), '(counts1)\n', (2868, 2877), True, 'import numpy as np\n'), ((2893, 2908), 'numpy.max', 'np.max', (['counts2'], {}), '(counts2)\n', (2899, 2908), True, 'import numpy as np\n'), ((2929, 2947), 'numpy.argmax', 'np.argmax', (['counts1'], {}), '(counts1)\n', (2938, 2947), True, 'import numpy as np\n'), ((2967, 2985), 'numpy.argmax', 'np.argmax', (['counts2'], {}), '(counts2)\n', (2976, 2985), True, 'import numpy as np\n'), ((493, 522), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (520, 522), False, 'import tempfile\n'), ((559, 602), 'transition_sampling.algo.aimless_shooting.AsyncAimlessShooting', 'AsyncAimlessShooting', (['None', 'None', '(300)', 'None'], {}), '(None, None, 300, None)\n', (579, 602), False, 'from transition_sampling.algo.aimless_shooting import AsyncAimlessShooting, generate_velocities\n'), ((639, 655), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (647, 655), True, 'import numpy as np\n'), ((919, 943), 'transition_sampling.engines.ShootingResult', 'ShootingResult', (['fwd', 'rev'], {}), '(fwd, rev)\n', (933, 943), False, 'from transition_sampling.engines import ShootingResult\n'), ((728, 744), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (736, 744), True, 'import numpy as np\n'), ((750, 766), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (758, 766), True, 'import numpy as np\n'), ((846, 862), 
'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (854, 862), True, 'import numpy as np\n'), ((868, 884), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (876, 884), True, 'import numpy as np\n')] |
# NOTE(review): path hacks make the repo-local modules (load_salary,
# find_discm_points) and the vendored AIF360 copy importable.
import sys, os
sys.path.insert(1, "../")
sys.path.append("../../../")
sys.path.append("../../../competitors/AIF360/")
import numpy as np
np.random.seed(0)  # fix the NumPy RNG so runs are reproducible
from aif360.datasets import SalaryDataset, BinaryLabelDataset, StructuredDataset
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.metrics import ClassificationMetric
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, MaxAbsScaler
from sklearn.metrics import accuracy_score
import tensorflow as tf
import load_salary as load_file

# CLI: argv[1] selects the dataset permutation, argv[2] (0/1) selects whether
# the known-biased points are dropped from the test split.
dist = load_file.dist
perm = int(sys.argv[1])
ordering = load_file.permutations(perm)
biased_test_points = np.load(f"{os.path.dirname(os.path.realpath(__file__))}/../../salary/salary_biased_points_dist{dist}.npy")
debiased_test = bool(int(sys.argv[2]))

# Salary dataset with 'sex' as the protected attribute (privileged class: 1).
dataset_orig = SalaryDataset(
    protected_attribute_names=['sex'],
    privileged_classes=[[1]],
    normalized = False,
    permute=perm
)

# Deterministic split: first 40 rows train, the rest test. The asserts below
# imply the full dataset has 52 rows (52 - train_examples expected test rows).
train_examples = 40
dataset_orig_train, dataset_orig_test = dataset_orig.split([train_examples], shuffle=False)
assert(len(dataset_orig_train.convert_to_dataframe()[0]) == train_examples)

if debiased_test:
    # Keep only the test points NOT flagged as biased.
    test_points = np.array(ordering[train_examples:])
    mask = np.in1d(test_points, biased_test_points) # True if the point is biased
    mask_new = ~mask
    x = mask_new.astype(int).nonzero()[0]
    dataset_orig_test = dataset_orig_test.subset(x)
    assert(len(dataset_orig_test.convert_to_dataframe()[0]) < 52 - train_examples)
else:
    assert(len(dataset_orig_test.convert_to_dataframe()[0]) == 52 - train_examples)

privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]

# Scale features; the scaler is fit on the train split only.
min_max_scaler = MaxAbsScaler()
dataset_orig_train.features = min_max_scaler.fit_transform(dataset_orig_train.features)
dataset_orig_test.features = min_max_scaler.transform(dataset_orig_test.features)

# Train the adversarially debiased classifier (debias=True) for 200 epochs.
sess = tf.Session()
debiased_model = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name='debiased_classifier',
                          debias=True,
                          sess=sess, num_epochs=200)
debiased_model.fit(dataset_orig_train)

# Accuracy and statistical parity on both splits.
dataset_debiasing_train = debiased_model.predict(dataset_orig_train)
dataset_debiasing_test = debiased_model.predict(dataset_orig_test)
classified_metric_debiasing_test = ClassificationMetric(dataset_orig_test,
                                                 dataset_debiasing_test,
                                                 unprivileged_groups=unprivileged_groups,
                                                 privileged_groups=privileged_groups)
classified_metric_debiasing_train = ClassificationMetric(dataset_orig_train,
                                                 dataset_debiasing_train,
                                                 unprivileged_groups=unprivileged_groups,
                                                 privileged_groups=privileged_groups)
train_acc = classified_metric_debiasing_train.accuracy()
test_acc = classified_metric_debiasing_test.accuracy()
# import ipdb; ipdb.set_trace()
# if dataset_orig_test.convert_to_dataframe()[0]
diff = classified_metric_debiasing_test.statistical_parity_difference()
def find_discm_examples(class0_data, class1_data, print_file, scheme):
    """Count individual-discrimination pairs for the trained model.

    The two arrays hold paired rows (same index = same individual with the
    protected attribute toggled); both are scored with the module-level
    `debiased_model` and the number of rows whose predicted label differs
    between the two versions is returned.
    """
    import pandas as pd

    assert class0_data.shape[0] == class1_data.shape[0]
    feature_cols = ['sex', 'rank', 'year', 'degree', 'Experience']

    def _as_binary_dataset(raw_rows):
        # Wrap raw feature rows into an AIF360 dataset with a dummy
        # all-zero 'salary' label column (only predictions matter here).
        frame = pd.DataFrame(data=raw_rows, columns=feature_cols, dtype='float')
        frame['salary'] = 0
        return BinaryLabelDataset(df=frame, label_names=['salary'],
                                  protected_attribute_names=['sex'])

    binary0 = _as_binary_dataset(class0_data)
    pred0 = debiased_model.predict(binary0)
    binary1 = _as_binary_dataset(class1_data)
    pred1 = debiased_model.predict(binary1)
    # Sanity check: the placeholder labels must have stayed all-zero.
    assert not np.all(binary0.labels)
    assert not np.all(binary1.labels)
    # Labels are (n, 1) arrays; the elementwise != plus sum yields the count.
    return sum(pred0.labels != pred1.labels)[0]
# sys.path.append("../../../scripts/")
from find_discm_points import entire_test_suite

# Paired test suite — presumably class0/class1 rows differ only in the
# protected attribute (TODO confirm against find_discm_points).
class0_data, class1_data = entire_test_suite(mini=False, disparateremoved=False) # False means loads entire data
num_dicsm = find_discm_examples(class0_data, class1_data, print_file=False, scheme=8)
size = class0_data.shape[0]/100  # so num_dicsm/size below reads as a percentage
print("Discrimination:", num_dicsm)
dataset = "salary"
# Append one CSV row per run; the file name records whether the biased test
# points were removed ("debiased") or kept ("fulltest").
if debiased_test:
    with open(f"results_adversarial_debiased_{dataset}_dist{dist}.csv", "a") as f:
        f.write(f'{train_acc},{test_acc},{perm},{diff},{num_dicsm},{num_dicsm/size}\n')
else:
    with open(f"results_adversarial_debiased_{dataset}_fulltest_dist{dist}.csv", "a") as f:
        f.write(f'{train_acc},{test_acc},{perm},{diff},{num_dicsm},{num_dicsm/size}\n')
| [
"aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing",
"sys.path.insert",
"numpy.all",
"numpy.in1d",
"tensorflow.Session",
"find_discm_points.entire_test_suite",
"aif360.datasets.SalaryDataset",
"numpy.array",
"os.path.realpath",
"numpy.random.seed",
"load_salary.permutatio... | [((15, 40), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../"""'], {}), "(1, '../')\n", (30, 40), False, 'import sys, os\n'), ((43, 71), 'sys.path.append', 'sys.path.append', (['"""../../../"""'], {}), "('../../../')\n", (58, 71), False, 'import sys, os\n'), ((72, 119), 'sys.path.append', 'sys.path.append', (['"""../../../competitors/AIF360/"""'], {}), "('../../../competitors/AIF360/')\n", (87, 119), False, 'import sys, os\n'), ((140, 157), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (154, 157), True, 'import numpy as np\n'), ((699, 727), 'load_salary.permutations', 'load_file.permutations', (['perm'], {}), '(perm)\n', (721, 727), True, 'import load_salary as load_file\n'), ((911, 1021), 'aif360.datasets.SalaryDataset', 'SalaryDataset', ([], {'protected_attribute_names': "['sex']", 'privileged_classes': '[[1]]', 'normalized': '(False)', 'permute': 'perm'}), "(protected_attribute_names=['sex'], privileged_classes=[[1]],\n normalized=False, permute=perm)\n", (924, 1021), False, 'from aif360.datasets import SalaryDataset, BinaryLabelDataset, StructuredDataset\n'), ((1779, 1793), 'sklearn.preprocessing.MaxAbsScaler', 'MaxAbsScaler', ([], {}), '()\n', (1791, 1793), False, 'from sklearn.preprocessing import StandardScaler, MaxAbsScaler\n'), ((1971, 1983), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1981, 1983), True, 'import tensorflow as tf\n'), ((2002, 2183), 'aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing', 'AdversarialDebiasing', ([], {'privileged_groups': 'privileged_groups', 'unprivileged_groups': 'unprivileged_groups', 'scope_name': '"""debiased_classifier"""', 'debias': '(True)', 'sess': 'sess', 'num_epochs': '(200)'}), "(privileged_groups=privileged_groups,\n unprivileged_groups=unprivileged_groups, scope_name=\n 'debiased_classifier', debias=True, sess=sess, num_epochs=200)\n", (2022, 2183), False, 'from aif360.algorithms.inprocessing.adversarial_debiasing import 
AdversarialDebiasing\n'), ((2495, 2645), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['dataset_orig_test', 'dataset_debiasing_test'], {'unprivileged_groups': 'unprivileged_groups', 'privileged_groups': 'privileged_groups'}), '(dataset_orig_test, dataset_debiasing_test,\n unprivileged_groups=unprivileged_groups, privileged_groups=\n privileged_groups)\n', (2515, 2645), False, 'from aif360.metrics import ClassificationMetric\n'), ((2822, 2974), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['dataset_orig_train', 'dataset_debiasing_train'], {'unprivileged_groups': 'unprivileged_groups', 'privileged_groups': 'privileged_groups'}), '(dataset_orig_train, dataset_debiasing_train,\n unprivileged_groups=unprivileged_groups, privileged_groups=\n privileged_groups)\n', (2842, 2974), False, 'from aif360.metrics import ClassificationMetric\n'), ((4577, 4630), 'find_discm_points.entire_test_suite', 'entire_test_suite', ([], {'mini': '(False)', 'disparateremoved': '(False)'}), '(mini=False, disparateremoved=False)\n', (4594, 4630), False, 'from find_discm_points import entire_test_suite\n'), ((1285, 1320), 'numpy.array', 'np.array', (['ordering[train_examples:]'], {}), '(ordering[train_examples:])\n', (1293, 1320), True, 'import numpy as np\n'), ((1332, 1372), 'numpy.in1d', 'np.in1d', (['test_points', 'biased_test_points'], {}), '(test_points, biased_test_points)\n', (1339, 1372), True, 'import numpy as np\n'), ((3614, 3673), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'class0_data', 'columns': 'cols', 'dtype': '"""float"""'}), "(data=class0_data, columns=cols, dtype='float')\n", (3626, 3673), True, 'import pandas as pd\n'), ((3721, 3810), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df0', 'label_names': "['salary']", 'protected_attribute_names': "['sex']"}), "(df=df0, label_names=['salary'],\n protected_attribute_names=['sex'])\n", (3739, 3810), False, 'from aif360.datasets import SalaryDataset, 
BinaryLabelDataset, StructuredDataset\n'), ((3876, 3935), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'class1_data', 'columns': 'cols', 'dtype': '"""float"""'}), "(data=class1_data, columns=cols, dtype='float')\n", (3888, 3935), True, 'import pandas as pd\n'), ((3983, 4072), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df1', 'label_names': "['salary']", 'protected_attribute_names': "['sex']"}), "(df=df1, label_names=['salary'],\n protected_attribute_names=['sex'])\n", (4001, 4072), False, 'from aif360.datasets import SalaryDataset, BinaryLabelDataset, StructuredDataset\n'), ((4143, 4168), 'numpy.all', 'np.all', (['df0_binary.labels'], {}), '(df0_binary.labels)\n', (4149, 4168), True, 'import numpy as np\n'), ((4221, 4246), 'numpy.all', 'np.all', (['df1_binary.labels'], {}), '(df1_binary.labels)\n', (4227, 4246), True, 'import numpy as np\n'), ((776, 802), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (792, 802), False, 'import sys, os\n')] |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class CholeskyOpTest(tf.test.TestCase):
  """Exercises tf.cholesky / tf.batch_cholesky on 2-D and batched inputs."""

  def _verifyCholesky(self, x):
    """Factor x and check L L^T == x with L lower triangular, diag > 0."""
    with self.test_session() as sess:
      # Verify that LL^T == x.
      if x.ndim == 2:
        factor = tf.cholesky(x)
        reconstruction = tf.matmul(factor,
                                   factor,
                                   transpose_a=False,
                                   transpose_b=True)
      else:
        factor = tf.batch_cholesky(x)
        reconstruction = tf.batch_matmul(factor, factor, adj_x=False, adj_y=True)
      factor_np, reconstruction_np = sess.run([factor, reconstruction])
      self.assertAllClose(x, reconstruction_np)
      self.assertShapeEqual(x, factor)
      # Each factor must be lower triangular with positive diagonal entries.
      if factor_np.shape[-1] > 0:
        stacked = np.reshape(factor_np, (-1,) + factor_np.shape[-2:])
        for single_factor in stacked:
          self.assertAllClose(single_factor, np.tril(single_factor))
          self.assertTrue((np.diag(single_factor) > 0.0).all())

  def testBasic(self):
    spd_matrix = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
    self._verifyCholesky(spd_matrix)

  def testBatch(self):
    diag_batch = np.array([[[1., 0.], [0., 5.]]])  # shape (1, 2, 2)
    self._verifyCholesky(diag_batch)
    self._verifyCholesky(np.vstack((diag_batch, diag_batch)))
    odd_batch = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
    self._verifyCholesky(np.vstack((odd_batch, odd_batch)))
    # Generate random positive-definite matrices (A^T A construction).
    random_batch = np.random.rand(10, 5, 5)
    for k in xrange(10):
      random_batch[k] = np.dot(random_batch[k].T, random_batch[k])
    self._verifyCholesky(random_batch)

  def testNonSquareMatrix(self):
    with self.assertRaises(ValueError):
      tf.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))

  def testWrongDimensions(self):
    rank_one = tf.constant([1., 2.])
    with self.assertRaises(ValueError):
      tf.cholesky(rank_one)

  def testNotInvertible(self):
    # The input should be invertible.
    with self.test_session():
      with self.assertRaisesOpError("LLT decomposition was not successful. The "
                                    "input might not be valid."):
        # All rows of the matrix below add to zero.
        self._verifyCholesky(
            np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1., 1.]]))

  def testEmpty(self):
    self._verifyCholesky(np.empty([0, 2, 2]))
    self._verifyCholesky(np.empty([2, 0, 0]))
# Run the test suite when the file is executed as a script.
if __name__ == "__main__":
  tf.test.main()
| [
"tensorflow.batch_matmul",
"numpy.reshape",
"numpy.random.rand",
"tensorflow.batch_cholesky",
"tensorflow.test.main",
"numpy.diag",
"numpy.array",
"numpy.dot",
"tensorflow.constant",
"six.moves.xrange",
"numpy.vstack",
"numpy.empty",
"tensorflow.matmul",
"numpy.tril",
"tensorflow.cholesk... | [((3532, 3546), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3544, 3546), True, 'import tensorflow as tf\n'), ((2147, 2183), 'numpy.array', 'np.array', (['[[[1.0, 0.0], [0.0, 5.0]]]'], {}), '([[[1.0, 0.0], [0.0, 5.0]]])\n', (2155, 2183), True, 'import numpy as np\n'), ((2326, 2389), 'numpy.array', 'np.array', (['[[[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [2.0, 0.0, 5.0]]]'], {}), '([[[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [2.0, 0.0, 5.0]]])\n', (2334, 2389), True, 'import numpy as np\n'), ((2520, 2544), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)', '(5)'], {}), '(10, 5, 5)\n', (2534, 2544), True, 'import numpy as np\n'), ((2558, 2568), 'six.moves.xrange', 'xrange', (['(10)'], {}), '(10)\n', (2564, 2568), False, 'from six.moves import xrange\n'), ((2840, 2863), 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (2851, 2863), True, 'import tensorflow as tf\n'), ((1730, 1793), 'numpy.reshape', 'np.reshape', (['chol_np', '(-1, chol_np.shape[-2], chol_np.shape[-1])'], {}), '(chol_np, (-1, chol_np.shape[-2], chol_np.shape[-1]))\n', (1740, 1793), True, 'import numpy as np\n'), ((2049, 2110), 'numpy.array', 'np.array', (['[[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [2.0, 0.0, 5.0]]'], {}), '([[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [2.0, 0.0, 5.0]])\n', (2057, 2110), True, 'import numpy as np\n'), ((2263, 2302), 'numpy.vstack', 'np.vstack', (['(simple_array, simple_array)'], {}), '((simple_array, simple_array))\n', (2272, 2302), True, 'import numpy as np\n'), ((2407, 2452), 'numpy.vstack', 'np.vstack', (['(odd_sized_array, odd_sized_array)'], {}), '((odd_sized_array, odd_sized_array))\n', (2416, 2452), True, 'import numpy as np\n'), ((2590, 2624), 'numpy.dot', 'np.dot', (['matrices[i].T', 'matrices[i]'], {}), '(matrices[i].T, matrices[i])\n', (2596, 2624), True, 'import numpy as np\n'), ((2908, 2928), 'tensorflow.cholesky', 'tf.cholesky', (['tensor3'], {}), '(tensor3)\n', (2919, 2928), True, 'import tensorflow as tf\n'), 
((3434, 3453), 'numpy.empty', 'np.empty', (['[0, 2, 2]'], {}), '([0, 2, 2])\n', (3442, 3453), True, 'import numpy as np\n'), ((3480, 3499), 'numpy.empty', 'np.empty', (['[2, 0, 0]'], {}), '([2, 0, 0])\n', (3488, 3499), True, 'import numpy as np\n'), ((1122, 1136), 'tensorflow.cholesky', 'tf.cholesky', (['x'], {}), '(x)\n', (1133, 1136), True, 'import tensorflow as tf\n'), ((1160, 1218), 'tensorflow.matmul', 'tf.matmul', (['chol', 'chol'], {'transpose_a': '(False)', 'transpose_b': '(True)'}), '(chol, chol, transpose_a=False, transpose_b=True)\n', (1169, 1218), True, 'import tensorflow as tf\n'), ((1345, 1365), 'tensorflow.batch_cholesky', 'tf.batch_cholesky', (['x'], {}), '(x)\n', (1362, 1365), True, 'import tensorflow as tf\n'), ((1389, 1441), 'tensorflow.batch_matmul', 'tf.batch_matmul', (['chol', 'chol'], {'adj_x': '(False)', 'adj_y': '(True)'}), '(chol, chol, adj_x=False, adj_y=True)\n', (1404, 1441), True, 'import tensorflow as tf\n'), ((2752, 2796), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0], [3.0, 4.0, 5.0]]'], {}), '([[1.0, 2.0, 3.0], [3.0, 4.0, 5.0]])\n', (2760, 2796), True, 'import numpy as np\n'), ((1918, 1938), 'numpy.tril', 'np.tril', (['chol_matrix'], {}), '(chol_matrix)\n', (1925, 1938), True, 'import numpy as np\n'), ((3256, 3321), 'numpy.array', 'np.array', (['[[1.0, -1.0, 0.0], [-1.0, 1.0, -1.0], [0.0, -1.0, 1.0]]'], {}), '([[1.0, -1.0, 0.0], [-1.0, 1.0, -1.0], [0.0, -1.0, 1.0]])\n', (3264, 3321), True, 'import numpy as np\n'), ((1965, 1985), 'numpy.diag', 'np.diag', (['chol_matrix'], {}), '(chol_matrix)\n', (1972, 1985), True, 'import numpy as np\n')] |
# License: BSD 3 clause
from copy import copy
from datetime import datetime
from typing import List, Tuple
import numpy as np
import pyspark.sql.functions as sf
import pytz
from pyspark.ml.feature import Bucketizer
from pyspark.sql import DataFrame
from scalpel.core.cohort import Cohort
from scalpel.core.util import rename_df_columns
class BaseFeatureDriver(object):
    """Base class for drivers, which should load data from cohorts to a specific
    format.

    Loaders should not be designed to do filtering or sanitizing. Users are
    responsible for the inputs: all event timestamps should be in
    (study_start, study_end) and age_groups should match the ages of the
    base_population contained in the studied cohort.
    These checks should be implemented in the `check_metadata` method.

    There are important considerations to be aware of when working with
    timestamps and dataframes, see pyspark documentation:
    https://spark.apache.org/docs/latest/sql-pyspark-pandas-with-arrow.html#timestamp-with-time-zone-semantics

    Parameters
    ----------
    base_population: `Cohort`
        Initial cohort containing the subjects for which we want to compute the
        data. This cohort should contain subjects, but its events are not used.
    followups: `Cohort`
        Cohort containing the follow up information. This cohort should contain
        subjects and events matching the FollowUp case class, that is the
        columns: 'patientID', 'start', 'end', 'endReason'.
    study_start: `datetime`
        Date of the study start. Beware of timezones issues!
    study_end: `datetime`
        Date of the study end. Beware of timezones issues!
    age_reference_date: `datetime`
        Date used to compute the age of the base_population contained in the
        cohorts.
    age_groups: `List[int]`, default=None
        Bounds defining longitudinal age groups. If set to None, age related
        data are not computed. These bounds must be sorted. Beware: take into
        account the ageing of base_population when defining these. Minimum
        bound should be <= minimum age, maximum bound should be >= maximum age
        + study length.
    run_checks: `bool`, default=True
        Automated checks are performed on cohorts passed to the drivers. If you
        don't want these checks to be ran, set this option to False. Disabling
        the checks might increase performance, but use at your own risk!
    """

    def __init__(
        self,
        base_population: Cohort,
        followups: Cohort,
        study_start: datetime,
        study_end: datetime,
        age_reference_date: datetime,
        age_groups: list = None,
        run_checks: bool = True,
    ):
        self.run_checks = run_checks
        self._study_start = None
        self._study_end = None
        self._is_using_longitudinal_age_groups = False

        # Study dates must be timezone aware and correctly ordered.
        if not self._has_timezone(study_start):
            raise ValueError("study_start should have a timezone. Please use pytz.")
        if not self._has_timezone(study_end):
            raise ValueError("study_end should have a timezone. Please use pytz.")
        if study_start >= study_end:
            raise ValueError("study_start should be < study_end")
        self._study_start = study_start
        self._study_end = study_end

        if not self._has_timezone(age_reference_date):
            raise ValueError(
                "age_reference_date should have a timezone. " "Please use pytz."
            )
        if age_reference_date < self.study_start:
            raise ValueError("age_reference_date should be >= study_start.")
        self._age_reference_date = age_reference_date

        self._age_groups = None
        self.age_groups = age_groups
        self.n_age_groups = len(age_groups) - 1 if age_groups is not None else 0

        # Cohort setters run validation checks, so set the backing fields first.
        self._followups = None
        self._base_population = None
        self.followups = followups
        self.base_population = base_population

    def load(self):
        """Subclasses must implement the actual data loading."""
        raise NotImplementedError("load method is not implemented in BaseLoader.")

    @property
    def study_start(self):
        return self._study_start

    @study_start.setter
    def study_start(self, value):
        raise PermissionError(
            "study_start should not be updated after loader initialisation"
        )

    @property
    def study_end(self):
        return self._study_end

    @study_end.setter
    def study_end(self, value):
        raise PermissionError(
            "study_end should not be updated after loader initialisation"
        )

    @property
    def age_reference_date(self) -> datetime:
        return self._age_reference_date

    @age_reference_date.setter
    def age_reference_date(self, value: datetime) -> None:
        raise PermissionError(
            "age_reference_date should not be updated after loader initialisation."
        )

    @property
    def age_groups(self) -> List[float]:
        return self._age_groups

    @age_groups.setter
    def age_groups(self, value: List[float]) -> None:
        # BUGFIX: age_groups is optional (None disables age related features,
        # see the class docstring). The previous version called sorted(value)
        # unconditionally, so passing the documented default None raised a
        # TypeError from within __init__.
        if value is not None and value != sorted(value):
            raise ValueError("age_groups bounds should be sorted.")
        self._age_groups = value

    @property
    def base_population(self) -> Cohort:
        return self._base_population

    @base_population.setter
    def base_population(self, value: Cohort) -> None:
        # When age groups are defined, subjects whose age does not fit within
        # the configured bounds are rejected.
        if self.run_checks and self.age_groups is not None:
            invalid = self._find_subjects_with_age_inconsistent_w_age_groups(value)
            if invalid.subjects.take(1):
                raise ValueError(
                    self._log_invalid_events_cohort(invalid, log_invalid_subjects=True)
                )
        self._base_population = value

    @property
    def followups(self) -> Cohort:
        return self._followups

    @followups.setter
    def followups(self, value: Cohort) -> None:
        # Follow-up events must lie within study dates and have start < end.
        if self.run_checks:
            invalid = self._find_events_not_in_study_dates(value)
            if invalid.events.take(1):
                raise ValueError(
                    self._log_invalid_events_cohort(invalid, log_invalid_events=True)
                )
            invalid = self._find_inconsistent_start_end_ordering(value)
            if invalid.events.take(1):
                raise ValueError(
                    self._log_invalid_events_cohort(invalid, log_invalid_events=True)
                )
        self._followups = value

    @property
    def is_using_longitudinal_age_groups(self) -> bool:
        return self._is_using_longitudinal_age_groups

    @is_using_longitudinal_age_groups.setter
    def is_using_longitudinal_age_groups(self, value: bool) -> None:
        raise PermissionError(
            "is_using_longitudinal_age_groups should not be set manually."
        )

    def _bucketize_age_column(
        self, dataframe: DataFrame, input_col: str, output_col: str
    ) -> Tuple[DataFrame, int, List[str]]:
        """Bucketize an age column using the configured age_groups bounds.

        Returns the bucketized dataframe, the number of age groups, and
        human-readable labels such as "[40.0, 50.0)" for each bucket.
        """
        bucketizer = Bucketizer(
            splits=self.age_groups, inputCol=input_col, outputCol=output_col
        )
        output = bucketizer.setHandleInvalid("keep").transform(dataframe)
        splits = list(bucketizer.getSplits())
        mapping = [
            "[{}, {})".format(splits[i], splits[i + 1]) for i in range(len(splits) - 1)
        ]
        n_age_groups = len(mapping)
        return output, n_age_groups, mapping

    def _find_events_not_in_followup_bounds(self, cohort: Cohort) -> Cohort:
        """Return a Cohort holding the events lying outside follow-up bounds."""
        fups = copy(self.followups)
        fups.events = rename_df_columns(fups.events, prefix="fup_")
        events = cohort.events.join(fups.events, "patientID")
        # between returns false when col is null
        invalid_events = events.where(
            ~(
                sf.col("start").between(sf.col("fup_start"), sf.col("fup_end"))
                & sf.col("end").between(sf.col("fup_start"), sf.col("fup_end"))
            )
        )
        return Cohort(
            cohort.name + "_inconsistent_w_followup_bounds",
            "events inconsistent with followup bounds",
            invalid_events.select("patientID").distinct(),
            invalid_events,
        )

    def _find_events_not_in_study_dates(self, cohort: Cohort) -> Cohort:
        """Return a Cohort holding the events lying outside the study dates."""
        # between returns false when col is null
        invalid_events = cohort.events.where(
            ~(
                sf.col("start").between(
                    sf.lit(self.study_start), sf.lit(self.study_end)
                )
                & sf.col("end").between(
                    sf.lit(self.study_start), sf.lit(self.study_end)
                )
            )
        )
        return Cohort(
            cohort.name + "_inconsistent_w_study_dates",
            "events inconsistent with study dates",
            invalid_events.select("patientID").distinct(),
            invalid_events,
        )

    def _find_subjects_with_age_inconsistent_w_age_groups(
        self, cohort: Cohort
    ) -> Cohort:
        """Check if min and max age_groups are consistent with subjects ages."""
        if not cohort.has_subject_information():
            raise ValueError("Cohort should have subject information.")
        duplicate = copy(cohort)
        duplicate.add_age_information(self.age_reference_date)  # add starting age
        # With longitudinal age groups, subjects age during the study, so the
        # upper bound must leave room for the whole study length (in years).
        study_length = (
            np.ceil((self.study_end - self.study_start).days / 365.25)
            if self.is_using_longitudinal_age_groups
            else 0
        )
        min_starting_age = min(self.age_groups)
        max_starting_age = max(self.age_groups) - np.ceil(study_length)
        invalid_subjects = duplicate.subjects.where(
            ~sf.col("age").between(min_starting_age, max_starting_age)
        )
        return Cohort(
            cohort.name + "_inconsistent_w_ages_and_age_groups",
            "subjects inconsistent with age groups",
            invalid_subjects,
        )

    @staticmethod
    def _find_inconsistent_start_end_ordering(cohort: Cohort) -> Cohort:
        """Return a Cohort holding the events whose start >= end."""
        events = cohort.events
        invalid_events = events.where(sf.col("start") >= sf.col("end"))
        return Cohort(
            cohort.name + "_inconsistent_w_start_end_ordering",
            "events where start >= end dates are inconsistent",
            invalid_events.select("patientID").distinct(),
            invalid_events,
        )

    @staticmethod
    def _log_invalid_events_cohort(
        cohort: Cohort,
        log_invalid_events: bool = False,
        log_invalid_subjects: bool = False,
    ) -> str:
        """Build a human-readable report for an invalid-events/subjects cohort.

        Expects the cohort name to follow the "<name>_inconsistent_w_<reference>"
        convention used by the _find_* helpers above.
        """
        cohort_name, reference = cohort.name.split("_inconsistent_w_")
        n_subjects = cohort.subjects.count()
        msg = (
            "Found {n_subjects} subjects in cohort {cohort_name} inconsistent with"
            " {reference}.\n".format(
                n_subjects=n_subjects, cohort_name=cohort_name, reference=reference
            )
        )
        if log_invalid_events:
            msg += "Showing first 10 invalid events below:\n"
            msg += cohort.events.limit(10).toPandas().to_string(index=False)
            msg += "\n"
        if log_invalid_subjects:
            msg += "Showing first 10 invalid subjects below:\n"
            msg += cohort.subjects.limit(10).toPandas().to_string(index=False)
            msg += "\n"
        return msg

    @staticmethod
    def _has_timezone(date: pytz.datetime.datetime) -> bool:
        """Check if date has timezone."""
        return date.tzinfo is not None
| [
"pyspark.sql.functions.lit",
"numpy.ceil",
"pyspark.ml.feature.Bucketizer",
"scalpel.core.cohort.Cohort",
"pyspark.sql.functions.col",
"scalpel.core.util.rename_df_columns",
"copy.copy"
] | [((6959, 7035), 'pyspark.ml.feature.Bucketizer', 'Bucketizer', ([], {'splits': 'self.age_groups', 'inputCol': 'input_col', 'outputCol': 'output_col'}), '(splits=self.age_groups, inputCol=input_col, outputCol=output_col)\n', (6969, 7035), False, 'from pyspark.ml.feature import Bucketizer\n'), ((7477, 7497), 'copy.copy', 'copy', (['self.followups'], {}), '(self.followups)\n', (7481, 7497), False, 'from copy import copy\n'), ((7520, 7565), 'scalpel.core.util.rename_df_columns', 'rename_df_columns', (['fups.events'], {'prefix': '"""fup_"""'}), "(fups.events, prefix='fup_')\n", (7537, 7565), False, 'from scalpel.core.util import rename_df_columns\n'), ((9173, 9185), 'copy.copy', 'copy', (['cohort'], {}), '(cohort)\n', (9177, 9185), False, 'from copy import copy\n'), ((9716, 9838), 'scalpel.core.cohort.Cohort', 'Cohort', (["(cohort.name + '_inconsistent_w_ages_and_age_groups')", '"""subjects inconsistent with age groups"""', 'invalid_subjects'], {}), "(cohort.name + '_inconsistent_w_ages_and_age_groups',\n 'subjects inconsistent with age groups', invalid_subjects)\n", (9722, 9838), False, 'from scalpel.core.cohort import Cohort\n'), ((9306, 9364), 'numpy.ceil', 'np.ceil', (['((self.study_end - self.study_start).days / 365.25)'], {}), '((self.study_end - self.study_start).days / 365.25)\n', (9313, 9364), True, 'import numpy as np\n'), ((9545, 9566), 'numpy.ceil', 'np.ceil', (['study_length'], {}), '(study_length)\n', (9552, 9566), True, 'import numpy as np\n'), ((10043, 10058), 'pyspark.sql.functions.col', 'sf.col', (['"""start"""'], {}), "('start')\n", (10049, 10058), True, 'import pyspark.sql.functions as sf\n'), ((10062, 10075), 'pyspark.sql.functions.col', 'sf.col', (['"""end"""'], {}), "('end')\n", (10068, 10075), True, 'import pyspark.sql.functions as sf\n'), ((7771, 7790), 'pyspark.sql.functions.col', 'sf.col', (['"""fup_start"""'], {}), "('fup_start')\n", (7777, 7790), True, 'import pyspark.sql.functions as sf\n'), ((7792, 7809), 'pyspark.sql.functions.col', 
'sf.col', (['"""fup_end"""'], {}), "('fup_end')\n", (7798, 7809), True, 'import pyspark.sql.functions as sf\n'), ((7851, 7870), 'pyspark.sql.functions.col', 'sf.col', (['"""fup_start"""'], {}), "('fup_start')\n", (7857, 7870), True, 'import pyspark.sql.functions as sf\n'), ((7872, 7889), 'pyspark.sql.functions.col', 'sf.col', (['"""fup_end"""'], {}), "('fup_end')\n", (7878, 7889), True, 'import pyspark.sql.functions as sf\n'), ((8397, 8421), 'pyspark.sql.functions.lit', 'sf.lit', (['self.study_start'], {}), '(self.study_start)\n', (8403, 8421), True, 'import pyspark.sql.functions as sf\n'), ((8423, 8445), 'pyspark.sql.functions.lit', 'sf.lit', (['self.study_end'], {}), '(self.study_end)\n', (8429, 8445), True, 'import pyspark.sql.functions as sf\n'), ((8525, 8549), 'pyspark.sql.functions.lit', 'sf.lit', (['self.study_start'], {}), '(self.study_start)\n', (8531, 8549), True, 'import pyspark.sql.functions as sf\n'), ((8551, 8573), 'pyspark.sql.functions.lit', 'sf.lit', (['self.study_end'], {}), '(self.study_end)\n', (8557, 8573), True, 'import pyspark.sql.functions as sf\n'), ((9633, 9646), 'pyspark.sql.functions.col', 'sf.col', (['"""age"""'], {}), "('age')\n", (9639, 9646), True, 'import pyspark.sql.functions as sf\n'), ((7747, 7762), 'pyspark.sql.functions.col', 'sf.col', (['"""start"""'], {}), "('start')\n", (7753, 7762), True, 'import pyspark.sql.functions as sf\n'), ((7829, 7842), 'pyspark.sql.functions.col', 'sf.col', (['"""end"""'], {}), "('end')\n", (7835, 7842), True, 'import pyspark.sql.functions as sf\n'), ((8352, 8367), 'pyspark.sql.functions.col', 'sf.col', (['"""start"""'], {}), "('start')\n", (8358, 8367), True, 'import pyspark.sql.functions as sf\n'), ((8482, 8495), 'pyspark.sql.functions.col', 'sf.col', (['"""end"""'], {}), "('end')\n", (8488, 8495), True, 'import pyspark.sql.functions as sf\n')] |
# Copyright 2019 Image Analysis Lab, German Center for Neurodegenerative Diseases (DZNE), Bonn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IMPORTS
import optparse
import sys
import nibabel.freesurfer.io as fs
import numpy as np
import math
from lapy.DiffGeo import tria_mean_curvature_flow
from lapy.TriaMesh import TriaMesh
from lapy.read_geometry import read_geometry
from lapy.Solver import Solver
# User-facing usage text shown by options_parse (fixed typos:
# "registion" -> "registration", "at al." -> "et al.").
HELPTEXT = """
Script to compute ShapeDNA using linear FEM matrices.
After correcting sign flips, embeds a surface mesh into the spectral domain,
then projects it onto a unit sphere. This is scaled and rotated to match the
atlas used for FreeSurfer surface registration.
USAGE:
spherically_project -i <input_surface> -o <output_surface>
References:
<NAME> et al. Discrete Laplace-Beltrami Operators for Shape Analysis and
Segmentation. Computers & Graphics 33(3):381-390, 2009
Martin Reuter et al. Laplace-Beltrami spectra as "Shape-DNA" of surfaces and
solids Computer-Aided Design 38(4):342-366, 2006
<NAME> et al. High-resolution inter-subject averaging and a coordinate
system for the cortical surface. Human Brain Mapping 8:272-284, 1999
Dependencies:
    Python 3.5
    Scipy 0.10 or later to solve the generalized eigenvalue problem.
    http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
    Numpy
    http://www.numpy.org
    Nibabel to read and write FreeSurfer surface meshes
    http://nipy.org/nibabel/
Original Author: <NAME>
Date: Jan-18-2016
"""
# Help strings for the command line options (fixed typo: "ouput" -> "output").
h_input = 'path to input surface'
h_output = 'path to output surface, spherically projected'
def options_parse():
    """Parse and validate the command line arguments.

    Returns the optparse options object with attributes ``input_surf``
    and ``output_surf``; exits with an error message if either path is
    missing.
    """
    parser = optparse.OptionParser(
        version='$Id: spherically_project,v 1.1 2017/01/30 20:42:08 ltirrell Exp $',
        usage=HELPTEXT)
    parser.add_option('--input', '-i', dest='input_surf', help=h_input)
    parser.add_option('--output', '-o', dest='output_surf', help=h_output)
    options, _ = parser.parse_args()
    # Both surfaces are mandatory; optparse has no built-in "required" flag.
    if None in (options.input_surf, options.output_surf):
        sys.exit('ERROR: Please specify input and output surfaces')
    return options
def tria_spherical_project(tria, flow_iter=3, debug=False):
    """
    Embed a closed triangle mesh spectrally and project it onto a sphere.

    Computes the first three non-constant Laplace-Beltrami eigenfunctions
    and uses them as new vertex coordinates (spectral embedding), which is
    then projected onto a sphere of radius 100 (the FreeSurfer convention).
    This works when the first functions have a single closed zero level
    set, splitting the mesh into two domains each. Depending on the
    original shape, triangles could get inverted. We also flip the
    eigenfunctions according to the axes that they are aligned with for
    the special case of brain surfaces in FreeSurfer coordinates
    (x: right-left, y: anterior-posterior, z: superior-inferior).

    Inputs:   tria      : TriaMesh (must be closed)
              flow_iter : mean curv flow iterations (3 should be enough)
              debug     : if True, dump eigenvalues/eigenvectors to 'debug.ev'
    Outputs:  tria      : TriaMesh (spherical, radius 100)
    Raises:   ValueError if the mesh is not closed, if eigenfunction 1 is
              not anterior-posterior aligned, or if a final quality check
              (sphere area fraction, flipped area, orthogonality) fails.
    """
    if not tria.is_closed():
        raise ValueError('Error: Can only project closed meshes!')
    # sub-function to compute flipped area of trias where normal
    # points towards origin, meaningful for the sphere, centered at zero
    def get_flipped_area(tria):
        v1 = tria.v[tria.t[:, 0], :]
        v2 = tria.v[tria.t[:, 1], :]
        v3 = tria.v[tria.t[:, 2], :]
        v2mv1 = v2 - v1
        v3mv1 = v3 - v1
        cr = np.cross(v2mv1, v3mv1)
        # signed spat volume per triangle: negative when normal faces the origin
        spatvol = np.sum(v1 * cr, axis=1)
        areas = 0.5 * np.sqrt(np.sum(cr * cr, axis=1))
        area = np.sum(areas[np.where(spatvol < 0)])
        return area
    # Solve the generalized eigenvalue problem of the FEM Laplace-Beltrami
    # operator; k=4 because evecs[:, 0] is the constant function and we
    # need the next three non-constant ones.
    fem = Solver(tria, lump=False)
    evals, evecs = fem.eigs(k=4)
    if debug:
        data = dict()
        data['Eigenvalues'] = evals
        data['Eigenvectors'] = evecs
        data['Creator'] = 'spherically_project.py'
        data['Refine'] = 0
        data['Degree'] = 1
        data['Dimension'] = 2
        data['Elements'] = tria.t.shape[0]
        data['DoF'] = evecs.shape[0]
        data['NumEW'] = 4
        from lapy.FuncIO import export_ev
        export_ev(data, 'debug.ev')
    # flip efuncs to align to coordinates consistently
    ev1 = evecs[:, 1]
    # ev1maxi = np.argmax(ev1)
    # ev1mini = np.argmin(ev1)
    # cmax = v[ev1maxi,:]
    # cmin = v[ev1mini,:]
    # cmaxN/cminN: centroids of the vertices where eigenfunction N is
    # strongly positive / strongly negative (beyond half its extremum).
    cmax1 = np.mean(tria.v[ev1 > 0.5 * np.max(ev1), :], 0)
    cmin1 = np.mean(tria.v[ev1 < 0.5 * np.min(ev1), :], 0)
    ev2 = evecs[:, 2]
    cmax2 = np.mean(tria.v[ev2 > 0.5 * np.max(ev2), :], 0)
    cmin2 = np.mean(tria.v[ev2 < 0.5 * np.min(ev2), :], 0)
    ev3 = evecs[:, 3]
    cmax3 = np.mean(tria.v[ev3 > 0.5 * np.max(ev3), :], 0)
    cmin3 = np.mean(tria.v[ev3 < 0.5 * np.min(ev3), :], 0)
    # we trust ev 1 goes from front to back
    # lNk = extent of eigenfunction N along coordinate axis k
    l11 = abs(cmax1[1] - cmin1[1])
    l21 = abs(cmax2[1] - cmin2[1])
    l31 = abs(cmax3[1] - cmin3[1])
    if l11 < l21 or l11 < l31:
        print("ERROR: direction 1 should be (anterior -posterior) but is not!")
        print(" debug info: {} {} {} ".format(l11, l21, l31))
        # sys.exit(1)
        raise ValueError('Direction 1 should be anterior - posterior')
    # only flip direction if necessary
    print("ev1 min: {} max {} ".format(cmin1, cmax1))
    # axis 1 = y is aligned with this function (for brains in FS space)
    v1 = cmax1 - cmin1
    if cmax1[1] < cmin1[1]:
        ev1 = -1 * ev1
        print("inverting direction 1 (anterior - posterior)")
    l1 = abs(cmax1[1] - cmin1[1])
    # for ev2 and ev3 there could be also a swap of the two
    l22 = abs(cmax2[2] - cmin2[2])
    l32 = abs(cmax3[2] - cmin3[2])
    # usually ev2 should be superior inferior, if ev3 is better in that direction, swap
    if l22 < l32:
        print("swapping direction 2 and 3")
        ev2, ev3 = ev3, ev2
        cmax2, cmax3 = cmax3, cmax2
        cmin2, cmin3 = cmin3, cmin2
    l23 = abs(cmax2[0] - cmin2[0])
    l33 = abs(cmax3[0] - cmin3[0])
    if l33 < l23:
        print("WARNING: direction 3 wants to swap with 2, but cannot")
    print("ev2 min: {} max {} ".format(cmin2, cmax2))
    # axis 2 = z is aligned with this function (for brains in FS space)
    v2 = cmax2 - cmin2
    if cmax2[2] < cmin2[2]:
        ev2 = -1 * ev2
        print("inverting direction 2 (superior - inferior)")
    l2 = abs(cmax2[2] - cmin2[2])
    print("ev3 min: {} max {} ".format(cmin3, cmax3))
    # axis 0 = x is aligned with this function (for brains in FS space)
    v3 = cmax3 - cmin3
    if cmax3[0] < cmin3[0]:
        ev3 = -1 * ev3
        print("inverting direction 3 (right - left)")
    l3 = abs(cmax3[0] - cmin3[0])
    # normalize the three direction vectors; their triple product measures
    # how orthogonal the eigenfunction axes are (1.0 = perfectly orthogonal)
    v1 = v1 * (1.0 / np.sqrt(np.sum(v1 * v1)))
    v2 = v2 * (1.0 / np.sqrt(np.sum(v2 * v2)))
    v3 = v3 * (1.0 / np.sqrt(np.sum(v3 * v3)))
    spatvol = abs(np.dot(v1, np.cross(v2, v3)))
    print("spat vol: {}".format(spatvol))
    mvol = tria.volume()
    print("orig mesh vol {}".format(mvol))
    bvol = l1 * l2 * l3
    print("box {}, {}, {} volume: {} ".format(l1, l2, l3, bvol))
    print("box coverage: {}".format(bvol / mvol))
    # we map evN to -1..0..+1 (keep zero level fixed)
    # I have the feeling that this helps a little with the stretching
    # at the poles, but who knows...
    ev1min = np.amin(ev1)
    ev1max = np.amax(ev1)
    ev1[ev1 < 0] /= - ev1min
    ev1[ev1 > 0] /= ev1max
    ev2min = np.amin(ev2)
    ev2max = np.amax(ev2)
    ev2[ev2 < 0] /= - ev2min
    ev2[ev2 > 0] /= ev2max
    ev3min = np.amin(ev3)
    ev3max = np.amax(ev3)
    ev3[ev3 < 0] /= - ev3min
    ev3[ev3 > 0] /= ev3max
    # set evec as new coordinates (spectral embedding)
    # axis order: x <- ev3 (right-left), y <- ev1 (ant-post), z <- ev2 (sup-inf)
    vn = np.empty(tria.v.shape)
    vn[:, 0] = ev3
    vn[:, 1] = ev1
    vn[:, 2] = ev2
    # do a few mean curvature flow euler steps to make more convex
    # three should be sufficient
    if flow_iter > 0:
        tflow = tria_mean_curvature_flow(TriaMesh(vn, tria.t), max_iter=flow_iter)
        vn = tflow.v
    # project to sphere and scaled to have the same scale/origin as FS:
    dist = np.sqrt(np.sum(vn * vn, axis=1))
    vn = 100 * (vn / dist[:, np.newaxis])
    trianew = TriaMesh(vn, tria.t)
    # quality checks: area ratios relative to a radius-100 sphere (4*pi*100^2)
    svol = trianew.area() / (4.0 * math.pi * 10000)
    print("sphere area fraction: {} ".format(svol))
    flippedarea = get_flipped_area(trianew) / (4.0 * math.pi * 10000)
    if flippedarea > 0.95:
        print("ERROR: global normal flip, exiting ..")
        raise ValueError('global normal flip')
    print("flipped area fraction: {} ".format(flippedarea))
    if svol < 0.99:
        print("ERROR: sphere area fraction should be above .99, exiting ..")
        raise ValueError('sphere area fraction should be above .99')
    if flippedarea > 0.0008:
        print("ERROR: flipped area fraction should be below .0008, exiting ..")
        raise ValueError('flipped area fraction should be below .0008')
    # here we finally check also the spat vol (orthogonality of direction vectors)
    # we could stop earlier, but most failure cases will be covered by the svol and
    # flipped area which can be better interpreted than spatvol
    if spatvol < 0.6:
        print("ERROR: spat vol (orthogonality) should be above .6, exiting ..")
        raise ValueError('spat vol (orthogonality) should be above .6')
    return trianew
def spherically_project_surface(insurf, outsurf):
    """ (string) -> None
    Read the surface at insurf, spherically project it, and write the
    projected surface (with the original volume metadata) to outsurf.
    """
    geom = read_geometry(insurf, read_metadata=True)
    mesh = TriaMesh(geom[0], geom[1])
    sphere = tria_spherical_project(mesh, flow_iter=3)
    fs.write_geometry(outsurf, sphere.v, sphere.t, volume_info=geom[2])
if __name__ == "__main__":
    # Parse and validate command line options (exits if either path is missing).
    options = options_parse()
    surf_to_project = options.input_surf
    projected_surf = options.output_surf
    print("Reading in surface: {} ...".format(surf_to_project))
    spherically_project_surface(surf_to_project, projected_surf)
    # "Outputting" typo fixed; extra space after `print` removed.
    print("Outputting spherically projected surface: {}".format(projected_surf))
    sys.exit(0)
| [
"lapy.FuncIO.export_ev",
"numpy.amin",
"numpy.cross",
"lapy.TriaMesh.TriaMesh",
"lapy.Solver.Solver",
"numpy.where",
"optparse.OptionParser",
"numpy.min",
"numpy.max",
"numpy.sum",
"lapy.read_geometry.read_geometry",
"numpy.empty",
"sys.exit",
"numpy.amax",
"nibabel.freesurfer.io.write_g... | [((2205, 2328), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'version': '"""$Id: spherically_project,v 1.1 2017/01/30 20:42:08 ltirrell Exp $"""', 'usage': 'HELPTEXT'}), "(version=\n '$Id: spherically_project,v 1.1 2017/01/30 20:42:08 ltirrell Exp $',\n usage=HELPTEXT)\n", (2226, 2328), False, 'import optparse\n'), ((4078, 4102), 'lapy.Solver.Solver', 'Solver', (['tria'], {'lump': '(False)'}), '(tria, lump=False)\n', (4084, 4102), False, 'from lapy.Solver import Solver\n'), ((7651, 7663), 'numpy.amin', 'np.amin', (['ev1'], {}), '(ev1)\n', (7658, 7663), True, 'import numpy as np\n'), ((7677, 7689), 'numpy.amax', 'np.amax', (['ev1'], {}), '(ev1)\n', (7684, 7689), True, 'import numpy as np\n'), ((7760, 7772), 'numpy.amin', 'np.amin', (['ev2'], {}), '(ev2)\n', (7767, 7772), True, 'import numpy as np\n'), ((7786, 7798), 'numpy.amax', 'np.amax', (['ev2'], {}), '(ev2)\n', (7793, 7798), True, 'import numpy as np\n'), ((7869, 7881), 'numpy.amin', 'np.amin', (['ev3'], {}), '(ev3)\n', (7876, 7881), True, 'import numpy as np\n'), ((7895, 7907), 'numpy.amax', 'np.amax', (['ev3'], {}), '(ev3)\n', (7902, 7907), True, 'import numpy as np\n'), ((8029, 8051), 'numpy.empty', 'np.empty', (['tria.v.shape'], {}), '(tria.v.shape)\n', (8037, 8051), True, 'import numpy as np\n'), ((8510, 8530), 'lapy.TriaMesh.TriaMesh', 'TriaMesh', (['vn', 'tria.t'], {}), '(vn, tria.t)\n', (8518, 8530), False, 'from lapy.TriaMesh import TriaMesh\n'), ((9840, 9881), 'lapy.read_geometry.read_geometry', 'read_geometry', (['insurf'], {'read_metadata': '(True)'}), '(insurf, read_metadata=True)\n', (9853, 9881), False, 'from lapy.read_geometry import read_geometry\n'), ((9966, 10039), 'nibabel.freesurfer.io.write_geometry', 'fs.write_geometry', (['outsurf', 'projected.v', 'projected.t'], {'volume_info': 'surf[2]'}), '(outsurf, projected.v, projected.t, volume_info=surf[2])\n', (9983, 10039), True, 'import nibabel.freesurfer.io as fs\n'), ((10453, 10464), 'sys.exit', 
'sys.exit', (['(0)'], {}), '(0)\n', (10461, 10464), False, 'import sys\n'), ((2619, 2678), 'sys.exit', 'sys.exit', (['"""ERROR: Please specify input and output surfaces"""'], {}), "('ERROR: Please specify input and output surfaces')\n", (2627, 2678), False, 'import sys\n'), ((3875, 3897), 'numpy.cross', 'np.cross', (['v2mv1', 'v3mv1'], {}), '(v2mv1, v3mv1)\n', (3883, 3897), True, 'import numpy as np\n'), ((3916, 3939), 'numpy.sum', 'np.sum', (['(v1 * cr)'], {'axis': '(1)'}), '(v1 * cr, axis=1)\n', (3922, 3939), True, 'import numpy as np\n'), ((4537, 4564), 'lapy.FuncIO.export_ev', 'export_ev', (['data', '"""debug.ev"""'], {}), "(data, 'debug.ev')\n", (4546, 4564), False, 'from lapy.FuncIO import export_ev\n'), ((8428, 8451), 'numpy.sum', 'np.sum', (['(vn * vn)'], {'axis': '(1)'}), '(vn * vn, axis=1)\n', (8434, 8451), True, 'import numpy as np\n'), ((9921, 9947), 'lapy.TriaMesh.TriaMesh', 'TriaMesh', (['surf[0]', 'surf[1]'], {}), '(surf[0], surf[1])\n', (9929, 9947), False, 'from lapy.TriaMesh import TriaMesh\n'), ((7207, 7223), 'numpy.cross', 'np.cross', (['v2', 'v3'], {}), '(v2, v3)\n', (7215, 7223), True, 'import numpy as np\n'), ((8273, 8293), 'lapy.TriaMesh.TriaMesh', 'TriaMesh', (['vn', 'tria.t'], {}), '(vn, tria.t)\n', (8281, 8293), False, 'from lapy.TriaMesh import TriaMesh\n'), ((3970, 3993), 'numpy.sum', 'np.sum', (['(cr * cr)'], {'axis': '(1)'}), '(cr * cr, axis=1)\n', (3976, 3993), True, 'import numpy as np\n'), ((4023, 4044), 'numpy.where', 'np.where', (['(spatvol < 0)'], {}), '(spatvol < 0)\n', (4031, 4044), True, 'import numpy as np\n'), ((7066, 7081), 'numpy.sum', 'np.sum', (['(v1 * v1)'], {}), '(v1 * v1)\n', (7072, 7081), True, 'import numpy as np\n'), ((7113, 7128), 'numpy.sum', 'np.sum', (['(v2 * v2)'], {}), '(v2 * v2)\n', (7119, 7128), True, 'import numpy as np\n'), ((7160, 7175), 'numpy.sum', 'np.sum', (['(v3 * v3)'], {}), '(v3 * v3)\n', (7166, 7175), True, 'import numpy as np\n'), ((4796, 4807), 'numpy.max', 'np.max', (['ev1'], {}), '(ev1)\n', 
(4802, 4807), True, 'import numpy as np\n'), ((4855, 4866), 'numpy.min', 'np.min', (['ev1'], {}), '(ev1)\n', (4861, 4866), True, 'import numpy as np\n'), ((4936, 4947), 'numpy.max', 'np.max', (['ev2'], {}), '(ev2)\n', (4942, 4947), True, 'import numpy as np\n'), ((4995, 5006), 'numpy.min', 'np.min', (['ev2'], {}), '(ev2)\n', (5001, 5006), True, 'import numpy as np\n'), ((5076, 5087), 'numpy.max', 'np.max', (['ev3'], {}), '(ev3)\n', (5082, 5087), True, 'import numpy as np\n'), ((5135, 5146), 'numpy.min', 'np.min', (['ev3'], {}), '(ev3)\n', (5141, 5146), True, 'import numpy as np\n')] |
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that specifically target noisy expectation calculation."""
import numpy as np
from absl.testing import parameterized
import tensorflow as tf
import cirq
from tensorflow_quantum.core.ops import batch_util
from tensorflow_quantum.core.ops.noise import noisy_sampled_expectation_op
from tensorflow_quantum.python import util
class NoisyExpectationCalculationTest(tf.test.TestCase, parameterized.TestCase):
    """Tests tfq.noise.expectation.

    Covers input validation errors, numerical consistency against cirq
    density-matrix / state-vector simulation, per-channel behavior, and
    empty-input edge cases of the noisy sampled-expectation op.
    """
    def test_noisy_expectation_inputs(self):
        """Make sure noisy expectation op fails gracefully on bad inputs."""
        # Build a valid baseline set of inputs; each case below corrupts
        # exactly one argument and asserts the specific error raised.
        n_qubits = 5
        batch_size = 5
        symbol_names = ['alpha']
        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                qubits, symbol_names, batch_size, include_channels=True)
        symbol_values_array = np.array(
            [[resolver[symbol]
              for symbol in symbol_names]
             for resolver in resolver_batch])
        pauli_sums = util.random_pauli_sums(qubits, 3, batch_size)
        # One repetition count per (circuit, operator) pair.
        num_samples = [[10]] * batch_size
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'programs must be rank 1'):
            # Circuit tensor has too many dimensions.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor([circuit_batch]), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'symbol_names must be rank 1.'):
            # symbol_names tensor has too many dimensions.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), np.array([symbol_names]),
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'symbol_values must be rank 2.'):
            # symbol_values_array tensor has too many dimensions.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                np.array([symbol_values_array]),
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'symbol_values must be rank 2.'):
            # symbol_values_array tensor has too few dimensions.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array[0],
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'pauli_sums must be rank 2.'):
            # pauli_sums tensor has too few dimensions.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch),
                symbol_names, symbol_values_array,
                util.convert_to_tensor(list(pauli_sums)), num_samples)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'pauli_sums must be rank 2.'):
            # pauli_sums tensor has too many dimensions.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                [util.convert_to_tensor([[x] for x in pauli_sums])],
                num_samples)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'num_samples must be rank 2'):
            # num_samples tensor has the wrong shape.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]),
                [num_samples])
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'num_samples must be rank 2'):
            # num_samples tensor has the wrong shape.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]),
                num_samples[0])
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'Unparseable proto'):
            # circuit tensor has the right type but invalid values.
            noisy_sampled_expectation_op.sampled_expectation(
                ['junk'] * batch_size, symbol_names, symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'Could not find symbol in parameter map'):
            # symbol_names tensor has the right type but invalid values.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), ['junk'],
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'qubits not found in circuit'):
            # pauli_sums tensor has the right type but invalid values.
            # Operators on qubits absent from the circuits must be rejected.
            new_qubits = [cirq.GridQubit(5, 5), cirq.GridQubit(9, 9)]
            new_pauli_sums = util.random_pauli_sums(new_qubits, 2, batch_size)
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in new_pauli_sums]),
                num_samples)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'Unparseable proto'):
            # pauli_sums tensor has the right type but invalid values 2.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array, [['junk']] * batch_size, num_samples)
        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # circuits tensor has the wrong type.
            noisy_sampled_expectation_op.sampled_expectation(
                [1.0] * batch_size, symbol_names, symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # symbol_names tensor has the wrong type.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), [0.1234],
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
        with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):
            # symbol_values tensor has the wrong type.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                [['junk']] * batch_size,
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # pauli_sums tensor has the wrong type.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array, [[1.0]] * batch_size, num_samples)
        with self.assertRaisesRegex(TypeError, 'missing'):
            # we are missing an argument.
            # pylint: disable=no-value-for-parameter
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array, num_samples)
            # pylint: enable=no-value-for-parameter
        with self.assertRaisesRegex(TypeError, 'positional arguments'):
            # pylint: disable=too-many-function-args
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]), [],
                num_samples)
            # pylint: enable=too-many-function-args
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='do not match'):
            # wrong op size.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor([cirq.Circuit()]), symbol_names,
                symbol_values_array.astype(np.float64),
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'greater than 0'):
            # negative sample counts must be rejected.
            # pylint: disable=too-many-function-args
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]),
                [[-1]] * batch_size)
            # pylint: enable=too-many-function-args
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='do not match'):
            # wrong symbol_values size.
            noisy_sampled_expectation_op.sampled_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array[:int(batch_size * 0.5)],
                util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
    # Sweep batch sizes / qubit counts, noiseless and noisy, to exercise
    # both the small- and large-problem compute paths of the op.
    @parameterized.parameters([
        {
            'n_qubits': 13,
            'batch_size': 1,
            'noisy': False
        },  # ComputeLarge.
        {
            'n_qubits': 6,
            'batch_size': 25,
            'noisy': False
        },  # ComputeSmall.
        {
            'n_qubits': 6,
            'batch_size': 10,
            'noisy': True
        },  # ComputeSmall.
        {
            'n_qubits': 8,
            'batch_size': 1,
            'noisy': True
        }  # ComputeLarge.
    ])
    def test_simulate_consistency(self, batch_size, n_qubits, noisy):
        """Test consistency with batch_util.py simulation."""
        symbol_names = ['alpha', 'beta']
        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch, resolver_batch = \
            util.random_symbol_circuit_resolver_batch(
                qubits, symbol_names, batch_size, include_channels=noisy)
        symbol_values_array = np.array(
            [[resolver[symbol]
              for symbol in symbol_names]
             for resolver in resolver_batch])
        # Two independent operators per circuit.
        pauli_sums1 = util.random_pauli_sums(qubits, 3, batch_size)
        pauli_sums2 = util.random_pauli_sums(qubits, 3, batch_size)
        batch_pauli_sums = [[x, y] for x, y in zip(pauli_sums1, pauli_sums2)]
        num_samples = [[10000] * 2] * batch_size
        op_exps = noisy_sampled_expectation_op.sampled_expectation(
            util.convert_to_tensor(circuit_batch),
            symbol_names, symbol_values_array,
            util.convert_to_tensor(batch_pauli_sums), num_samples)
        # Reference values from full simulation (density matrix when noisy).
        cirq_exps = batch_util.batch_calculate_expectation(
            circuit_batch, resolver_batch, batch_pauli_sums,
            cirq.DensityMatrixSimulator() if noisy else cirq.Simulator())
        # Loose tolerance: op values are sampled estimates.
        tol = 0.5
        self.assertAllClose(cirq_exps, op_exps, atol=tol, rtol=tol)
    @parameterized.parameters([{
        'channel': x
    } for x in util.get_supported_channels()])
    def test_single_channel(self, channel):
        """Individually test adding just a single channel type to circuits."""
        symbol_names = []
        batch_size = 5
        n_qubits = 6
        qubits = cirq.GridQubit.rect(1, n_qubits)
        # Start from channel-free circuits, then append the channel under test.
        circuit_batch, resolver_batch = \
            util.random_circuit_resolver_batch(
                qubits, batch_size, include_channels=False)
        for i in range(batch_size):
            circuit_batch[i] = circuit_batch[i] + channel.on_each(*qubits)
        symbol_values_array = np.array(
            [[resolver[symbol]
              for symbol in symbol_names]
             for resolver in resolver_batch])
        pauli_sums1 = util.random_pauli_sums(qubits, 3, batch_size)
        pauli_sums2 = util.random_pauli_sums(qubits, 3, batch_size)
        batch_pauli_sums = [[x, y] for x, y in zip(pauli_sums1, pauli_sums2)]
        num_samples = [[20000] * 2] * batch_size
        op_exps = noisy_sampled_expectation_op.sampled_expectation(
            util.convert_to_tensor(circuit_batch),
            symbol_names, symbol_values_array,
            util.convert_to_tensor(batch_pauli_sums), num_samples)
        cirq_exps = batch_util.batch_calculate_expectation(
            circuit_batch, resolver_batch, batch_pauli_sums,
            cirq.DensityMatrixSimulator())
        self.assertAllClose(cirq_exps, op_exps, atol=0.35, rtol=0.35)
    def test_correctness_empty(self):
        """Test the expectation for empty circuits."""
        # batch of one empty circuit with no symbols/operators/samples.
        empty_circuit = util.convert_to_tensor([cirq.Circuit()])
        empty_symbols = tf.convert_to_tensor([], dtype=tf.dtypes.string)
        empty_values = tf.convert_to_tensor([[]])
        empty_paulis = tf.convert_to_tensor([[]], dtype=tf.dtypes.string)
        empty_n_samples = tf.convert_to_tensor([[]], dtype=tf.int32)
        out = noisy_sampled_expectation_op.sampled_expectation(
            empty_circuit, empty_symbols, empty_values, empty_paulis,
            empty_n_samples)
        expected = np.array([[]], dtype=np.complex64)
        self.assertAllClose(out, expected)
    def test_correctness_no_circuit(self):
        """Test the correctness with the empty tensor."""
        # Zero-length batch: the op must return an empty (0, 0) result.
        empty_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
        empty_symbols = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
        empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
        empty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
        empty_n_samples = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.int32)
        out = noisy_sampled_expectation_op.sampled_expectation(
            empty_circuit, empty_symbols, empty_values, empty_paulis,
            empty_n_samples)
        self.assertShapeEqual(np.zeros((0, 0)), out)
if __name__ == "__main__":
    # Discover and run all test cases in this module via the TF test runner.
    tf.test.main()
| [
"cirq.GridQubit.rect",
"tensorflow_quantum.core.ops.noise.noisy_sampled_expectation_op.sampled_expectation",
"tensorflow_quantum.python.util.convert_to_tensor",
"cirq.GridQubit",
"cirq.DensityMatrixSimulator",
"tensorflow_quantum.python.util.get_supported_channels",
"absl.testing.parameterized.parameter... | [((10987, 11226), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["[{'n_qubits': 13, 'batch_size': 1, 'noisy': False}, {'n_qubits': 6,\n 'batch_size': 25, 'noisy': False}, {'n_qubits': 6, 'batch_size': 10,\n 'noisy': True}, {'n_qubits': 8, 'batch_size': 1, 'noisy': True}]"], {}), "([{'n_qubits': 13, 'batch_size': 1, 'noisy': False},\n {'n_qubits': 6, 'batch_size': 25, 'noisy': False}, {'n_qubits': 6,\n 'batch_size': 10, 'noisy': True}, {'n_qubits': 8, 'batch_size': 1,\n 'noisy': True}])\n", (11011, 11226), False, 'from absl.testing import parameterized\n'), ((15738, 15752), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (15750, 15752), True, 'import tensorflow as tf\n'), ((1368, 1400), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (1387, 1400), False, 'import cirq\n'), ((1455, 1557), 'tensorflow_quantum.python.util.random_symbol_circuit_resolver_batch', 'util.random_symbol_circuit_resolver_batch', (['qubits', 'symbol_names', 'batch_size'], {'include_channels': '(True)'}), '(qubits, symbol_names, batch_size,\n include_channels=True)\n', (1496, 1557), False, 'from tensorflow_quantum.python import util\n'), ((1602, 1694), 'numpy.array', 'np.array', (['[[resolver[symbol] for symbol in symbol_names] for resolver in resolver_batch]'], {}), '([[resolver[symbol] for symbol in symbol_names] for resolver in\n resolver_batch])\n', (1610, 1694), True, 'import numpy as np\n'), ((1753, 1798), 'tensorflow_quantum.python.util.random_pauli_sums', 'util.random_pauli_sums', (['qubits', '(3)', 'batch_size'], {}), '(qubits, 3, batch_size)\n', (1775, 1798), False, 'from tensorflow_quantum.python import util\n'), ((11695, 11727), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (11714, 11727), False, 'import cirq\n'), ((11783, 11886), 'tensorflow_quantum.python.util.random_symbol_circuit_resolver_batch', 
'util.random_symbol_circuit_resolver_batch', (['qubits', 'symbol_names', 'batch_size'], {'include_channels': 'noisy'}), '(qubits, symbol_names, batch_size,\n include_channels=noisy)\n', (11824, 11886), False, 'from tensorflow_quantum.python import util\n'), ((11931, 12023), 'numpy.array', 'np.array', (['[[resolver[symbol] for symbol in symbol_names] for resolver in resolver_batch]'], {}), '([[resolver[symbol] for symbol in symbol_names] for resolver in\n resolver_batch])\n', (11939, 12023), True, 'import numpy as np\n'), ((12083, 12128), 'tensorflow_quantum.python.util.random_pauli_sums', 'util.random_pauli_sums', (['qubits', '(3)', 'batch_size'], {}), '(qubits, 3, batch_size)\n', (12105, 12128), False, 'from tensorflow_quantum.python import util\n'), ((12151, 12196), 'tensorflow_quantum.python.util.random_pauli_sums', 'util.random_pauli_sums', (['qubits', '(3)', 'batch_size'], {}), '(qubits, 3, batch_size)\n', (12173, 12196), False, 'from tensorflow_quantum.python import util\n'), ((13152, 13184), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (13171, 13184), False, 'import cirq\n'), ((13240, 13318), 'tensorflow_quantum.python.util.random_circuit_resolver_batch', 'util.random_circuit_resolver_batch', (['qubits', 'batch_size'], {'include_channels': '(False)'}), '(qubits, batch_size, include_channels=False)\n', (13274, 13318), False, 'from tensorflow_quantum.python import util\n'), ((13479, 13571), 'numpy.array', 'np.array', (['[[resolver[symbol] for symbol in symbol_names] for resolver in resolver_batch]'], {}), '([[resolver[symbol] for symbol in symbol_names] for resolver in\n resolver_batch])\n', (13487, 13571), True, 'import numpy as np\n'), ((13631, 13676), 'tensorflow_quantum.python.util.random_pauli_sums', 'util.random_pauli_sums', (['qubits', '(3)', 'batch_size'], {}), '(qubits, 3, batch_size)\n', (13653, 13676), False, 'from tensorflow_quantum.python import util\n'), ((13699, 13744), 
'tensorflow_quantum.python.util.random_pauli_sums', 'util.random_pauli_sums', (['qubits', '(3)', 'batch_size'], {}), '(qubits, 3, batch_size)\n', (13721, 13744), False, 'from tensorflow_quantum.python import util\n'), ((14525, 14573), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[]'], {'dtype': 'tf.dtypes.string'}), '([], dtype=tf.dtypes.string)\n', (14545, 14573), True, 'import tensorflow as tf\n'), ((14597, 14623), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[]]'], {}), '([[]])\n', (14617, 14623), True, 'import tensorflow as tf\n'), ((14647, 14697), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[]]'], {'dtype': 'tf.dtypes.string'}), '([[]], dtype=tf.dtypes.string)\n', (14667, 14697), True, 'import tensorflow as tf\n'), ((14724, 14766), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[]]'], {'dtype': 'tf.int32'}), '([[]], dtype=tf.int32)\n', (14744, 14766), True, 'import tensorflow as tf\n'), ((14782, 14909), 'tensorflow_quantum.core.ops.noise.noisy_sampled_expectation_op.sampled_expectation', 'noisy_sampled_expectation_op.sampled_expectation', (['empty_circuit', 'empty_symbols', 'empty_values', 'empty_paulis', 'empty_n_samples'], {}), '(empty_circuit,\n empty_symbols, empty_values, empty_paulis, empty_n_samples)\n', (14830, 14909), False, 'from tensorflow_quantum.core.ops.noise import noisy_sampled_expectation_op\n'), ((14951, 14985), 'numpy.array', 'np.array', (['[[]]'], {'dtype': 'np.complex64'}), '([[]], dtype=np.complex64)\n', (14959, 14985), True, 'import numpy as np\n'), ((15155, 15200), 'tensorflow.raw_ops.Empty', 'tf.raw_ops.Empty', ([], {'shape': '(0,)', 'dtype': 'tf.string'}), '(shape=(0,), dtype=tf.string)\n', (15171, 15200), True, 'import tensorflow as tf\n'), ((15225, 15270), 'tensorflow.raw_ops.Empty', 'tf.raw_ops.Empty', ([], {'shape': '(0,)', 'dtype': 'tf.string'}), '(shape=(0,), dtype=tf.string)\n', (15241, 15270), True, 'import tensorflow as tf\n'), ((15294, 15342), 
'tensorflow.raw_ops.Empty', 'tf.raw_ops.Empty', ([], {'shape': '(0, 0)', 'dtype': 'tf.float32'}), '(shape=(0, 0), dtype=tf.float32)\n', (15310, 15342), True, 'import tensorflow as tf\n'), ((15366, 15413), 'tensorflow.raw_ops.Empty', 'tf.raw_ops.Empty', ([], {'shape': '(0, 0)', 'dtype': 'tf.string'}), '(shape=(0, 0), dtype=tf.string)\n', (15382, 15413), True, 'import tensorflow as tf\n'), ((15440, 15486), 'tensorflow.raw_ops.Empty', 'tf.raw_ops.Empty', ([], {'shape': '(0, 0)', 'dtype': 'tf.int32'}), '(shape=(0, 0), dtype=tf.int32)\n', (15456, 15486), True, 'import tensorflow as tf\n'), ((15502, 15629), 'tensorflow_quantum.core.ops.noise.noisy_sampled_expectation_op.sampled_expectation', 'noisy_sampled_expectation_op.sampled_expectation', (['empty_circuit', 'empty_symbols', 'empty_values', 'empty_paulis', 'empty_n_samples'], {}), '(empty_circuit,\n empty_symbols, empty_values, empty_paulis, empty_n_samples)\n', (15550, 15629), False, 'from tensorflow_quantum.core.ops.noise import noisy_sampled_expectation_op\n'), ((6641, 6690), 'tensorflow_quantum.python.util.random_pauli_sums', 'util.random_pauli_sums', (['new_qubits', '(2)', 'batch_size'], {}), '(new_qubits, 2, batch_size)\n', (6663, 6690), False, 'from tensorflow_quantum.python import util\n'), ((12405, 12442), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (12427, 12442), False, 'from tensorflow_quantum.python import util\n'), ((12503, 12543), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['batch_pauli_sums'], {}), '(batch_pauli_sums)\n', (12525, 12543), False, 'from tensorflow_quantum.python import util\n'), ((13953, 13990), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (13975, 13990), False, 'from tensorflow_quantum.python import util\n'), ((14051, 14091), 'tensorflow_quantum.python.util.convert_to_tensor', 
'util.convert_to_tensor', (['batch_pauli_sums'], {}), '(batch_pauli_sums)\n', (14073, 14091), False, 'from tensorflow_quantum.python import util\n'), ((14240, 14269), 'cirq.DensityMatrixSimulator', 'cirq.DensityMatrixSimulator', ([], {}), '()\n', (14267, 14269), False, 'import cirq\n'), ((15682, 15698), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {}), '((0, 0))\n', (15690, 15698), True, 'import numpy as np\n'), ((2106, 2145), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[circuit_batch]'], {}), '([circuit_batch])\n', (2128, 2145), False, 'from tensorflow_quantum.python import util\n'), ((2214, 2263), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (2236, 2263), False, 'from tensorflow_quantum.python import util\n'), ((2553, 2590), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (2575, 2590), False, 'from tensorflow_quantum.python import util\n'), ((2592, 2616), 'numpy.array', 'np.array', (['[symbol_names]'], {}), '([symbol_names])\n', (2600, 2616), True, 'import numpy as np\n'), ((2671, 2720), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (2693, 2720), False, 'from tensorflow_quantum.python import util\n'), ((3018, 3055), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (3040, 3055), False, 'from tensorflow_quantum.python import util\n'), ((3087, 3118), 'numpy.array', 'np.array', (['[symbol_values_array]'], {}), '([symbol_values_array])\n', (3095, 3118), True, 'import numpy as np\n'), ((3136, 3185), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (3158, 3185), False, 'from 
tensorflow_quantum.python import util\n'), ((3482, 3519), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (3504, 3519), False, 'from tensorflow_quantum.python import util\n'), ((3591, 3640), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (3613, 3640), False, 'from tensorflow_quantum.python import util\n'), ((3925, 3962), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (3947, 3962), False, 'from tensorflow_quantum.python import util\n'), ((4357, 4394), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (4379, 4394), False, 'from tensorflow_quantum.python import util\n'), ((4813, 4850), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (4835, 4850), False, 'from tensorflow_quantum.python import util\n'), ((4919, 4968), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (4941, 4968), False, 'from tensorflow_quantum.python import util\n'), ((5269, 5306), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (5291, 5306), False, 'from tensorflow_quantum.python import util\n'), ((5375, 5424), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (5397, 5424), False, 'from tensorflow_quantum.python import util\n'), ((5805, 5854), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (5827, 5854), False, 'from 
tensorflow_quantum.python import util\n'), ((6168, 6205), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (6190, 6205), False, 'from tensorflow_quantum.python import util\n'), ((6270, 6319), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (6292, 6319), False, 'from tensorflow_quantum.python import util\n'), ((6568, 6588), 'cirq.GridQubit', 'cirq.GridQubit', (['(5)', '(5)'], {}), '(5, 5)\n', (6582, 6588), False, 'import cirq\n'), ((6590, 6610), 'cirq.GridQubit', 'cirq.GridQubit', (['(9)', '(9)'], {}), '(9, 9)\n', (6604, 6610), False, 'import cirq\n'), ((6769, 6806), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (6791, 6806), False, 'from tensorflow_quantum.python import util\n'), ((6875, 6928), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in new_pauli_sums]'], {}), '([[x] for x in new_pauli_sums])\n', (6897, 6928), False, 'from tensorflow_quantum.python import util\n'), ((7237, 7274), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (7259, 7274), False, 'from tensorflow_quantum.python import util\n'), ((7631, 7680), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (7653, 7680), False, 'from tensorflow_quantum.python import util\n'), ((7894, 7931), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (7916, 7931), False, 'from tensorflow_quantum.python import util\n'), ((7996, 8045), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', 
(8018, 8045), False, 'from tensorflow_quantum.python import util\n'), ((8265, 8302), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (8287, 8302), False, 'from tensorflow_quantum.python import util\n'), ((8375, 8424), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (8397, 8424), False, 'from tensorflow_quantum.python import util\n'), ((8636, 8673), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (8658, 8673), False, 'from tensorflow_quantum.python import util\n'), ((8994, 9031), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (9016, 9031), False, 'from tensorflow_quantum.python import util\n'), ((9353, 9390), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (9375, 9390), False, 'from tensorflow_quantum.python import util\n'), ((9459, 9508), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (9481, 9508), False, 'from tensorflow_quantum.python import util\n'), ((9967, 10016), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (9989, 10016), False, 'from tensorflow_quantum.python import util\n'), ((10286, 10323), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (10308, 10323), False, 'from tensorflow_quantum.python import util\n'), ((10392, 10441), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (10414, 
10441), False, 'from tensorflow_quantum.python import util\n'), ((10787, 10824), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (10809, 10824), False, 'from tensorflow_quantum.python import util\n'), ((10917, 10966), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (10939, 10966), False, 'from tensorflow_quantum.python import util\n'), ((12692, 12721), 'cirq.DensityMatrixSimulator', 'cirq.DensityMatrixSimulator', ([], {}), '()\n', (12719, 12721), False, 'import cirq\n'), ((12736, 12752), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (12750, 12752), False, 'import cirq\n'), ((12910, 12939), 'tensorflow_quantum.python.util.get_supported_channels', 'util.get_supported_channels', ([], {}), '()\n', (12937, 12939), False, 'from tensorflow_quantum.python import util\n'), ((14484, 14498), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (14496, 14498), False, 'import cirq\n'), ((4464, 4513), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[x] for x in pauli_sums]'], {}), '([[x] for x in pauli_sums])\n', (4486, 4513), False, 'from tensorflow_quantum.python import util\n'), ((9863, 9877), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (9875, 9877), False, 'import cirq\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple Python microbenchmarking library."""
from collections import OrderedDict
from numbers import Number
import os
import time
from typing import Any, Optional, Union, Callable, List, Dict
import numpy as onp
from tabulate import tabulate
from jax.util import safe_zip
def benchmark(f: Callable[[], Any], iters: Optional[int] = None,
              warmup: Optional[int] = None, name: Optional[str] = None,
              target_total_secs: Optional[Union[int, float]] = None):
  """Benchmarks ``f``. Prints the results and returns the raw times.

  Args:
    f: The function to be benchmarked. Should take no arguments.
    iters: The number of iterations to run for. If none, runs until
      ``target_total_secs`` has elapsed.
    warmup: The number of warmup (untimed) iterations to run for.
    name: The name of the benchmark. Defaults to f.__name__.
    target_total_secs: If ``iters`` isn't specified, the minimum number of
      seconds to run for. Defaults to the env var TARGET_TOTAL_SECS or 10 if
      not set.

  Returns:
    An ndarray containing the number of seconds each iteration ran for.
  """
  if target_total_secs is None:
    target_total_secs = int(os.getenv("TARGET_TOTAL_SECS", "10"))
  if warmup is None:
    if iters is None:
      warmup = 1
    else:
      # Warm up for ~10% of the timed iterations, clamped to [1, 10].
      # (Previously written as onp.clip(1, iters // 10, 10), which only gave
      # the right answer because numpy applies the upper bound after the
      # lower one; this spelling matches clip's documented signature.)
      warmup = onp.clip(iters // 10, 1, 10)
  # Untimed warmup runs so caches/JITs are hot before measuring.
  for _ in range(warmup):
    f()
  times = []
  count = 0
  # Fixed iteration count if given, otherwise run until the accumulated
  # measured time reaches target_total_secs.
  while (count < iters if iters is not None
         else sum(times) < target_total_secs):
    start = time.time()
    f()
    end = time.time()
    times.append(end - start)
    count += 1
  times = onp.array(times)
  print("---------Benchmark results for %s---------" % (name or f.__name__))
  print("mean=%f std=%f %%std=%f total=%f" %
        (times.mean(), times.std(), _pstd(times), times.sum()))
  print("#iters=%d #warmup=%d" % (count, warmup))
  print()
  return times
def benchmark_suite(prepare: Callable[..., Callable], params_list: List[Dict],
                    name: str, target_total_secs: int = None):
  """Benchmarks a function over several parameter combinations.

  Runs ``benchmark`` once per entry of ``params_list`` and prints a summary
  table comparing mean time, %std, and time relative to the first run.

  Args:
    prepare: given kwargs returns a benchmark function specialized to the kwargs.
    params_list: a list of kwargs on which to run the benchmark.
    name: the name of this benchmark suite
    target_total_secs: the ``target_total_secs`` to pass to ``benchmark``.
  """
  # Canonicalize key order so results print consistently across runs, and
  # require every entry to share the same parameter names.
  params_list = [OrderedDict(sorted(p.items())) for p in params_list]
  assert all(p.keys() == params_list[0].keys() for p in params_list)

  all_times = []
  for params in params_list:
    bench_fn = prepare(**params)
    suffix = "".join("_%s=%s" % (k, v) for k, v in params.items())
    all_times.append(benchmark(bench_fn, name=name + suffix,
                               target_total_secs=target_total_secs))

  # Each row: the parameter values followed by mean, %std, and the mean
  # normalized against the first configuration.
  baseline_mean = all_times[0].mean()
  rows = [tuple(params.values()) +
          (t.mean(), _pstd(t), t.mean() / baseline_mean)
          for params, t in safe_zip(params_list, all_times)]
  headers = list(params_list[0].keys()) + ["mean", "%std", "relative"]
  print("---------Benchmark summary for %s---------" % name)
  print(tabulate(rows, headers))
  print()
def _pstd(x):
return x.std() / x.mean() * 100
| [
"numpy.clip",
"os.getenv",
"jax.util.safe_zip",
"numpy.array",
"time.time"
] | [((2173, 2189), 'numpy.array', 'onp.array', (['times'], {}), '(times)\n', (2182, 2189), True, 'import numpy as onp\n'), ((2075, 2086), 'time.time', 'time.time', ([], {}), '()\n', (2084, 2086), False, 'import time\n'), ((2105, 2116), 'time.time', 'time.time', ([], {}), '()\n', (2114, 2116), False, 'import time\n'), ((1759, 1795), 'os.getenv', 'os.getenv', (['"""TARGET_TOTAL_SECS"""', '"""10"""'], {}), "('TARGET_TOTAL_SECS', '10')\n", (1768, 1795), False, 'import os\n'), ((1883, 1911), 'numpy.clip', 'onp.clip', (['(1)', '(iters // 10)', '(10)'], {}), '(1, iters // 10, 10)\n', (1891, 1911), True, 'import numpy as onp\n'), ((3706, 3734), 'jax.util.safe_zip', 'safe_zip', (['params_list', 'times'], {}), '(params_list, times)\n', (3714, 3734), False, 'from jax.util import safe_zip\n')] |
# coding: utf-8
import chainer
class Range(chainer.Chain):
    # ch2o test case exercising the one-argument form ``range(stop)``.
    def forward(self, x):
        # Equivalent to range(0, x) with step 1.
        return range(x)
class RangeStop(chainer.Chain):
    # ch2o test case exercising the two-argument form ``range(start, stop)``.
    def forward(self, x, y):
        return range(x, y)
class RangeStep(chainer.Chain):
    # ch2o test case exercising the three-argument form
    # ``range(start, stop, step)``.
    def forward(self, x, y, z):
        return range(x, y, z)
class RangeListComp(chainer.Chain):
    # ch2o test case: list comprehensions iterating over range(p).
    def forward(self, xs, ps, p):
        # ``xs`` is indexed with a 2-tuple per iteration: first with literal
        # offsets (x, x+2), then through the index array ``ps``.
        y1 = [xs[x, x+2] for x in range(p)]
        y2 = [xs[ps[x], ps[x]+3] for x in range(p)]
        return y1, y2
# ======================================
from chainer_compiler import ch2o
import numpy as np
if __name__ == '__main__':
    # Generate ch2o test cases for each range() form.
    ch2o.generate_testcase(Range, [5])
    ch2o.generate_testcase(RangeStop(), [5, 8], subname='stop')
    ch2o.generate_testcase(RangeStep(), [5, 19, 2], subname='step')

    # Inputs for the list-comprehension case: a float matrix, an index
    # array into it, and the loop bound as an int64 scalar.
    num_rows = 5
    float_matrix = np.random.rand(10, 20).astype(np.float32)
    index_array = np.random.randint(0, 5, size=num_rows)
    loop_bound = np.int64(num_rows)
    ch2o.generate_testcase(RangeListComp, [float_matrix, index_array, loop_bound], subname='list_comp')
| [
"chainer_compiler.ch2o.generate_testcase",
"numpy.int64",
"numpy.random.rand",
"numpy.random.randint"
] | [((618, 652), 'chainer_compiler.ch2o.generate_testcase', 'ch2o.generate_testcase', (['Range', '[5]'], {}), '(Range, [5])\n', (640, 652), False, 'from chainer_compiler import ch2o\n'), ((857, 889), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {'size': 'wn'}), '(0, 5, size=wn)\n', (874, 889), True, 'import numpy as np\n'), ((898, 910), 'numpy.int64', 'np.int64', (['wn'], {}), '(wn)\n', (906, 910), True, 'import numpy as np\n'), ((915, 984), 'chainer_compiler.ch2o.generate_testcase', 'ch2o.generate_testcase', (['RangeListComp', '[v, w, p]'], {'subname': '"""list_comp"""'}), "(RangeListComp, [v, w, p], subname='list_comp')\n", (937, 984), False, 'from chainer_compiler import ch2o\n'), ((807, 829), 'numpy.random.rand', 'np.random.rand', (['(10)', '(20)'], {}), '(10, 20)\n', (821, 829), True, 'import numpy as np\n')] |
#####################################################
# Title: Tor-malware classification and .onion URL analyser (retomos)
# Author: <NAME> (<EMAIL>)
# Licence: GPLv2
#####################################################
#!/usr/bin/python
import sys
import sqlite3
import datetime
import timeit
import math
import re
import pandas as pd
import numpy as np
from time import time, sleep
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, cross_validate, train_test_split
#from sklearn.naive_bayes import *
from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_selection import SelectKBest, chi2, VarianceThreshold
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn import tree
from mglearn import make_blobs
import matplotlib.pyplot as plt
import graphviz
'''
OPEN UP DATABASE AND FETCH DATA
'''
def connect_to_database(action, training_db, urls, unknown_samples, sha256):
    """Read samples from the SQLite training database, build per-sample
    feature strings (registry keys, DNS, DLLs, API calls) grouped by SHA256,
    and pass the resulting data frames to build_classifiers().

    Args:
        action: only runs when this is False; other values are a no-op here.
        training_db: path to the SQLite training-set database.
        urls: when True, extract .onion-style URLs from the strings of
            samples whose Tor-malware prediction was verified.
        unknown_samples: not used in this function.
        sha256: not used in this function.
    """
    # Open up training data set
    training_db_connection = ""
    training_db_cursor = ""
    # NOTE(review): clfnb/clfrf are instantiated but never used here; the
    # models are built inside build_classifiers() instead.
    clfnb = MultinomialNB()
    clfrf = RandomForestClassifier(random_state=0)
    if action == False:
        try:
            # Connect to training set database
            training_db_connection = sqlite3.connect(str(training_db))
            training_db_cursor = training_db_connection.cursor()
            # Queries for retrieving data to analyse
            sql_reg_keys_query = "SELECT sha256, path FROM reg_keys;"
            sql_strings_query = "SELECT strings FROM strings;"
            training_db_cursor.execute(sql_reg_keys_query)
            reg_key_pairs = training_db_cursor.fetchall()
            reg_keys_dict = {}
            unknown_samples_dict = {}
            cur_sha = ""
            cur_class_label = 3
            class_label=0
            reg_keys_list = []
            dns_list = []
            api_list = []
            dll_list = []
            tor_related = int(0)
            api_string = ""
            reg_keys_string = ""
            dns_string =""
            counter = 0
            counter_length = len(reg_key_pairs)
            reg_keys_combined = {}
            unknown_samples_combined = {}
            print("Fetching data from database. Processing.")
            # reg_key_pairs is iterated as (sha256, path) rows; rows sharing a
            # SHA256 are accumulated into one feature record per sample.
            for pair in reg_key_pairs:
                counter += 1
                # Print progress
                if counter % 100 == 0:
                    sys.stdout.write(".")
                    sys.stdout.flush()
                if counter == (math.ceil(0.1 * counter_length)):
                    print("10%")
                if counter == (math.ceil(0.2* counter_length)):
                    print("20%")
                if counter == (math.ceil(0.5 * counter_length)):
                    print("50%")
                if counter == (math.ceil(0.7 * counter_length)):
                    print("70%")
                if counter == (math.ceil(0.8 * counter_length)):
                    print("80%")
                if counter == (math.ceil(0.9 * counter_length)):
                    print("90%")
                if counter == (math.ceil(0.95 * counter_length)):
                    print("95%")
                # New SHA256 encountered: reset the per-sample accumulators.
                # NOTE(review): on this first row of a sample, pair[1] (the
                # registry path) is not appended — only subsequent rows are;
                # confirm whether that is intentional.
                if cur_sha != pair[0]:
                    cur_sha = pair[0]
                    reg_keys_list = []
                    api_list = []
                    dll_list = []
                    api_string = ""
                    dll_string = ""
                    dns_string = ""
                    reg_keys_string = ""
                    class_label =[]
                else:
                    reg_keys_list.append(pair[1])
                # Per-row lookups of the sample's DNS, API-call, DLL and
                # label records (re-queried for every reg-key row).
                dns_query = "SELECT dns FROM network WHERE sha256=\'" + cur_sha + "\';"
                training_db_cursor.execute(dns_query)
                dns_list = training_db_cursor.fetchall()
                api_query = "SELECT name,tor_related FROM api_calls WHERE sha256=\'" + cur_sha + "\';"
                training_db_cursor.execute(api_query)
                api_list = training_db_cursor.fetchall()
                dll_query = "SELECT name FROM dlls WHERE sha256=\'" + cur_sha + "\';"
                training_db_cursor.execute(dll_query)
                dll_list = training_db_cursor.fetchall()
                class_query = "SELECT tor_related FROM label WHERE sha256=\'" + cur_sha + "\';"
                training_db_cursor.execute(class_query)
                class_label = training_db_cursor.fetchall()
                # Append data from database
                api_string = "".join(str(api_list))
                reg_keys_string = "".join(str(reg_keys_list))
                dns_string = "".join(str(dns_list))
                dll_string = "".join(str(dll_list))
                # If 1 or 0, samples are correctly classified. 2 are prediction candidates.
                if class_label:
                    if 0 in class_label[0]:
                        tor_related = int(0)
                        reg_keys_dict.update({cur_sha : [reg_keys_string, dns_string, dll_string, api_string, tor_related]})
                        reg_keys_combined.update({cur_sha : [reg_keys_string + " " + dns_string + " " + dll_string + " " + api_string, tor_related]})
                    if 1 in class_label[0]:
                        tor_related = int(1)
                        reg_keys_dict.update({cur_sha : [reg_keys_string, dns_string, dll_string, api_string, tor_related]})
                        reg_keys_combined.update({cur_sha : [reg_keys_string + " " + dns_string + " " + dll_string + " " + api_string, tor_related]})
                    if 2 in class_label[0]:
                        tor_related = int(2)
                        unknown_samples_dict.update({cur_sha : [reg_keys_string, dns_string, dll_string, api_string, tor_related]})
                        # NOTE(review): unlike the two branches above there is
                        # no " " separator between dns_string and dll_string
                        # here — possibly unintended.
                        unknown_samples_combined.update({cur_sha : [reg_keys_string + " " + dns_string + dll_string + " " + api_string, tor_related]})
            # Construct data frames from the feature dictionaries
            training_df2 = pd.DataFrame(reg_keys_dict).T
            training_df3 = pd.DataFrame(reg_keys_combined).T
            # Construct a data frame for the unknown sample to be classified as well
            unknown_df2 = pd.DataFrame(unknown_samples_dict).T
            unknown_df3 = pd.DataFrame(unknown_samples_combined).T
            # predictions_SHA256_list = build_classifiers(training_df2, training_df3, unknown_df2, unknown_df3)
            predictions_SHA256_list = build_classifiers(training_df2, training_df3, unknown_df2, unknown_df3)
            # If URLs flag enabled, go fetch URLs
            if urls == True:
                unique_onion_urls = []
                print("|-- Tor Malware\n", predictions_SHA256_list)
                for prediction_SHA256 in predictions_SHA256_list:
                    strings_query = "SELECT strings FROM strings WHERE sha256=\'" + prediction_SHA256 + "\';"
                    dns_query = "SELECT dns FROM network WHERE sha256=\'" + prediction_SHA256 + "\';"
                    training_db_cursor.execute(strings_query)
                    predicted_strings = training_db_cursor.fetchall()
                    # Find .onion URL
                    for onion_url in predicted_strings:
                        for string in onion_url:
                            #tmp_list = re.findall("http[s]?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+", string)
                            #tmp_list = re.findall("(\w+)://([\w\-\.]+)/(\w+).(\w+)", string)
                            tmp_list = re.findall(r"(?<=\.)([^.]+)(?:\.(?:onion|[^.]+(?:$|\n)))", string)
                            # De-duplicate while preserving first-seen order.
                            for i in tmp_list:
                                if i not in unique_onion_urls:
                                    unique_onion_urls.append(i)
                print("|--- Onion URLs \n", unique_onion_urls)
            # Close DB connection
            training_db_connection.commit()
            training_db_connection.close()
        except sqlite3.Error as err:
            print("Sqlite error:", err)
        finally:
            # NOTE(review): if sqlite3.connect() itself failed,
            # training_db_connection is still the "" placeholder and this
            # close() raises AttributeError; also a second close() runs on the
            # success path (harmless for sqlite3 connections).
            training_db_connection.close()
"""
BUILD CLASSIFICATION MODELS
"""
def build_classifiers(df2, df3, unknown_df2, unknown_df3):
    """Train and evaluate five classifiers (MNB, SVM, LR, RF, DT) on the
    bag-of-words features, print their cross-validation reports, and return
    the SHA256 indices whose decision-tree prediction matched the true label.

    Args:
        df2: per-sample frame; column 4 holds the class label (0/1).
        df3: per-sample frame; column 0 holds the combined feature string.
        unknown_df2: frame of unlabelled samples (same layout as df2).
        unknown_df3: frame of unlabelled samples (same layout as df3).

    Returns:
        List of SHA256 index values verified by verify_predictions() against
        the decision-tree predictions on the held-out test split.
    """
    # Create bag of words for label:
    vect = CountVectorizer(lowercase=False)
    vect.fit_transform(df3[0])
    X = vect.transform(df3[0])
    # If there are unknown samples, make predictions on them.
    X_unknown = vect.transform(unknown_df3[0])
    # unknown_samples_SHA256 = df3[0].index
    #X = pd.DataFrame(X_cand, columns=vect.get_feature_names())
    # Target/class labels
    y = df2[4]
    y = y.astype('int')
    # Feature selection
    # NOTE(review): fit_transform's result is discarded, so the variance
    # threshold has no effect on the features actually used below.
    selector = VarianceThreshold(threshold=12)
    selector.fit_transform(X)
    # 80 / 20 split training and testing data. Shuffle just in case.
    X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=True, test_size=0.2)
    y_train = y_train.astype('int')
    y_test = y_test.astype('int')
    # Naive Bayes
    mnb = MultinomialNB()
    nb_clf = mnb.fit(X_train.toarray(), y_train.to_numpy())
    mnb_prediction = nb_clf.predict(X_test.toarray())
    mnb_proba = nb_clf.predict_proba(X_test)[:, 1]
    mnb_cross_validation_scores = cross_validate(nb_clf, X_test.toarray(), y_test.to_numpy(), cv=5, scoring=["accuracy", "f1", "recall", "precision", "roc_auc"], n_jobs=-1, return_train_score=True)
    mnb_cross_validation_score = cross_val_score(nb_clf, X_test.toarray(), y_test.to_numpy(), cv=5, scoring="accuracy")
    mnb_roc_auc_avg = roc_auc_score(y_test, mnb_prediction)
    mnb_balanced_accuracy = balanced_accuracy_score(y_test, mnb_prediction)
    mnb_precision, mnb_recall, mnb_threshold = precision_recall_curve(y_test, nb_clf.predict(X_test.toarray()))
    mnb_fpr = dict()
    mnb_tpr = dict()
    mnb_roc_auc = dict()
    mnb_fpr[0], mnb_tpr[0], _ = roc_curve(y_test, mnb_proba)
    mnb_roc_auc[0] = auc(mnb_fpr[0], mnb_tpr[0])
    # Compute micro-average ROC curve and ROC area
    mnb_fpr["micro"], mnb_tpr["micro"], _ = roc_curve(y_test.ravel(), mnb_proba.ravel())
    mnb_roc_auc["micro"] = auc(mnb_fpr["micro"], mnb_tpr["micro"])
    print("\n | ---- MNB cross validation score: ", mnb_cross_validation_score.mean())
    print(classification_report(y_test, mnb_prediction))
    # Support Vector Machine
    clf = svm.SVC(C=2, cache_size=9000, probability=True).fit(X_train, y_train)
    svm_proba = clf.predict_proba(X_test)[:, 1]
    svm_prediction = clf.predict(X_test)
    svm_unknown_sample_predicition = clf.predict(X_unknown)
    svm_y_score = clf.decision_function(X_test)
    svm_roc_auc_avg = roc_auc_score(y_test, svm_prediction)
    svm_cross_validation_scores = cross_validate(clf, X_test, y_test, cv=5, scoring=["accuracy", "balanced_accuracy","precision","f1","recall","roc_auc"], return_train_score=True)
    svm_cross_validation_score = cross_val_score(clf, X_test, y_test, cv=5, scoring="accuracy")
    svm_precision, svm_recall, svm_threshold = precision_recall_curve(y_test, clf.decision_function(X_test))
    svm_close_zero = np.argmin(np.abs(svm_threshold))
    svm_fpr = dict()
    svm_tpr = dict()
    svm_roc_auc = dict()
    #svm_fpr[0], svm_tpr[0], _ = roc_curve(y_test, svm_prediction)
    svm_fpr[0], svm_tpr[0], _ = roc_curve(y_test, svm_proba)
    #svm_fpr[1], svm_tpr[1], _ = roc_curve(y_test[:,1], svm_y_score[:, 1])
    svm_roc_auc[0] = auc(svm_fpr[0], svm_tpr[0])
    # Compute micro-average ROC curve and ROC area
    svm_fpr["micro"], svm_tpr["micro"], _ = roc_curve(y_test.ravel(), svm_proba.ravel())
    svm_roc_auc["micro"] = auc(svm_fpr["micro"], svm_tpr["micro"])
    # NOTE(review): messages say "10-fold" but cv=5 is used throughout.
    print("\n\n|---- SVM 10-fold cross validation accuracy score:{}".format(np.mean(svm_cross_validation_score)))
    # Logistic regression classifier
    logreg = LogisticRegression(max_iter=4000).fit(X_train, y_train)
    lr_prediction = logreg.predict(X_test)
    lr_unknown_predictions = logreg.predict(X_unknown)
    lr_proba = logreg.predict_proba(X_test)[:, 1]
    lr_decision_function = logreg.decision_function(X_test)
    lr_cross_validation_scores = cross_validate(logreg, X_test, y_test, cv=5 , scoring=["accuracy", "balanced_accuracy", "precision", "f1", "recall","roc_auc"], n_jobs=-1, return_train_score=True)
    lr_cross_validation_score = cross_val_score(logreg, X_test, y_test, cv=5 , scoring="accuracy")
    # NOTE(review): lr_roc_auc is assigned a score here and immediately
    # overwritten by the dict() below.
    lr_roc_auc = roc_auc_score(y_test, lr_prediction)
    lr_fpr = dict()
    lr_tpr = dict()
    lr_roc_auc = dict()
    lr_fpr[0], lr_tpr[0], _ = roc_curve(y_test, lr_proba)
    lr_roc_auc[0] = auc(lr_fpr[0], lr_tpr[0])
    lr_fpr["micro"], lr_tpr["micro"], _ = roc_curve(y_test.ravel(), lr_proba.ravel())
    lr_roc_auc["micro"] = auc(lr_fpr["micro"], lr_tpr["micro"])
    average_precision = average_precision_score(y_test, lr_decision_function)
    precision, recall, threshold = precision_recall_curve(y_test, lr_decision_function)
    precision1, recall1, f1, supp = precision_recall_fscore_support(y_test, lr_prediction, average="weighted", zero_division=1)
    print("\n\n|---- LR 10-fold cross validation accuracy score:{}".format(np.mean(lr_cross_validation_score)))
    print(classification_report(y_test, lr_prediction, zero_division=1))
    # Random forest classifier
    rf_clf = RandomForestClassifier(max_depth=2, random_state=0)
    rf_clf.fit(X_train, y_train)
    rf_prediction = rf_clf.predict(X_test)
    rf_unknown_prediction = rf_clf.predict(X_unknown)
    rf_proba = rf_clf.predict_proba(X_test)[:, 1]
    rf_fpr = dict()
    rf_tpr = dict()
    rf_roc_auc = dict()
    rf_fpr[0], rf_tpr[0], _ = roc_curve(y_test, rf_prediction)
    rf_roc_auc[0] = auc(rf_fpr[0], rf_tpr[0])
    rf_fpr["micro"], rf_tpr["micro"], _ = roc_curve(y_test.ravel(), rf_prediction.ravel())
    rf_roc_auc["micro"] = auc(rf_fpr["micro"], rf_tpr["micro"])
    rf_precision, rf_recall, rf_threshold = precision_recall_curve(y_test, rf_prediction)
    rf_cross_validation_score = cross_val_score(rf_clf, X_test, y_test, cv=5 , scoring="accuracy")
    print("\n\n|---- RF 10-fold cross validation accuracy score: {}", rf_cross_validation_score.mean())
    print(classification_report(y_test,rf_prediction))
    # Decision tree classifier
    dt_clf = DecisionTreeClassifier()
    dt_clf.fit(X_train, y_train)
    dt_prediction = dt_clf.predict(X_test)
    dt_unknown_prediction = dt_clf.predict(X_unknown)
    dt_proba = dt_clf.predict_proba(X_test)[:, 1]
    dt_fpr = dict()
    dt_tpr = dict()
    dt_roc_auc = dict()
    dt_fpr[0], dt_tpr[0], _ = roc_curve(y_test, dt_prediction)
    dt_roc_auc[0] = auc(dt_fpr[0], dt_tpr[0])
    dt_fpr["micro"], dt_tpr["micro"], _ = roc_curve(y_test.ravel(), dt_prediction.ravel())
    dt_roc_auc["micro"] = auc(dt_fpr["micro"], dt_tpr["micro"])
    dt_precision, dt_recall, dt_threshold = precision_recall_curve(y_test, dt_prediction)
    dt_cross_validation_score = cross_val_score(dt_clf, X_test, y_test, cv=5 , scoring="accuracy")
    print("\n\n|---- DT 10-fold cross validation accuracy score:{} ", dt_cross_validation_score.mean())
    print("\nDT score: ", dt_clf.score(X_test, y_test), "\nDT classification report\n\n", classification_report(y_test, dt_prediction), export_text(dt_clf, show_weights=True))
    print("DT y_predictions: ", dt_prediction, "y_test: ", y_test)
    # Verify predictions with the true labels
    verified_predictions_SHA256_list = verify_predictions(dt_prediction, y_test)
    # Unseen samples predictions
    """
    # Draw AuC RoC
    roc_plt = plt
    roc_plt.figure()
    lw = 2
    roc_plt.plot(svm_fpr[0], svm_tpr[0], color='red', lw=lw, label='Support vector machine ROC curve (area = %0.2f)' % svm_roc_auc[0])
    roc_plt.plot(lr_fpr[0], lr_tpr[0], color='yellow', lw=lw, label='Logistic regression ROC curve (area = %0.2f)' % lr_roc_auc[0])
    roc_plt.plot(mnb_fpr[0], mnb_tpr[0], color='green', lw=lw, label='Multinomial naive Bayes ROC curve (area = %0.2f)' % mnb_roc_auc[0])
    roc_plt.plot(rf_fpr[0], rf_tpr[0], color='blue', lw=lw, label='Random Forest ROC curve (area = %0.2f)' % rf_roc_auc[0])
    roc_plt.plot(dt_fpr[0], dt_tpr[0], color='purple', lw=lw, label='Decision tree ROC curve (area = %0.2f)' % dt_roc_auc[0])
    roc_plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    roc_plt.xlim([0.0, 1.0])
    roc_plt.ylim([0.0, 1.05])
    roc_plt.xlabel('False Positive Rate')
    roc_plt.ylabel('True Positive Rate')
    roc_plt.title('Receiver operating characteristic.')
    roc_plt.legend(loc="lower right")
    roc_plt.grid(True)
    #fig_file = str(datetime.datetime.now() + ".png"
    roc_plt.savefig("roc.tiff", format="tiff")
    # Plot precision and recall graph
    plt.plot(precision, recall, label="Logistic regression")
    plt.plot(svm_precision, svm_recall, label="Support vector machine")
    plt.plot(mnb_precision, mnb_recall, label="Multinomial naive Bayes")
    plt.plot(rf_precision, rf_recall, label="Random forest")
    plt.plot(dt_precision, dt_recall, label="Decision tree")
    plt.xlabel("Precision")
    plt.ylabel("Recall")
    plt.legend(loc="best")
    fig2_file = str(datetime.datetime.now()) + ".tiff"
    plt.savefig(fig2_file, format="tiff")
    """
    return verified_predictions_SHA256_list
def verify_predictions(X_predictions_list, y_true):
    """Return the index values (SHA256 hashes) of samples whose predicted
    label matches the true label.

    Args:
        X_predictions_list: sequence of predicted class labels, positionally
            aligned with ``y_true``.
        y_true: pandas Series of true labels indexed by SHA256.

    Returns:
        List of ``y_true`` index values where prediction equals truth.
    """
    verified_predictions_SHA256_list = []
    # Bug fix: the original read X_predictions_list[0] once before the loop
    # (and never re-read it), so every sample was compared against the first
    # prediction only; it also raised IndexError on empty input.
    for counter, (y_index, y_value) in enumerate(y_true.items()):
        if int(X_predictions_list[counter]) == y_value:
            print("|--- Prediction matches the true label on file with SHA256: ", y_index)
            verified_predictions_SHA256_list.append(y_index)
    return verified_predictions_SHA256_list
# Constructor
if __name__ == "__main__":
    # NOTE(review): neither `docopt` nor `main` is imported or defined in this
    # file, so running it as a script raises NameError — a `from docopt import
    # docopt` import and a `main()` definition appear to be missing; confirm.
    arguments = docopt(__doc__, version='retomos 0.1')
    main(arguments)
| [
"sklearn.feature_selection.VarianceThreshold",
"sklearn.metrics.balanced_accuracy_score",
"sklearn.metrics.classification_report",
"sklearn.metrics.auc",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.roc_curve",
"numpy.mean",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.tree.Decis... | [((1454, 1469), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (1467, 1469), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((1483, 1521), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1505, 1521), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((8896, 8928), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'lowercase': '(False)'}), '(lowercase=False)\n', (8911, 8928), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((9329, 9360), 'sklearn.feature_selection.VarianceThreshold', 'VarianceThreshold', ([], {'threshold': '(12)'}), '(threshold=12)\n', (9346, 9360), False, 'from sklearn.feature_selection import SelectKBest, chi2, VarianceThreshold\n'), ((9504, 9555), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'shuffle': '(True)', 'test_size': '(0.2)'}), '(X, y, shuffle=True, test_size=0.2)\n', (9520, 9555), False, 'from sklearn.model_selection import cross_val_score, cross_validate, train_test_split\n'), ((9660, 9675), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (9673, 9675), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((10187, 10224), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'mnb_prediction'], {}), '(y_test, mnb_prediction)\n', (10200, 10224), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((10254, 10301), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_test', 'mnb_prediction'], {}), '(y_test, mnb_prediction)\n', (10277, 10301), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, 
precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((10518, 10546), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'mnb_proba'], {}), '(y_test, mnb_proba)\n', (10527, 10546), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((10569, 10596), 'sklearn.metrics.auc', 'auc', (['mnb_fpr[0]', 'mnb_tpr[0]'], {}), '(mnb_fpr[0], mnb_tpr[0])\n', (10572, 10596), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((10767, 10806), 'sklearn.metrics.auc', 'auc', (["mnb_fpr['micro']", "mnb_tpr['micro']"], {}), "(mnb_fpr['micro'], mnb_tpr['micro'])\n", (10770, 10806), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((11293, 11330), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'svm_prediction'], {}), '(y_test, svm_prediction)\n', (11306, 11330), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((11366, 11523), 'sklearn.model_selection.cross_validate', 'cross_validate', (['clf', 'X_test', 'y_test'], {'cv': '(5)', 'scoring': "['accuracy', 'balanced_accuracy', 'precision', 'f1', 'recall', 'roc_auc']", 'return_train_score': '(True)'}), "(clf, X_test, y_test, cv=5, scoring=['accuracy',\n 'balanced_accuracy', 'precision', 'f1', 'recall', 'roc_auc'],\n return_train_score=True)\n", (11380, 11523), False, 'from sklearn.model_selection import cross_val_score, 
cross_validate, train_test_split\n'), ((11546, 11608), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['clf', 'X_test', 'y_test'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(clf, X_test, y_test, cv=5, scoring='accuracy')\n", (11561, 11608), False, 'from sklearn.model_selection import cross_val_score, cross_validate, train_test_split\n'), ((11945, 11973), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'svm_proba'], {}), '(y_test, svm_proba)\n', (11954, 11973), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((12072, 12099), 'sklearn.metrics.auc', 'auc', (['svm_fpr[0]', 'svm_tpr[0]'], {}), '(svm_fpr[0], svm_tpr[0])\n', (12075, 12099), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((12270, 12309), 'sklearn.metrics.auc', 'auc', (["svm_fpr['micro']", "svm_tpr['micro']"], {}), "(svm_fpr['micro'], svm_tpr['micro'])\n", (12273, 12309), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((12781, 12952), 'sklearn.model_selection.cross_validate', 'cross_validate', (['logreg', 'X_test', 'y_test'], {'cv': '(5)', 'scoring': "['accuracy', 'balanced_accuracy', 'precision', 'f1', 'recall', 'roc_auc']", 'n_jobs': '(-1)', 'return_train_score': '(True)'}), "(logreg, X_test, y_test, cv=5, scoring=['accuracy',\n 'balanced_accuracy', 'precision', 'f1', 'recall', 'roc_auc'], n_jobs=-1,\n return_train_score=True)\n", (12795, 12952), False, 'from sklearn.model_selection import cross_val_score, cross_validate, train_test_split\n'), ((12978, 13043), 
'sklearn.model_selection.cross_val_score', 'cross_val_score', (['logreg', 'X_test', 'y_test'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(logreg, X_test, y_test, cv=5, scoring='accuracy')\n", (12993, 13043), False, 'from sklearn.model_selection import cross_val_score, cross_validate, train_test_split\n'), ((13063, 13099), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'lr_prediction'], {}), '(y_test, lr_prediction)\n', (13076, 13099), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((13198, 13225), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'lr_proba'], {}), '(y_test, lr_proba)\n', (13207, 13225), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((13247, 13272), 'sklearn.metrics.auc', 'auc', (['lr_fpr[0]', 'lr_tpr[0]'], {}), '(lr_fpr[0], lr_tpr[0])\n', (13250, 13272), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((13387, 13424), 'sklearn.metrics.auc', 'auc', (["lr_fpr['micro']", "lr_tpr['micro']"], {}), "(lr_fpr['micro'], lr_tpr['micro'])\n", (13390, 13424), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((13450, 13503), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_test', 'lr_decision_function'], {}), '(y_test, lr_decision_function)\n', (13473, 13503), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, 
precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((13540, 13592), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_test', 'lr_decision_function'], {}), '(y_test, lr_decision_function)\n', (13562, 13592), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((13630, 13725), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'lr_prediction'], {'average': '"""weighted"""', 'zero_division': '(1)'}), "(y_test, lr_prediction, average='weighted',\n zero_division=1)\n", (13661, 13725), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((13957, 14008), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(2)', 'random_state': '(0)'}), '(max_depth=2, random_state=0)\n', (13979, 14008), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14291, 14323), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'rf_prediction'], {}), '(y_test, rf_prediction)\n', (14300, 14323), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((14345, 14370), 'sklearn.metrics.auc', 'auc', (['rf_fpr[0]', 'rf_tpr[0]'], {}), '(rf_fpr[0], rf_tpr[0])\n', (14348, 14370), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((14490, 
14527), 'sklearn.metrics.auc', 'auc', (["rf_fpr['micro']", "rf_tpr['micro']"], {}), "(rf_fpr['micro'], rf_tpr['micro'])\n", (14493, 14527), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((14573, 14618), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_test', 'rf_prediction'], {}), '(y_test, rf_prediction)\n', (14595, 14618), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((14652, 14717), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['rf_clf', 'X_test', 'y_test'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(rf_clf, X_test, y_test, cv=5, scoring='accuracy')\n", (14667, 14717), False, 'from sklearn.model_selection import cross_val_score, cross_validate, train_test_split\n'), ((14929, 14953), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (14951, 14953), False, 'from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz\n'), ((15236, 15268), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'dt_prediction'], {}), '(y_test, dt_prediction)\n', (15245, 15268), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((15290, 15315), 'sklearn.metrics.auc', 'auc', (['dt_fpr[0]', 'dt_tpr[0]'], {}), '(dt_fpr[0], dt_tpr[0])\n', (15293, 15315), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((15435, 15472), 
'sklearn.metrics.auc', 'auc', (["dt_fpr['micro']", "dt_tpr['micro']"], {}), "(dt_fpr['micro'], dt_tpr['micro'])\n", (15438, 15472), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((15518, 15563), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_test', 'dt_prediction'], {}), '(y_test, dt_prediction)\n', (15540, 15563), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((15597, 15662), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['dt_clf', 'X_test', 'y_test'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(dt_clf, X_test, y_test, cv=5, scoring='accuracy')\n", (15612, 15662), False, 'from sklearn.model_selection import cross_val_score, cross_validate, train_test_split\n'), ((10907, 10952), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'mnb_prediction'], {}), '(y_test, mnb_prediction)\n', (10928, 10952), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((11751, 11772), 'numpy.abs', 'np.abs', (['svm_threshold'], {}), '(svm_threshold)\n', (11757, 11772), True, 'import numpy as np\n'), ((13846, 13907), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'lr_prediction'], {'zero_division': '(1)'}), '(y_test, lr_prediction, zero_division=1)\n', (13867, 13907), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, 
confusion_matrix\n'), ((14835, 14879), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'rf_prediction'], {}), '(y_test, rf_prediction)\n', (14856, 14879), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((15860, 15904), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'dt_prediction'], {}), '(y_test, dt_prediction)\n', (15881, 15904), False, 'from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix\n'), ((15906, 15944), 'sklearn.tree.export_text', 'export_text', (['dt_clf'], {'show_weights': '(True)'}), '(dt_clf, show_weights=True)\n', (15917, 15944), False, 'from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz\n'), ((10999, 11046), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': '(2)', 'cache_size': '(9000)', 'probability': '(True)'}), '(C=2, cache_size=9000, probability=True)\n', (11006, 11046), False, 'from sklearn import svm\n'), ((12387, 12422), 'numpy.mean', 'np.mean', (['svm_cross_validation_score'], {}), '(svm_cross_validation_score)\n', (12394, 12422), True, 'import numpy as np\n'), ((12479, 12512), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(4000)'}), '(max_iter=4000)\n', (12497, 12512), False, 'from sklearn.linear_model import LogisticRegression\n'), ((13798, 13832), 'numpy.mean', 'np.mean', (['lr_cross_validation_score'], {}), '(lr_cross_validation_score)\n', (13805, 13832), True, 'import numpy as np\n'), ((6634, 6661), 'pandas.DataFrame', 'pd.DataFrame', (['reg_keys_dict'], {}), '(reg_keys_dict)\n', (6646, 6661), True, 'import pandas as pd\n'), ((6692, 6723), 'pandas.DataFrame', 'pd.DataFrame', (['reg_keys_combined'], {}), 
'(reg_keys_combined)\n', (6704, 6723), True, 'import pandas as pd\n'), ((6841, 6875), 'pandas.DataFrame', 'pd.DataFrame', (['unknown_samples_dict'], {}), '(unknown_samples_dict)\n', (6853, 6875), True, 'import pandas as pd\n'), ((6905, 6943), 'pandas.DataFrame', 'pd.DataFrame', (['unknown_samples_combined'], {}), '(unknown_samples_combined)\n', (6917, 6943), True, 'import pandas as pd\n'), ((2841, 2862), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (2857, 2862), False, 'import sys\n'), ((2884, 2902), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2900, 2902), False, 'import sys\n'), ((2935, 2966), 'math.ceil', 'math.ceil', (['(0.1 * counter_length)'], {}), '(0.1 * counter_length)\n', (2944, 2966), False, 'import math\n'), ((3035, 3066), 'math.ceil', 'math.ceil', (['(0.2 * counter_length)'], {}), '(0.2 * counter_length)\n', (3044, 3066), False, 'import math\n'), ((3134, 3165), 'math.ceil', 'math.ceil', (['(0.5 * counter_length)'], {}), '(0.5 * counter_length)\n', (3143, 3165), False, 'import math\n'), ((3234, 3265), 'math.ceil', 'math.ceil', (['(0.7 * counter_length)'], {}), '(0.7 * counter_length)\n', (3243, 3265), False, 'import math\n'), ((3334, 3365), 'math.ceil', 'math.ceil', (['(0.8 * counter_length)'], {}), '(0.8 * counter_length)\n', (3343, 3365), False, 'import math\n'), ((3434, 3465), 'math.ceil', 'math.ceil', (['(0.9 * counter_length)'], {}), '(0.9 * counter_length)\n', (3443, 3465), False, 'import math\n'), ((3534, 3566), 'math.ceil', 'math.ceil', (['(0.95 * counter_length)'], {}), '(0.95 * counter_length)\n', (3543, 3566), False, 'import math\n'), ((8168, 8236), 're.findall', 're.findall', (['"""(?<=\\\\.)([^.]+)(?:\\\\.(?:onion|[^.]+(?:$|\\\\n)))"""', 'string'], {}), "('(?<=\\\\.)([^.]+)(?:\\\\.(?:onion|[^.]+(?:$|\\\\n)))', string)\n", (8178, 8236), False, 'import re\n')] |
import argparse
import json
import os
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import Dataset, ConcatDataset, DataLoader
from torchvision import transforms
from torchvision.datasets.folder import pil_loader
from tqdm import trange
DOMAINS = ['clipart', 'infograph', 'painting', 'quickdraw', 'real', 'sketch']
INPUT_SIZE = (84, 84)
NUM_WORKERS = 0
class DomainNetDataset(Dataset):
    """In-memory dataset for a single DomainNet domain.

    All images are loaded eagerly, resized to INPUT_SIZE and kept in RAM;
    per-channel mean/std statistics are computed from the loaded images and
    used by the default normalization transform.
    """

    def __init__(self, root: str, domain: str, class2data: dict, transform=None):
        """
        Args:
            root: dataset root directory.
            domain: domain name (e.g. 'real', 'sketch').
            class2data: mapping from class name to a list of image paths.
            transform: optional callable applied to each PIL image in
                __getitem__; defaults to ToTensor + per-dataset normalization.
        """
        self.classes = list(class2data.keys())
        self.n_classes = len(self.classes)
        self.root = root
        self.domain = domain
        data, labels, imgs, np_imgs = [], [], [], []
        for c, ds in class2data.items():
            for d in ds:
                # NOTE(review): `d` already looks like a full path produced by
                # DomainNet (root/domain/class/image); joining with `root` again
                # relies on `d` being absolute — confirm against the caller.
                p = root / d
                data.append(p)
                img = pil_loader(p).resize(INPUT_SIZE)
                imgs.append(img)
                np_imgs.append(np.asarray(img, dtype='uint8'))
                labels.append(self.classes.index(c))
        self.labels = labels
        self.data = data
        # Normalize to [0, 1] before computing channel statistics.
        np_imgs = np.asarray(np_imgs) / 255.0
        self.image_mean = np.mean(np_imgs, axis=(0, 1, 2))
        self.image_std = np.std(np_imgs, axis=(0, 1, 2))
        self.imgs = imgs
        if transform is None:
            self.transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=self.image_mean, std=self.image_std)
            ])
        else:
            # Fix: store the *supplied* transform. The original assigned the
            # `torchvision.transforms` module itself, which is not callable
            # and would crash in __getitem__.
            self.transform = transform

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        img = self.transform(self.imgs[idx])
        label = self.labels[idx]
        return img, label
class DomainNet:
    """Index of an on-disk DomainNet directory tree.

    Builds two views of the data: `domain2data` (domain -> class -> image
    paths) and `class2data` (class -> all image paths across domains).
    """

    def __init__(self, data_root):
        """Scan `data_root`, which must contain one sub-directory per domain
        in DOMAINS, each holding one sub-directory per class."""
        data_root = Path(data_root)
        self.data_root = data_root
        domain2data = {}
        classes = set()
        for domain in DOMAINS:
            class2data = {}
            path = data_root / domain
            for c in os.listdir(path):
                classes.add(c)
                p = path / c
                data_list = []
                for image in p.iterdir():
                    data_list.append(image)
                class2data[c] = data_list
            domain2data[domain] = class2data
        self.domain2data = domain2data
        # Merge per-domain lists into a single class -> paths mapping.
        class2data = {c: [] for c in classes}
        for d, dd in domain2data.items():
            for c, data in dd.items():
                class2data[c].extend(data)
        self.class2data = class2data

    def sample_classes(self, k: int):
        """Return the k classes with the largest number of images."""
        class2size = {c: len(d) for c, d in self.class2data.items()}
        k_largest = sorted(class2size.items(), key=lambda kv: -kv[1])[:k]
        return [i[0] for i in k_largest]

    def sample_data_from_domain(self, domain: str, classes: list, n_split: int = 1):
        """Build `n_split` disjoint DomainNetDataset splits of `domain`,
        restricted to `classes`. Each class's images are divided evenly
        (np.array_split) across the splits."""
        class2data = {c: self.domain2data[domain][c] for c in classes}
        labeler_class2data = [{c: [] for c in classes} for i in range(n_split)]
        for c, data in class2data.items():
            for i, d in enumerate(np.array_split(data, n_split)):
                labeler_class2data[i][c] = d.tolist()
        # Fix: build each dataset from its own split mapping `c2d`. The
        # original passed the full `class2data` for every split, so all
        # "splits" contained identical (complete) data and n_split was a no-op.
        datasets = [DomainNetDataset(self.data_root, domain, c2d) for c2d in labeler_class2data]
        return datasets
class WeakLabeler(nn.Module):
    """A weak labeler for DomainNet: a torchvision backbone whose classifier
    head is replaced by a fresh linear layer over `n_classes` outputs.

    When `pretrained` is True the backbone weights are frozen and only the
    new head trains; with `pretrained=False` an un-initialized backbone is
    used (so labelers learn more distinct representations) and the whole
    network is trainable.
    """

    def __init__(self, n_classes, model_name, pretrained=False):
        """Build the backbone named `model_name` and swap in a new `fc` head."""
        super(WeakLabeler, self).__init__()
        backbone_factory = getattr(torchvision.models, model_name)
        self.model = backbone_factory(pretrained=pretrained)
        # Freeze backbone parameters exactly when pretrained weights are used.
        trainable = not pretrained
        for param in self.model.parameters():
            param.requires_grad = trainable
        in_features = self.model.fc.in_features
        # The fresh head is trainable by default regardless of the freeze above.
        self.model.fc = nn.Linear(in_features, n_classes)

    def forward(self, images):
        """Forward pass: return raw class logits for `images`."""
        return self.model(images)
def train(train_data: DomainNetDataset, test_data, model_name, pretrained, n_epochs, lr, batch_size, device, test_batch_size=512):
    """Train a WeakLabeler on `train_data`, keeping the weights from the epoch
    with the best accuracy on `test_data`.

    Args:
        train_data: dataset to fit on (also sizes the output layer).
        test_data: dataset used for model selection.
        model_name: torchvision backbone name forwarded to WeakLabeler.
        pretrained: whether to use (and freeze) pretrained backbone weights.
        n_epochs: number of training epochs.
        lr: Adam learning rate.
        batch_size: training batch size.
        device: torch device to train on.
        test_batch_size: evaluation batch size.

    Returns:
        (model, classes): the best model, moved back to the CPU, and the class
        list defining its output indices.
    """
    train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=NUM_WORKERS)
    test_dataloader = DataLoader(test_data, batch_size=test_batch_size, shuffle=False, pin_memory=True, num_workers=NUM_WORKERS)
    model = WeakLabeler(train_data.n_classes, model_name, pretrained)
    # Only optimize trainable parameters (the backbone may be frozen).
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
    objective_function = torch.nn.CrossEntropyLoss()
    model.to(device)

    def _snapshot():
        # Fix: state_dict() returns *references* to the live parameter
        # tensors, so the saved "best" weights would silently keep tracking
        # further training. Clone every tensor to get a true snapshot.
        return {k: v.detach().clone() for k, v in model.state_dict().items()}

    best_model = _snapshot()
    best_acc = -1
    best_epoch = -1
    with trange(n_epochs, unit="epochs", ncols=100, position=0, leave=True) as pbar:
        for ep in range(n_epochs):
            for x, y in train_dataloader:
                # standard supervised step: zero grads, forward, loss, backward, update
                optimizer.zero_grad()
                inputs = x.to(device)
                labels = y.to(device)
                outputs = model(inputs)
                loss = objective_function(outputs, labels)
                loss.backward()
                optimizer.step()
            train_acc = evaluate(model, train_dataloader, device)
            acc = evaluate(model, test_dataloader, device)
            pbar.update()
            if acc > best_acc:
                best_model = _snapshot()
                best_acc = acc
                best_epoch = ep
            pbar.set_postfix(ordered_dict={'train acc': train_acc, 'valid acc': acc, 'best_epoch': best_epoch})
    model.load_state_dict(best_model)
    model.cpu()
    return model, train_data.classes
@torch.no_grad()
def evaluate(model, dataloader, device):
    """Compute classification accuracy of `model` over `dataloader`.

    The model is moved to `device` and switched to eval mode for the forward
    passes, then restored to train mode before returning. Predictions are the
    argmax over the model's output logits.
    """
    model.to(device)
    model.eval()
    pred_chunks, target_chunks = [], []
    for inputs, targets in dataloader:
        logits = model(inputs.to(device))
        batch_preds = logits.argmax(dim=1)
        pred_chunks.append(batch_preds.cpu().numpy())
        target_chunks.append(targets.numpy())
    accuracy = np.mean(np.concatenate(pred_chunks) == np.concatenate(target_chunks))
    model.train()
    return accuracy
@torch.no_grad()
def apply(model, dataset, model_classes, device, batch_size=512):
    """Label `dataset` with `model` and map the predictions into the
    dataset's own label space.

    Args:
        model: trained classifier whose output indices correspond to
            `model_classes`.
        dataset: target dataset; must expose a `classes` list.
        model_classes: class names for the model's output indices.
        device: torch device used for inference.
        batch_size: inference batch size.

    Returns:
        (preds, probas): preds[i] is the index of the predicted class within
        `dataset.classes`, or -1 when that class is absent from the target
        label space; probas[i] is the softmax distribution over
        `model_classes`.
    """
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=NUM_WORKERS)
    predictions = []
    probas = []
    model.to(device)
    model.eval()
    for x, y in dataloader:
        inputs = x.to(device)
        proba = torch.softmax(model(inputs), 1)
        _, batch_preds = torch.max(proba, 1)
        predictions.extend(batch_preds.tolist())
        probas.extend(proba.tolist())
    model.train()
    # Fix: use the *passed-in* dataset's label space instead of the global
    # `target_dataset`, and return the remapped labels — the original
    # computed `preds` but then returned the raw model-space `predictions`.
    target_class = dataset.classes
    preds = []
    for i in predictions:
        c = model_classes[i]
        preds.append(target_class.index(c) if c in target_class else -1)
    return preds, probas
def train_labelers(datasets, model_name, pretrained, n_epochs, lr, batch_size, device):
    """Train one weak labeler per dataset split.

    With several splits, each labeler trains on one split and is validated on
    the concatenation of all remaining splits (leave-one-in). With a single
    split — or a bare dataset — the same data serves as both train and
    validation set.

    Returns:
        (labelers, model_classes_l): trained models and, for each one, the
        class list defining its output indices.
    """
    if isinstance(datasets, list) and len(datasets) > 1:
        labelers, model_classes_l = [], []
        for held_idx, train_data in enumerate(datasets):
            rest = [d for j, d in enumerate(datasets) if j != held_idx]
            test_data = ConcatDataset(rest)
            labeler, model_classes = train(train_data, test_data, model_name, pretrained, n_epochs, lr, batch_size, device)
            labelers.append(labeler)
            model_classes_l.append(model_classes)
        return labelers, model_classes_l
    if isinstance(datasets, list):
        datasets = datasets[0]
    labeler, model_classes = train(datasets, datasets, model_name, pretrained, n_epochs, lr, batch_size, device)
    return [labeler], [model_classes]
def dataset_split(n, split=(0.8, 0.1, 0.1)):
    """Randomly partition the indices 0..n-1 into train/valid/test arrays.

    Args:
        n: total number of samples.
        split: (train, valid, test) fractions; must sum to 1.

    Returns:
        Tuple of three numpy index arrays (train_idx, valid_idx, test_idx).
    """
    # Fix: compare with a tolerance rather than exact float equality — sums
    # like 0.7 + 0.2 + 0.1 do not compare exactly equal to 1.0 in floating
    # point and would wrongly trip the assertion.
    assert abs(sum(split) - 1) < 1e-9, "split fractions must sum to 1"
    perm = np.random.permutation(n)
    s1, s2 = int(n * split[0]), int(n * (split[0] + split[1]))
    train_idx = perm[:s1]
    valid_idx = perm[s1:s2]
    test_idx = perm[s2:]
    return train_idx, valid_idx, test_idx
def dump(o, p):
    """Write dict `o` to path `p` as JSON, one key per line.

    Output format (byte-exact): an opening "{", then each entry as
    '\\t"key": <json value>' separated by ",\\n", then "\\n}".
    """
    keys = list(o.keys())
    last = len(keys) - 1
    with open(p, "w+") as fh:
        fh.write("{\n")
        for idx, k in enumerate(keys):
            fh.write(f'\t"{k}": ')
            json.dump(o[k], fh)
            if idx != last:
                fh.write(",\n")
        fh.write("\n}")
if __name__ == '__main__':
    # Pipeline: (1) index DomainNet, (2) pick the k most populous classes,
    # (3) train weak labelers on every non-target domain, (4) apply them to
    # the target domain, (5) dump train/valid/test JSON files with labels,
    # weak labels and weak probabilities per image.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_root', default='./', type=str)
    parser.add_argument('--save_root', default='../', type=str)
    parser.add_argument('--target_domain', default='real', type=str, choices=DOMAINS)
    parser.add_argument('--n_classes', default=5, type=int)
    parser.add_argument('--n_labeler_per_domain', default=1, type=int)
    parser.add_argument('--model_name', default='resnet18', type=str)
    parser.add_argument('--lr', default=0.005, type=float, help="learning rate")
    parser.add_argument('--n_epochs', default=100, type=int, help="number of training epochs")
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--pretrained', action='store_true')
    args = parser.parse_args()
    # NOTE(review): hard-coded CUDA device; fails on CPU-only machines.
    device = torch.device('cuda')
    # NOTE(review): variable name is a typo for "domainnet_data".
    doaminnet_data = DomainNet(args.data_root)
    sampled_classes = doaminnet_data.sample_classes(args.n_classes)
    print(f'sampled classes: {sampled_classes}')
    # Target-domain dataset the weak labelers will be applied to.
    target_dataset = doaminnet_data.sample_data_from_domain(args.target_domain, sampled_classes, 1)[0]
    labeler_domains = []
    labeler_classes = []
    labeler_predictions = []
    labeler_probas = []
    # Train weak labelers on every domain except the target one.
    for d in DOMAINS:
        if d != args.target_domain:
            sampled_datasets = doaminnet_data.sample_data_from_domain(d, sampled_classes, args.n_labeler_per_domain)
            labelers, model_classes_l = train_labelers(sampled_datasets, args.model_name, args.pretrained, args.n_epochs, args.lr, args.batch_size, device)
            for labeler, model_classes in zip(labelers, model_classes_l):
                # Apply each trained labeler to the target dataset and record
                # its provenance, predictions and class probabilities.
                predictions, probas = apply(labeler, target_dataset, model_classes, device, batch_size=512)
                labeler_domains.append(d)
                labeler_classes.append(model_classes)
                labeler_predictions.append(predictions)
                labeler_probas.append(probas)
    target_classes = target_dataset.classes
    save_dir = Path(f'{args.save_root}/domainnet-{args.target_domain}')
    os.makedirs(save_dir, exist_ok=True)
    # label.json: index -> class name; meta.json: per-labeler metadata.
    label_meta = {i: c for i, c in enumerate(target_classes)}
    lf_meta = {}
    for i in range(len(labeler_domains)):
        lf_meta[i] = {
            'domain' : labeler_domains[i],
            'label_space': labeler_classes[i],
        }
    data_meta = {
        'lf_meta' : lf_meta,
        'input_size': INPUT_SIZE,
    }
    # NOTE(review): file handles from open() are never closed here; prefer
    # a `with` block.
    json.dump(label_meta, open(save_dir / 'label.json', 'w'))
    json.dump(data_meta, open(save_dir / 'meta.json', 'w'), indent=4)
    labels = target_dataset.labels
    img_path = target_dataset.data
    # Transpose: per-labeler lists -> per-example tuples of weak labels/probs.
    weak_labels = list(zip(*labeler_predictions))
    weak_probs = list(zip(*labeler_probas))
    processed_data = {}
    for i in trange(len(target_dataset)):
        processed_data[i] = {
            'label' : labels[i],
            'weak_labels': list(weak_labels[i]),
            'data' : {
                'image_path': str(img_path[i]),
                'weak_probs': list(weak_probs[i]),
            },
        }
    # Random 80/10/10 split, re-keyed densely from 0 within each file.
    train_idx, valid_idx, test_idx = dataset_split(len(processed_data))
    train_data = {i: processed_data[idx] for i, idx in enumerate(train_idx)}
    dump(train_data, save_dir / 'train.json')
    valid_data = {i: processed_data[idx] for i, idx in enumerate(valid_idx)}
    dump(valid_data, save_dir / 'valid.json')
    test_data = {i: processed_data[idx] for i, idx in enumerate(test_idx)}
    dump(test_data, save_dir / 'test.json')
| [
"torch.nn.CrossEntropyLoss",
"torch.max",
"numpy.array_split",
"torch.sum",
"numpy.mean",
"os.listdir",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.asarray",
"numpy.concatenate",
"torchvision.transforms.ToTensor",
"numpy.random.permutation",
"torch.Tensor.cpu",
"torchvision.transform... | [((6253, 6268), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6266, 6268), False, 'import torch\n'), ((6856, 6871), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6869, 6871), False, 'import torch\n'), ((4501, 4606), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': 'NUM_WORKERS'}), '(train_data, batch_size=batch_size, shuffle=True, pin_memory=True,\n num_workers=NUM_WORKERS)\n', (4511, 4606), False, 'from torch.utils.data import Dataset, ConcatDataset, DataLoader\n'), ((4626, 4737), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'test_batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': 'NUM_WORKERS'}), '(test_data, batch_size=test_batch_size, shuffle=False, pin_memory\n =True, num_workers=NUM_WORKERS)\n', (4636, 4737), False, 'from torch.utils.data import Dataset, ConcatDataset, DataLoader\n'), ((4928, 4955), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (4953, 4955), False, 'import torch\n'), ((6714, 6741), 'numpy.concatenate', 'np.concatenate', (['predictions'], {}), '(predictions)\n', (6728, 6741), True, 'import numpy as np\n'), ((6754, 6774), 'numpy.concatenate', 'np.concatenate', (['labs'], {}), '(labs)\n', (6768, 6774), True, 'import numpy as np\n'), ((6786, 6814), 'numpy.mean', 'np.mean', (['(predictions == labs)'], {}), '(predictions == labs)\n', (6793, 6814), True, 'import numpy as np\n'), ((6957, 7060), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': 'NUM_WORKERS'}), '(dataset, batch_size=batch_size, shuffle=False, pin_memory=True,\n num_workers=NUM_WORKERS)\n', (6967, 7060), False, 'from torch.utils.data import Dataset, ConcatDataset, DataLoader\n'), ((8730, 8754), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), 
'(n)\n', (8751, 8754), True, 'import numpy as np\n'), ((9302, 9327), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9325, 9327), False, 'import argparse\n'), ((10103, 10123), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (10115, 10123), False, 'import torch\n'), ((11294, 11350), 'pathlib.Path', 'Path', (['f"""{args.save_root}/domainnet-{args.target_domain}"""'], {}), "(f'{args.save_root}/domainnet-{args.target_domain}')\n", (11298, 11350), False, 'from pathlib import Path\n'), ((11356, 11392), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (11367, 11392), False, 'import os\n'), ((1237, 1269), 'numpy.mean', 'np.mean', (['np_imgs'], {'axis': '(0, 1, 2)'}), '(np_imgs, axis=(0, 1, 2))\n', (1244, 1269), True, 'import numpy as np\n'), ((1296, 1327), 'numpy.std', 'np.std', (['np_imgs'], {'axis': '(0, 1, 2)'}), '(np_imgs, axis=(0, 1, 2))\n', (1302, 1327), True, 'import numpy as np\n'), ((1910, 1925), 'pathlib.Path', 'Path', (['data_root'], {}), '(data_root)\n', (1914, 1925), False, 'from pathlib import Path\n'), ((4130, 4160), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'n_classes'], {}), '(num_ftrs, n_classes)\n', (4139, 4160), True, 'import torch.nn as nn\n'), ((5067, 5133), 'tqdm.trange', 'trange', (['n_epochs'], {'unit': '"""epochs"""', 'ncols': '(100)', 'position': '(0)', 'leave': '(True)'}), "(n_epochs, unit='epochs', ncols=100, position=0, leave=True)\n", (5073, 5133), False, 'from tqdm import trange\n'), ((7303, 7322), 'torch.max', 'torch.max', (['proba', '(1)'], {}), '(proba, 1)\n', (7312, 7322), False, 'import torch\n'), ((1182, 1201), 'numpy.asarray', 'np.asarray', (['np_imgs'], {}), '(np_imgs)\n', (1192, 1201), True, 'import numpy as np\n'), ((2135, 2151), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2145, 2151), False, 'import os\n'), ((9138, 9161), 'json.dump', 'json.dump', (['o[key]', 'file'], {}), '(o[key], file)\n', (9147, 9161), False, 
'import json\n'), ((3282, 3311), 'numpy.array_split', 'np.array_split', (['data', 'n_split'], {}), '(data, n_split)\n', (3296, 3311), True, 'import numpy as np\n'), ((6589, 6612), 'torch.Tensor.cpu', 'torch.Tensor.cpu', (['preds'], {}), '(preds)\n', (6605, 6612), False, 'import torch\n'), ((1019, 1049), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': '"""uint8"""'}), "(img, dtype='uint8')\n", (1029, 1049), True, 'import numpy as np\n'), ((1455, 1476), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1474, 1476), False, 'from torchvision import transforms\n'), ((1495, 1557), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'self.image_mean', 'std': 'self.image_std'}), '(mean=self.image_mean, std=self.image_std)\n', (1515, 1557), False, 'from torchvision import transforms\n'), ((920, 933), 'torchvision.datasets.folder.pil_loader', 'pil_loader', (['p'], {}), '(p)\n', (930, 933), False, 'from torchvision.datasets.folder import pil_loader\n'), ((5500, 5517), 'torch.sum', 'torch.sum', (['labels'], {}), '(labels)\n', (5509, 5517), False, 'import torch\n')] |
import pickle
from collections import defaultdict
from pathlib import Path
from typing import Optional, Callable
import numpy as np
import torch
import torch.utils.data as torchdata
from ignite.contrib.handlers import ProgressBar
from ignite.engine import create_supervised_evaluator, Events, Engine
from ignite.metrics import Accuracy, Loss
from torch import nn
from torch.nn import functional as F
from alr import ALRModel
from alr import MCDropout
from alr.acquisition import BALD
from alr.data import DataManager
from alr.data import RelabelDataset, PseudoLabelDataset, UnlabelledDataset
from alr.data.datasets import Dataset
from alr.training import Trainer
from alr.training.samplers import RandomFixedLengthSampler
from alr.training.utils import EarlyStopper, PLPredictionSaver
from alr.utils import eval_fwd_exp, timeop, manual_seed
from alr.utils._type_aliases import _DeviceType, _Loss_fn
class PseudoLabelManager:
    """Harvests pseudo-labels from an unlabelled pool during training.

    On every ``ITERATION_COMPLETED`` event of the attached engine, the model is
    evaluated over the pool and the points whose predicted confidence reaches
    ``threshold`` are published on ``engine.state.pseudo_labelled_dataset``.
    """
    def __init__(
        self,
        pool: UnlabelledDataset,
        model: nn.Module,
        threshold: float,
        log_dir: Optional[str] = None,
        device: _DeviceType = None,
        **kwargs,
    ):
        # remaining kwargs are forwarded to the pool DataLoader
        bs = kwargs.pop("batch_size", 1024)
        shuffle = kwargs.pop("shuffle", False)
        # collected indices are positional offsets into the pool, so the
        # pool must be iterated in a fixed order
        assert not shuffle
        self._pool = pool
        self._loader = torchdata.DataLoader(
            pool, batch_size=bs, shuffle=shuffle, **kwargs
        )
        self._model = model
        self._log_dir = log_dir
        self._device = device
        self._threshold = threshold
        # number of confidently pseudo-labelled points, one entry per iteration
        self.acquired_sizes = []
    def attach(self, engine: Engine):
        """Register this manager's event handlers on ``engine``."""
        engine.add_event_handler(Events.STARTED, self._initialise)
        # could also be EPOCH_COMPLETED since there's only one iteration in each epoch
        engine.add_event_handler(Events.ITERATION_COMPLETED, self._load_labels)
    def _load_labels(self, engine: Engine):
        """Run the model on the pool and stash confident points on engine.state."""
        evaluator = create_supervised_evaluator(
            self._model, metrics=None, device=self._device
        )
        plc = PseudoLabelCollector(
            self._threshold,
            log_dir=self._log_dir,
        )
        plc.attach(evaluator, batch_size=self._loader.batch_size)
        plc.global_step_from_engine(engine)
        evaluator.run(self._loader)
        # pl_indices / pl_plabs are set on evaluator.state by PseudoLabelCollector
        indices, pseudo_labels = (
            evaluator.state.pl_indices.cpu().numpy(),
            evaluator.state.pl_plabs.cpu().numpy(),
        )
        self.acquired_sizes.append(indices.shape[0])
        if indices.shape[0]:
            confident_points = torchdata.Subset(self._pool, indices)
            if self._pool.debug:
                # pool returns target labels too
                engine.state.pseudo_labelled_dataset = RelabelDataset(
                    confident_points, pseudo_labels
                )
            else:
                engine.state.pseudo_labelled_dataset = PseudoLabelDataset(
                    confident_points, pseudo_labels
                )
        else:
            engine.state.pseudo_labelled_dataset = None
    @staticmethod
    def _initialise(engine: Engine):
        # no pseudo-labels exist before the first iteration
        engine.state.pseudo_labelled_dataset = None
class PseudoLabelCollector:
    """Evaluator handler that collects indices and labels of confident predictions.

    After an evaluator run, ``engine.state.pl_indices`` holds the dataset indices
    whose top (transformed) probability is at least ``threshold``, and
    ``engine.state.pl_plabs`` holds the corresponding predicted labels.
    """
    def __init__(
        self,
        threshold: float,
        log_dir: Optional[str] = None,
        pred_transform: Callable[[torch.Tensor], torch.Tensor] = lambda x: x.exp(),
    ):
        """
        Args:
            threshold: minimum transformed probability for a point to be kept.
            log_dir: if provided, raw predictions are also saved via PLPredictionSaver.
            pred_transform: maps raw model outputs to probabilities
                (defaults to ``exp``, i.e. assumes log-probability outputs).
        """
        self._indices = []
        self._plabs = []
        self._pred_transform = pred_transform
        self._output_transform = lambda x: x
        self._thresh = threshold
        # fix: removed the unused self._targets / self._preds buffers that were
        # allocated here but never read or appended to anywhere in this class
        if log_dir:
            self._saver = PLPredictionSaver(log_dir, pred_transform=pred_transform)
        else:
            self._saver = None
        self._batch_size = None
    def _parse(self, engine: Engine):
        """Inspect one batch of outputs and buffer the confident predictions."""
        preds, targets = self._output_transform(engine.state.output)
        # state.iteration starts with 1; convert to a 0-based dataset offset
        iteration = engine.state.iteration - 1
        offset = iteration * self._batch_size
        with torch.no_grad():
            preds = self._pred_transform(preds)
            preds_max, plabs = torch.max(preds, dim=-1)
            mask = torch.nonzero(preds_max >= self._thresh).flatten()
            if mask.shape[0]:
                # plabs = [N,]
                self._plabs.append(plabs[mask])
                self._indices.append(mask + offset)
    def _flush(self, engine: Engine):
        """Publish buffered results on engine.state and reset the buffers."""
        if self._indices and self._plabs:
            engine.state.pl_indices = torch.cat(self._indices)
            engine.state.pl_plabs = torch.cat(self._plabs)
        else:
            # nothing crossed the threshold in this run
            engine.state.pl_indices = torch.Tensor([])
            engine.state.pl_plabs = torch.Tensor([])
        self._indices = []
        self._plabs = []
    def attach(self, engine: Engine, batch_size: int, output_transform=lambda x: x):
        r"""
        Args:
            engine (Engine): ignite engine object
            batch_size (int): engine's batch size
            output_transform (Callable): if engine.state.output is not (preds, target),
                then output_transform should return aforementioned tuple.
        Returns:
            NoneType: None
        """
        engine.add_event_handler(Events.ITERATION_COMPLETED, self._parse)
        engine.add_event_handler(Events.COMPLETED, self._flush)
        self._output_transform = output_transform
        self._batch_size = batch_size
        if self._saver:
            self._saver.attach(engine, output_transform=output_transform)
    def global_step_from_engine(self, engine: Engine):
        """Synchronise the prediction saver's global step counter with ``engine``."""
        if self._saver:
            self._saver.global_step_from_engine(engine)
def _update_dataloader(
    loader: torchdata.DataLoader,
    dataset: torchdata.Dataset,
    sampler: Optional[torchdata.Sampler] = None,
):
    """Clone ``loader``'s configuration around a new dataset (and optional sampler)."""
    supported_samplers = (
        torchdata.SequentialSampler,
        torchdata.RandomSampler,
        RandomFixedLengthSampler,
    )
    if not isinstance(loader.sampler, supported_samplers):
        raise ValueError(
            f"Only sequential, random, and random fixed length samplers "
            f"are supported in _update_dataloader"
        )
    # public attributes of the loader double as its constructor arguments,
    # except for the ones we are replacing below
    excluded = {"dataset", "sampler", "batch_sampler", "dataset_kind"}
    kwargs = {
        name: getattr(loader, name)
        for name in loader.__dict__
        if not name.startswith("_") and name not in excluded
    }
    kwargs["dataset"] = dataset
    # a None sampler lets DataLoader pick Sequential/Random based on `shuffle`
    kwargs["sampler"] = sampler
    return torchdata.DataLoader(**kwargs)
def create_pseudo_label_trainer(
    model: ALRModel,
    loss: _Loss_fn,
    optimiser: str,
    train_loader: torchdata.DataLoader,
    val_loader: torchdata.DataLoader,
    pseudo_label_manager: PseudoLabelManager,
    rfls_len: Optional[int] = None,
    patience: Optional[int] = None,
    reload_best: Optional[bool] = None,
    epochs: Optional[int] = 1,
    device: _DeviceType = None,
    *args,
    **kwargs,
):
    """Build an ignite Engine whose每-iteration step is one full supervised fit.

    Each engine iteration trains ``model`` from scratch on ``train_loader``,
    optionally concatenated with the pseudo-labelled dataset that
    ``pseudo_label_manager`` published on ``engine.state`` after the previous
    iteration.  The step returns ``(train_loss, train_acc)`` of the fit.
    """
    def _step(engine: Engine, _):
        # update loader accordingly: if pld is not none, concatenate them
        new_loader = train_loader
        pld = engine.state.pseudo_labelled_dataset
        if pld is not None:
            # only reset weights if engine.state.epoch != 1
            model.reset_weights()
            train_ds = torchdata.ConcatDataset((train_loader.dataset, pld))
            # update dataloader's dataset attribute
            if rfls_len:
                new_loader = _update_dataloader(
                    train_loader,
                    train_ds,
                    RandomFixedLengthSampler(train_ds, length=rfls_len, shuffle=True),
                )
            else:
                new_loader = _update_dataloader(train_loader, train_ds)
        else:
            assert engine.state.epoch == 1
        # begin supervised training
        trainer = Trainer(
            model,
            loss,
            optimiser,
            patience,
            reload_best,
            device=device,
            *args,
            **kwargs,
        )
        history = trainer.fit(
            new_loader,
            val_loader=val_loader,
            epochs=epochs,
        )
        # if early stopping was applied w/ patience, then the actual train acc and loss should be
        # -patience from the final loss/acc UNLESS we reached the maximum number of epochs.
        if patience and len(history["train_loss"]) != epochs:
            return history["train_loss"][-patience], history["train_acc"][-patience]
        return history["train_loss"][-1], history["train_acc"][-1]
    e = Engine(_step)
    # the manager populates engine.state.pseudo_labelled_dataset between steps
    pseudo_label_manager.attach(e)
    return e
class EphemeralTrainer:
    """Trainer alternating supervised fits with pseudo-label harvesting.

    Each outer iteration trains the model on the labelled data (augmented with
    the previous iteration's confident pseudo-labels from ``pool``), then
    harvests a fresh set of pseudo-labels above ``threshold``.
    """
    def __init__(
        self,
        model: ALRModel,
        pool: UnlabelledDataset,
        loss: _Loss_fn,
        optimiser: str,
        threshold: float,
        random_fixed_length_sampler_length: Optional[int] = None,
        log_dir: Optional[str] = None,
        patience: Optional[int] = None,
        reload_best: Optional[bool] = False,
        device: _DeviceType = None,
        pool_loader_kwargs: Optional[dict] = {},
        *args,
        **kwargs,
    ):
        # NOTE(review): mutable default for pool_loader_kwargs is shared across
        # instances; safe only while it is never mutated in place -- confirm.
        self._pool = pool
        self._model = model
        self._loss = loss
        self._optimiser = optimiser
        self._patience = patience
        self._reload_best = reload_best
        self._device = device
        self._args = args
        self._kwargs = kwargs
        self._threshold = threshold
        self._log_dir = log_dir
        self._pool_loader_kwargs = pool_loader_kwargs
        self._rfls_len = random_fixed_length_sampler_length
    def fit(
        self,
        train_loader: torchdata.DataLoader,
        val_loader: Optional[torchdata.DataLoader] = None,
        iterations: Optional[int] = 1,
        epochs: Optional[int] = 1,
    ):
        """Run ``iterations`` pseudo-label rounds of up to ``epochs`` epochs each.

        Returns a history dict of per-iteration train/val metrics plus the
        effective ``train_size`` (labelled + pseudo-labelled) per iteration.
        """
        if self._patience and val_loader is None:
            raise ValueError(
                "If patience is specified, then val_loader must be provided in .fit()."
            )
        val_evaluator = create_supervised_evaluator(
            self._model,
            metrics={"acc": Accuracy(), "loss": Loss(self._loss)},
            device=self._device,
        )
        history = defaultdict(list)
        pbar = ProgressBar()
        def _log_metrics(engine: Engine):
            # train_loss and train_acc are moving averages of the last epoch
            # in the supervised training loop
            train_loss, train_acc = engine.state.output
            history[f"train_loss"].append(train_loss)
            history[f"train_acc"].append(train_acc)
            pbar.log_message(
                f"Eph. iteration {engine.state.epoch}/{engine.state.max_epochs}\n"
                f"\ttrain acc = {train_acc}, train loss = {train_loss}"
            )
            if val_loader is None:
                return  # job done
            # val loader - save to history and print metrics. Also, add handlers to
            # evaluator (e.g. early stopping, model checkpointing that depend on val_acc)
            metrics = val_evaluator.run(val_loader).metrics
            history[f"val_acc"].append(metrics["acc"])
            history[f"val_loss"].append(metrics["loss"])
            pbar.log_message(
                f"\tval acc = {metrics['acc']}, val loss = {metrics['loss']}"
            )
        pseudo_label_manager = PseudoLabelManager(
            pool=self._pool,
            model=self._model,
            threshold=self._threshold,
            log_dir=self._log_dir,
            device=self._device,
            **self._pool_loader_kwargs,
        )
        trainer = create_pseudo_label_trainer(
            model=self._model,
            loss=self._loss,
            optimiser=self._optimiser,
            train_loader=train_loader,
            val_loader=val_loader,
            pseudo_label_manager=pseudo_label_manager,
            rfls_len=self._rfls_len,
            patience=self._patience,
            reload_best=self._reload_best,
            epochs=epochs,
            device=self._device,
            *self._args,
            **self._kwargs,
        )
        # output of trainer are running averages of train_loss and train_acc (from the
        # last epoch of the supervised trainer)
        pbar.attach(trainer, output_transform=lambda x: {"loss": x[0], "acc": x[1]})
        if val_loader is not None and self._patience:
            es = EarlyStopper(
                self._model, self._patience, trainer, key="acc", mode="max"
            )
            es.attach(val_evaluator)
        trainer.add_event_handler(Events.EPOCH_COMPLETED, _log_metrics)
        # one pseudo-label round per "epoch": epoch_length=1 makes each engine
        # epoch run exactly one _step (i.e. one full supervised fit)
        trainer.run(
            range(iterations),
            max_epochs=iterations,
            epoch_length=1,
        )
        if val_loader is not None and self._patience and self._reload_best:
            es.reload_best()
        history["train_size"] = np.array(pseudo_label_manager.acquired_sizes) + len(
            train_loader.dataset
        )
        return history
    def evaluate(self, data_loader: torchdata.DataLoader) -> dict:
        """Return ``{"acc": ..., "loss": ...}`` of the model on ``data_loader``."""
        evaluator = create_supervised_evaluator(
            self._model,
            metrics={"acc": Accuracy(), "loss": Loss(self._loss)},
            device=self._device,
        )
        return evaluator.run(data_loader).metrics
def main(threshold: float, b: int):
    """Run the ephemeral pseudo-label active-learning experiment on MNIST.

    Args:
        threshold: pseudo-label confidence threshold.
        b: number of points acquired by BALD per active-learning iteration.
    """
    manual_seed(42)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    kwargs = dict(num_workers=4, pin_memory=True)
    # experiment hyper-parameters
    BATCH_SIZE = 64
    REPS = 3
    ITERS = 14
    VAL_SIZE = 5_000
    MIN_TRAIN_LEN = 12_500
    SSL_ITERATIONS = 200
    EPOCHS = 200
    accs = defaultdict(list)
    # one output directory tree per (threshold, b) configuration
    template = f"thresh_{threshold}_b_{b}"
    calib_metrics = Path("calib_metrics") / template
    saved_models = Path("saved_models") / template
    metrics = Path("metrics") / template
    calib_metrics.mkdir(parents=True)
    saved_models.mkdir(parents=True)
    metrics.mkdir(parents=True)
    train, pool, test = Dataset.MNIST.get_fixed()
    val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))
    pool = UnlabelledDataset(pool)
    test_loader = torchdata.DataLoader(test, batch_size=512, shuffle=False, **kwargs)
    val_loader = torchdata.DataLoader(val, batch_size=512, shuffle=False, **kwargs)
    for r in range(1, REPS + 1):
        model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)
        bald = BALD(eval_fwd_exp(model), device=device, batch_size=512, **kwargs)
        dm = DataManager(train, pool, bald)
        dm.reset()  # to reset pool
        print(f"=== repeat #{r} of {REPS} ===")
        for i in range(1, ITERS + 1):
            # don't reset weights: let ephemeral trainer take care of it
            # since we're collecting calibration metrics,
            # make pool return targets too. (i.e. debug mode)
            with dm.unlabelled.tmp_debug():
                trainer = EphemeralTrainer(
                    model,
                    dm.unlabelled,
                    F.nll_loss,
                    "Adam",
                    threshold=threshold,
                    random_fixed_length_sampler_length=MIN_TRAIN_LEN,
                    log_dir=(calib_metrics / f"rep_{r}" / f"iter_{i}"),
                    patience=3,
                    reload_best=True,
                    device=device,
                    pool_loader_kwargs=kwargs,
                )
                train_loader = torchdata.DataLoader(
                    dm.labelled,
                    batch_size=BATCH_SIZE,
                    sampler=RandomFixedLengthSampler(
                        dm.labelled, MIN_TRAIN_LEN, shuffle=True
                    ),
                    **kwargs,
                )
                with timeop() as t:
                    history = trainer.fit(
                        train_loader,
                        val_loader,
                        iterations=SSL_ITERATIONS,
                        epochs=EPOCHS,
                    )
            # eval on test set
            test_metrics = trainer.evaluate(test_loader)
            accs[dm.n_labelled].append(test_metrics["acc"])
            print(f"-- Iteration {i} of {ITERS} --")
            print(
                f"\ttrain: {dm.n_labelled}; pool: {dm.n_unlabelled}\n"
                f"\t[test] acc: {test_metrics['acc']}; time: {t}"
            )
            # save stuff
            with open(metrics / f"rep_{r}_iter_{i}.pkl", "wb") as fp:
                payload = {
                    "history": history,
                    "test_metrics": test_metrics,
                    "labelled_classes": dm.unlabelled.labelled_classes,
                    "labelled_indices": dm.unlabelled.labelled_indices,
                }
                pickle.dump(payload, fp)
            torch.save(model.state_dict(), saved_models / f"rep_{r}_iter_{i}.pth")
            # finally, acquire points
            dm.acquire(b)
    with open(f"{template}_accs.pkl", "wb") as fp:
        pickle.dump(accs, fp)
if __name__ == "__main__":
    # single-configuration entry point for the pseudo-label experiment
    main(threshold=0.95, b=10)
| [
"ignite.engine.create_supervised_evaluator",
"torch.utils.data.ConcatDataset",
"ignite.metrics.Loss",
"ignite.metrics.Accuracy",
"torch.max",
"alr.training.utils.PLPredictionSaver",
"alr.MCDropout",
"ignite.engine.Engine",
"alr.data.datasets.Dataset.MNIST.get_fixed",
"numpy.array",
"torch.cuda.i... | [((6568, 6598), 'torch.utils.data.DataLoader', 'torchdata.DataLoader', ([], {}), '(**kwargs)\n', (6588, 6598), True, 'import torch.utils.data as torchdata\n'), ((8645, 8658), 'ignite.engine.Engine', 'Engine', (['_step'], {}), '(_step)\n', (8651, 8658), False, 'from ignite.engine import create_supervised_evaluator, Events, Engine\n'), ((13399, 13414), 'alr.utils.manual_seed', 'manual_seed', (['(42)'], {}), '(42)\n', (13410, 13414), False, 'from alr.utils import eval_fwd_exp, timeop, manual_seed\n'), ((13692, 13709), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (13703, 13709), False, 'from collections import defaultdict\n'), ((14031, 14056), 'alr.data.datasets.Dataset.MNIST.get_fixed', 'Dataset.MNIST.get_fixed', ([], {}), '()\n', (14054, 14056), False, 'from alr.data.datasets import Dataset\n'), ((14147, 14170), 'alr.data.UnlabelledDataset', 'UnlabelledDataset', (['pool'], {}), '(pool)\n', (14164, 14170), False, 'from alr.data import RelabelDataset, PseudoLabelDataset, UnlabelledDataset\n'), ((14189, 14256), 'torch.utils.data.DataLoader', 'torchdata.DataLoader', (['test'], {'batch_size': '(512)', 'shuffle': '(False)'}), '(test, batch_size=512, shuffle=False, **kwargs)\n', (14209, 14256), True, 'import torch.utils.data as torchdata\n'), ((14274, 14340), 'torch.utils.data.DataLoader', 'torchdata.DataLoader', (['val'], {'batch_size': '(512)', 'shuffle': '(False)'}), '(val, batch_size=512, shuffle=False, **kwargs)\n', (14294, 14340), True, 'import torch.utils.data as torchdata\n'), ((1313, 1381), 'torch.utils.data.DataLoader', 'torchdata.DataLoader', (['pool'], {'batch_size': 'bs', 'shuffle': 'shuffle'}), '(pool, batch_size=bs, shuffle=shuffle, **kwargs)\n', (1333, 1381), True, 'import torch.utils.data as torchdata\n'), ((1901, 1976), 'ignite.engine.create_supervised_evaluator', 'create_supervised_evaluator', (['self._model'], {'metrics': 'None', 'device': 'self._device'}), '(self._model, metrics=None, device=self._device)\n', 
(1928, 1976), False, 'from ignite.engine import create_supervised_evaluator, Events, Engine\n'), ((7910, 8000), 'alr.training.Trainer', 'Trainer', (['model', 'loss', 'optimiser', 'patience', 'reload_best', '*args'], {'device': 'device'}), '(model, loss, optimiser, patience, reload_best, *args, device=device,\n **kwargs)\n', (7917, 8000), False, 'from alr.training import Trainer\n'), ((10272, 10289), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10283, 10289), False, 'from collections import defaultdict\n'), ((10305, 10318), 'ignite.contrib.handlers.ProgressBar', 'ProgressBar', ([], {}), '()\n', (10316, 10318), False, 'from ignite.contrib.handlers import ProgressBar\n'), ((13774, 13795), 'pathlib.Path', 'Path', (['"""calib_metrics"""'], {}), "('calib_metrics')\n", (13778, 13795), False, 'from pathlib import Path\n'), ((13826, 13846), 'pathlib.Path', 'Path', (['"""saved_models"""'], {}), "('saved_models')\n", (13830, 13846), False, 'from pathlib import Path\n'), ((13872, 13887), 'pathlib.Path', 'Path', (['"""metrics"""'], {}), "('metrics')\n", (13876, 13887), False, 'from pathlib import Path\n'), ((14551, 14581), 'alr.data.DataManager', 'DataManager', (['train', 'pool', 'bald'], {}), '(train, pool, bald)\n', (14562, 14581), False, 'from alr.data import DataManager\n'), ((2519, 2556), 'torch.utils.data.Subset', 'torchdata.Subset', (['self._pool', 'indices'], {}), '(self._pool, indices)\n', (2535, 2556), True, 'import torch.utils.data as torchdata\n'), ((3613, 3670), 'alr.training.utils.PLPredictionSaver', 'PLPredictionSaver', (['log_dir'], {'pred_transform': 'pred_transform'}), '(log_dir, pred_transform=pred_transform)\n', (3630, 3670), False, 'from alr.training.utils import EarlyStopper, PLPredictionSaver\n'), ((4002, 4017), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4015, 4017), False, 'import torch\n'), ((4098, 4122), 'torch.max', 'torch.max', (['preds'], {'dim': '(-1)'}), '(preds, dim=-1)\n', (4107, 4122), False, 'import 
torch\n'), ((4473, 4497), 'torch.cat', 'torch.cat', (['self._indices'], {}), '(self._indices)\n', (4482, 4497), False, 'import torch\n'), ((4534, 4556), 'torch.cat', 'torch.cat', (['self._plabs'], {}), '(self._plabs)\n', (4543, 4556), False, 'import torch\n'), ((4609, 4625), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (4621, 4625), False, 'import torch\n'), ((4662, 4678), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (4674, 4678), False, 'import torch\n'), ((7360, 7412), 'torch.utils.data.ConcatDataset', 'torchdata.ConcatDataset', (['(train_loader.dataset, pld)'], {}), '((train_loader.dataset, pld))\n', (7383, 7412), True, 'import torch.utils.data as torchdata\n'), ((12460, 12533), 'alr.training.utils.EarlyStopper', 'EarlyStopper', (['self._model', 'self._patience', 'trainer'], {'key': '"""acc"""', 'mode': '"""max"""'}), "(self._model, self._patience, trainer, key='acc', mode='max')\n", (12472, 12533), False, 'from alr.training.utils import EarlyStopper, PLPredictionSaver\n'), ((12936, 12981), 'numpy.array', 'np.array', (['pseudo_label_manager.acquired_sizes'], {}), '(pseudo_label_manager.acquired_sizes)\n', (12944, 12981), True, 'import numpy as np\n'), ((13453, 13478), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13476, 13478), False, 'import torch\n'), ((14476, 14495), 'alr.utils.eval_fwd_exp', 'eval_fwd_exp', (['model'], {}), '(model)\n', (14488, 14495), False, 'from alr.utils import eval_fwd_exp, timeop, manual_seed\n'), ((2694, 2741), 'alr.data.RelabelDataset', 'RelabelDataset', (['confident_points', 'pseudo_labels'], {}), '(confident_points, pseudo_labels)\n', (2708, 2741), False, 'from alr.data import RelabelDataset, PseudoLabelDataset, UnlabelledDataset\n'), ((2853, 2904), 'alr.data.PseudoLabelDataset', 'PseudoLabelDataset', (['confident_points', 'pseudo_labels'], {}), '(confident_points, pseudo_labels)\n', (2871, 2904), False, 'from alr.data import RelabelDataset, PseudoLabelDataset, UnlabelledDataset\n'), 
((14391, 14444), 'alr.MCDropout', 'MCDropout', (['Dataset.MNIST.model'], {'forward': '(20)', 'fast': '(True)'}), '(Dataset.MNIST.model, forward=20, fast=True)\n', (14400, 14444), False, 'from alr import MCDropout\n'), ((16807, 16831), 'pickle.dump', 'pickle.dump', (['payload', 'fp'], {}), '(payload, fp)\n', (16818, 16831), False, 'import pickle\n'), ((17056, 17077), 'pickle.dump', 'pickle.dump', (['accs', 'fp'], {}), '(accs, fp)\n', (17067, 17077), False, 'import pickle\n'), ((4142, 4182), 'torch.nonzero', 'torch.nonzero', (['(preds_max >= self._thresh)'], {}), '(preds_max >= self._thresh)\n', (4155, 4182), False, 'import torch\n'), ((7623, 7688), 'alr.training.samplers.RandomFixedLengthSampler', 'RandomFixedLengthSampler', (['train_ds'], {'length': 'rfls_len', 'shuffle': '(True)'}), '(train_ds, length=rfls_len, shuffle=True)\n', (7647, 7688), False, 'from alr.training.samplers import RandomFixedLengthSampler\n'), ((10171, 10181), 'ignite.metrics.Accuracy', 'Accuracy', ([], {}), '()\n', (10179, 10181), False, 'from ignite.metrics import Accuracy, Loss\n'), ((10191, 10207), 'ignite.metrics.Loss', 'Loss', (['self._loss'], {}), '(self._loss)\n', (10195, 10207), False, 'from ignite.metrics import Accuracy, Loss\n'), ((13225, 13235), 'ignite.metrics.Accuracy', 'Accuracy', ([], {}), '()\n', (13233, 13235), False, 'from ignite.metrics import Accuracy, Loss\n'), ((13245, 13261), 'ignite.metrics.Loss', 'Loss', (['self._loss'], {}), '(self._loss)\n', (13249, 13261), False, 'from ignite.metrics import Accuracy, Loss\n'), ((15800, 15808), 'alr.utils.timeop', 'timeop', ([], {}), '()\n', (15806, 15808), False, 'from alr.utils import eval_fwd_exp, timeop, manual_seed\n'), ((15617, 15683), 'alr.training.samplers.RandomFixedLengthSampler', 'RandomFixedLengthSampler', (['dm.labelled', 'MIN_TRAIN_LEN'], {'shuffle': '(True)'}), '(dm.labelled, MIN_TRAIN_LEN, shuffle=True)\n', (15641, 15683), False, 'from alr.training.samplers import RandomFixedLengthSampler\n')] |
"""
Objective functions can be implemented in this file.
Author:
<NAME>
"""
from random import Random
from zoopt.dimension import Dimension
import numpy as np
class SetCover:
    """
    Weighted set cover problem for discrete optimization.

    A solution is a 0/1 vector selecting subsets; the objective is the total
    weight of the selected subsets, with the total weight of ALL subsets added
    as a penalty when the selection does not cover the whole universe.
    This problem has some extra initialization tasks, thus we define it as a class.
    """
    __weight = None
    __subset = None
    def __init__(self):
        # per-subset weights (20 subsets over a universe of 30 elements)
        self.__weight = [0.8356, 0.5495, 0.4444, 0.7269, 0.9960, 0.6633, 0.5062, 0.8429, 0.1293, 0.7355,
                         0.7979, 0.2814, 0.7962, 0.1754, 0.0267, 0.9862, 0.1786, 0.5884, 0.6289, 0.3008]
        self.__subset = []
        self.__subset.append([0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0])
        self.__subset.append([0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0])
        self.__subset.append([1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0])
        self.__subset.append([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0])
        self.__subset.append([1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1])
        self.__subset.append([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0])
        self.__subset.append([0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0])
        self.__subset.append([0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0])
        self.__subset.append([0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0])
        self.__subset.append([0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1])
        self.__subset.append([0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0])
        self.__subset.append([0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1])
        self.__subset.append([1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1])
        self.__subset.append([1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1])
        self.__subset.append([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
        self.__subset.append([1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0])
        self.__subset.append([1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1])
        self.__subset.append([0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1])
        self.__subset.append([0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0])
        self.__subset.append([0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1])
    def fx(self, solution):
        """
        Objective function.
        :param solution: a Solution object
        :return: the value of f(x)
        """
        x = solution.get_x()
        # total weight of all subsets; used as the infeasibility penalty
        allweight = sum(self.__weight)
        countw = 0
        covered = set()
        for i in range(len(self.__subset)):
            if x[i] == 1:
                countw += self.__weight[i]
                covered.update(
                    j for j, bit in enumerate(self.__subset[i]) if bit == 1
                )
        # penalise any selection that fails to cover the whole universe
        if len(covered) != len(self.__subset[0]):
            countw += allweight
        return countw
    @property
    def dim(self):
        """
        Dimension of set cover problem.
        :return: Dimension instance
        """
        dim_size = 20
        dim_regs = [[0, 1]] * dim_size
        dim_tys = [False] * dim_size
        return Dimension(dim_size, dim_regs, dim_tys)
def sphere(solution):
    """
    Sphere benchmark for continuous optimization (optimum at every x_i = 0.2).
    """
    total = 0
    for xi in solution.get_x():
        offset = xi - 0.2
        total += offset * offset
    return total
def sphere_mixed(solution):
    """
    Sphere benchmark for mixed optimization: sum of squared components.
    """
    total = 0
    for component in solution.get_x():
        total += component * component
    return total
def sphere_discrete_order(solution):
    """
    Sphere benchmark for integer/ordered discrete optimization
    (optimum at every x_i = 2).

    Fix: removed the unused locals ``a = 0`` and ``rd = Random()`` that the
    original created but never read.
    """
    x = solution.get_x()
    return sum((xi - 2) * (xi - 2) for xi in x)
def ackley(solution):
    """
    Ackley benchmark (shifted so the optimum is at every x_i = 0.2)
    for continuous optimization.
    """
    x = solution.get_x()
    bias = 0.2
    size = len(x)
    mean_square = sum((xi - bias) * (xi - bias) for xi in x) / size
    mean_cosine = sum(np.cos(2.0 * np.pi * (xi - bias)) for xi in x) / size
    value = -20 * np.exp(-0.2 * np.sqrt(mean_square)) - np.exp(mean_cosine) + 20.0 + np.e
    return value
def ackley_noise_creator(mu, sigma):
    """
    Build a noisy Ackley objective: each evaluation adds one
    N(mu, sigma) sample to the true Ackley value.
    """
    def noisy_ackley(solution):
        return ackley(solution) + np.random.normal(mu, sigma, 1)
    return noisy_ackley
| [
"numpy.random.normal",
"numpy.sqrt",
"random.Random",
"zoopt.dimension.Dimension",
"numpy.exp",
"numpy.cos"
] | [((4688, 4696), 'random.Random', 'Random', ([], {}), '()\n', (4694, 4696), False, 'from random import Random\n'), ((4178, 4216), 'zoopt.dimension.Dimension', 'Dimension', (['dim_size', 'dim_regs', 'dim_tys'], {}), '(dim_size, dim_regs, dim_tys)\n', (4187, 4216), False, 'from zoopt.dimension import Dimension\n'), ((5275, 5305), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', '(1)'], {}), '(mu, sigma, 1)\n', (5291, 5305), True, 'import numpy as np\n'), ((4993, 5025), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * (i - bias))'], {}), '(2.0 * np.pi * (i - bias))\n', (4999, 5025), True, 'import numpy as np\n'), ((5094, 5109), 'numpy.exp', 'np.exp', (['ave_cos'], {}), '(ave_cos)\n', (5100, 5109), True, 'import numpy as np\n'), ((5074, 5090), 'numpy.sqrt', 'np.sqrt', (['ave_seq'], {}), '(ave_seq)\n', (5081, 5090), True, 'import numpy as np\n')] |
from typing import Iterable, Union, Optional
from time import strftime, localtime
import pandas as pd
import numpy as np
from tqdm import tqdm
import qontrol
from plab.config import logger, CONFIG
from plab.measurement import measurement, Measurement
from plab.smu.smu_control import smu_control
@measurement
def sweep_current(
    imin: float = 0, imax: float = 50e-3, steps: int = 20, n: int = 1
) -> pd.DataFrame:
    """Sweep drive current on each channel and record the measured current.

    Works only for the qontrol q8iv driver.

    Fix: the original re-bound ``currents = np.zeros_like(currents)`` before
    the inner loop, so every step drove a setpoint of 0 A instead of sweeping
    the linspace values; setpoints and measurements now use separate arrays.

    Args:
        imin: minimum current (A)
        imax: maximum current (A)
        steps: number of sweep points
        n: number of channels (int) or an iterable of channel indices

    Returns:
        DataFrame with the setpoints in column ``i`` and one ``i_<channel>``
        column of measured currents per swept channel.
    """
    # NOTE(review): `q` is not defined in this module -- presumably the
    # qontrol device handle; confirm how it is meant to be obtained.
    setpoints = np.linspace(imin, imax, steps)
    df = pd.DataFrame(dict(i=setpoints))
    channels = range(n) if isinstance(n, int) else n
    for channel in channels:
        measured = np.zeros_like(setpoints)
        # zero all channels before sweeping this one
        q.v[:] = 0
        for j, setpoint in enumerate(setpoints):
            q.i[channel] = float(setpoint)
            measured[j] = q.i[channel]
        df[f"i_{channel}"] = measured
    return df
def get_current(channel: int, voltage: float) -> float:
    """Sets voltage for a channel and returns the measured current.

    Args:
        channel: output channel index.
        voltage: voltage to apply (V).
    """
    # NOTE(review): smu_qontrol is not defined or imported in this module
    # (only smu_control is imported at the top) -- confirm the intended factory.
    q = smu_qontrol()
    q.v[channel] = float(voltage)
    return q.i[channel]
def zero_voltage() -> None:
    """Sets all voltage channels to zero."""
    # NOTE(review): smu_qontrol is not defined or imported in this module
    # (only smu_control is imported at the top) -- confirm the intended factory.
    q = smu_qontrol()
    q.v[:] = 0
    return
if __name__ == "__main__":
    # safety default when run as a script: drive all channels to 0 V
    zero_voltage()
    # print(get_current(62, 0.1))
    # m = sweep_voltage(vmax=3, channels=(1,))
    # m.write()
| [
"numpy.linspace",
"numpy.zeros_like"
] | [((637, 667), 'numpy.linspace', 'np.linspace', (['imin', 'imax', 'steps'], {}), '(imin, imax, steps)\n', (648, 667), True, 'import numpy as np\n'), ((843, 866), 'numpy.zeros_like', 'np.zeros_like', (['currents'], {}), '(currents)\n', (856, 866), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""model_main"""
import os
import numpy as np
import psutil
import mindspore as ms
import mindspore.ops as P
import mindspore.common.initializer as init
from mindspore import nn
from mindspore.common.initializer import Initializer, _assignment, random_normal
from model.resnet import resnet50
from model.vib import VIB
def show_memory_info(hint=""):
    """Print the current process's USS memory usage in MB, prefixed by ``hint``."""
    process = psutil.Process(os.getpid())
    memory = process.memory_full_info().uss / 1024. / 1024
    print(f"{hint} memory used: {memory} MB ")
def to_edge(x):
    """Convert an N x 3 x H x W RGB batch to an N x 1 x H x W grayscale batch.

    Fix: the blue coefficient was 0.1440, a digit transposition of the
    ITU-R BT.601 luma weight 0.1140 (the three weights should sum to ~1).
    """
    r = x[:, 0, :, :]
    g = x[:, 1, :, :]
    b = x[:, 2, :, :]
    xx = 0.2989 * r + 0.5870 * g + 0.1140 * b
    xx = xx.view((xx.shape[0], 1, xx.shape[1], xx.shape[2]))
    return xx  # N x 1 x h x w
class NormalWithMean(Initializer):
    """
    Initialize a normal array: fills the input tensor with values drawn
    from N(mu, sigma).

    Fix: the original pre-filled the output tensor with ``mu`` and then called
    ``random_normal``, which overwrites the tensor with N(0, 1) samples -- so
    the mean was silently discarded.  The mean is now added after scaling.

    Args:
        mu (float): mean of the distribution. Default: 0.
        sigma (float): standard deviation of the distribution. Default: 0.01.

    Returns:
        Array, normal array.
    """
    def __init__(self, mu=0, sigma=0.01):
        super(NormalWithMean, self).__init__(sigma=sigma)
        self.mu = mu
        self.sigma = sigma
    def _initialize(self, arr):
        seed, seed2 = self.seed
        # random_normal fills output_tensor with standard-normal samples
        output_tensor = ms.Tensor(np.zeros(arr.shape, dtype=np.float32))
        random_normal(arr.shape, seed, seed2, output_tensor)
        output_data = output_tensor.asnumpy()
        # scale to sigma, then shift by mu
        output_data = output_data * self.sigma + self.mu
        _assignment(arr, output_data)
def weights_init_kaiming(m):
    """Kaiming (He) initialization dispatched on the cell's class name.

    Conv: He-normal fan-in; Linear: He-normal fan-out with zero bias;
    BatchNorm1d: gamma ~ N(1, 0.01), zero beta.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.set_data(
            init.initializer(init.HeNormal(negative_slope=0, mode='fan_in'), m.weight.shape, m.weight.dtype))
    elif classname.find('Linear') != -1:
        m.weight.set_data(
            init.initializer(init.HeNormal(negative_slope=0, mode='fan_out'), m.weight.shape, m.weight.dtype))
        m.bias.set_data(init.initializer(init.Zero(), m.bias.shape, m.bias.dtype))
    elif classname.find('BatchNorm1d') != -1:
        # NormalWithMean(1, 0.01): gamma centred at 1, as usual for BN scale
        m.gamma.set_data(init.initializer(NormalWithMean(mu=1, sigma=0.01), m.gamma.shape, m.gamma.dtype))
        m.beta.set_data(init.initializer(init.Zero(), m.beta.shape, m.beta.dtype))
def weights_init_classifier(m):
    """Classifier-layer initialization: small normal weights, zero bias.

    Fix: the original wrote to ``m.gamma`` for Linear layers, but Linear/Dense
    cells expose ``weight`` (``gamma`` is a BatchNorm parameter -- compare the
    Linear branch of ``weights_init_kaiming`` above).
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        m.weight.set_data(init.initializer(init.Normal(sigma=0.001), m.weight.shape, m.weight.dtype))
        if m.bias:
            m.bias.set_data(init.initializer(init.Zero(), m.bias.shape, m.bias.dtype))
class Normalize(nn.Cell):
    """L-p normalization cell: rescales each row of x to unit p-norm."""
    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power
        self.pow = P.Pow()
        self.sum = P.ReduceSum(keep_dims=True)
        self.div = P.Div()
    def construct(self, x):
        # ||x||_p along axis 1, kept as a column for broadcasting
        p_norm = self.pow(self.sum(self.pow(x, self.power), 1), 1. / self.power)
        return self.div(x, p_norm)
class VisibleBackbone(nn.Cell):
    """Visible-light (RGB) branch built on a ResNet-50 backbone."""
    def __init__(self, num_class=395, arch="resnet50", pretrain=""):
        super(VisibleBackbone, self).__init__()
        self.visible = resnet50(num_class=num_class, pretrain=pretrain)
        self.arch = arch
    def construct(self, x):
        # delegate straight to the ResNet-50 backbone
        return self.visible(x)
class ThermalBackbone(nn.Cell):
    """Thermal/infrared branch built on a ResNet-50 backbone."""
    def __init__(self, num_class=395, arch="resnet50", pretrain=""):
        super(ThermalBackbone, self).__init__()
        self.thermal = resnet50(num_class=num_class, pretrain=pretrain)
        self.arch = arch
    def construct(self, x):
        # delegate straight to the ResNet-50 backbone
        return self.thermal(x)
class SharedBackbone(nn.Cell):
    """Modality-shared feature extractor backed by a ResNet-50."""

    def __init__(self, num_class=395, arch="resnet50", pretrain=""):
        super(SharedBackbone, self).__init__()
        self.base = resnet50(num_class=num_class, pretrain=pretrain)
        self.arch = arch

    def construct(self, x):
        """Forward the input through the shared branch."""
        return self.base(x)
class EmbedNet(nn.Cell):
    """Cross-modality embedding network.

    Combines three ResNet-50 backbones (visible, thermal, shared) with a VIB
    bottleneck per branch, and L2-normalizes/concatenates the features at
    inference time.
    """
    def __init__(self, num_class=395, drop=0.2, z_dim=512, arch="resnet50", pretrain=""):
        super(EmbedNet, self).__init__()
        self.rgb_backbone = VisibleBackbone(num_class=num_class, arch=arch, pretrain=pretrain)
        self.ir_backbone = ThermalBackbone(num_class=num_class, arch=arch, pretrain=pretrain)
        self.shared_backbone = SharedBackbone(num_class=num_class, arch=arch, pretrain=pretrain)
        # Feature width of the ResNet-50 pooled output fed into each VIB.
        pool_dim = 2048
        self.rgb_bottleneck = VIB(in_ch=pool_dim, z_dim=z_dim, num_class=num_class)
        self.ir_bottleneck = VIB(in_ch=pool_dim, z_dim=z_dim, num_class=num_class)
        self.shared_bottleneck = VIB(in_ch=pool_dim, z_dim=z_dim, num_class=num_class)
        self.dropout = drop
        self.l2norm = Normalize(2)
        self.avgpool = P.ReduceMean(keep_dims=True)
        self.cat = P.Concat()
        self.cat_dim1 = P.Concat(axis=1)
    def construct(self, x1, x2=None, mode=0):
        """Run all three branches.

        In training mode returns the raw (observation, representation) pairs
        for each branch; in eval mode returns four L2-normalized,
        concatenated feature tensors.
        """
        # visible branch
        if mode == 0:
            x = self.cat((x1, x2))
        else:
            x = x1
        # Each backbone outputs a 2-tuple (feature, logits); same below.
        v_observation = self.rgb_backbone(x)
        v_representation = self.rgb_bottleneck(v_observation[0])
        # infarred branch
        x_grey = to_edge(x)
        i_ms_input = self.cat_dim1([x_grey, x_grey, x_grey])
        i_observation = self.ir_backbone(i_ms_input)
        i_representation = self.ir_bottleneck(i_observation[0])
        # modal shared branch
        v_ms_observation = self.shared_backbone(x)
        v_ms_representation = self.shared_bottleneck(v_ms_observation[0])
        i_ms_observation = self.shared_backbone(i_ms_input)
        i_ms_representation = self.shared_bottleneck(i_ms_observation[0])
        if self.training:
            return v_observation, v_representation, v_ms_observation, v_ms_representation, \
                i_observation, i_representation, i_ms_observation, i_ms_representation
        # Eval path: normalize every feature before concatenation.
        v_observation = self.l2norm(v_observation[0])
        v_representation = self.l2norm(v_representation[0])
        v_ms_observation = self.l2norm(v_ms_observation[0])
        v_ms_representation = self.l2norm(v_ms_representation[0])
        i_observation = self.l2norm(i_observation[0])
        i_representation = self.l2norm(i_representation[0])
        i_ms_observation = self.l2norm(i_ms_observation[0])
        i_ms_representation = self.l2norm(i_ms_representation[0])
        feat_v = self.cat_dim1((v_observation, v_representation))
        feat_i = self.cat_dim1((i_observation, i_representation))
        feat_v_shared = self.cat_dim1((v_ms_observation, v_ms_representation))
        feat_i_shared = self.cat_dim1((i_ms_observation, i_ms_representation))
        return feat_v, feat_v_shared, feat_i, feat_i_shared
| [
"mindspore.common.initializer.Zero",
"numpy.ones",
"psutil.Process",
"mindspore.common.initializer.random_normal",
"mindspore.ops.Pow",
"numpy.zeros",
"mindspore.ops.ReduceSum",
"model.resnet.resnet50",
"mindspore.ops.ReduceMean",
"os.getpid",
"mindspore.common.initializer.Normal",
"mindspore.... | [((1060, 1071), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1069, 1071), False, 'import os\n'), ((1081, 1100), 'psutil.Process', 'psutil.Process', (['pid'], {}), '(pid)\n', (1095, 1100), False, 'import psutil\n'), ((2104, 2156), 'mindspore.common.initializer.random_normal', 'random_normal', (['arr.shape', 'seed', 'seed2', 'output_tensor'], {}), '(arr.shape, seed, seed2, output_tensor)\n', (2117, 2156), False, 'from mindspore.common.initializer import Initializer, _assignment, random_normal\n'), ((2245, 2274), 'mindspore.common.initializer._assignment', '_assignment', (['arr', 'output_data'], {}), '(arr, output_data)\n', (2256, 2274), False, 'from mindspore.common.initializer import Initializer, _assignment, random_normal\n'), ((3564, 3571), 'mindspore.ops.Pow', 'P.Pow', ([], {}), '()\n', (3569, 3571), True, 'import mindspore.ops as P\n'), ((3591, 3618), 'mindspore.ops.ReduceSum', 'P.ReduceSum', ([], {'keep_dims': '(True)'}), '(keep_dims=True)\n', (3602, 3618), True, 'import mindspore.ops as P\n'), ((3638, 3645), 'mindspore.ops.Div', 'P.Div', ([], {}), '()\n', (3643, 3645), True, 'import mindspore.ops as P\n'), ((4046, 4094), 'model.resnet.resnet50', 'resnet50', ([], {'num_class': 'num_class', 'pretrain': 'pretrain'}), '(num_class=num_class, pretrain=pretrain)\n', (4054, 4094), False, 'from model.resnet import resnet50\n'), ((4396, 4444), 'model.resnet.resnet50', 'resnet50', ([], {'num_class': 'num_class', 'pretrain': 'pretrain'}), '(num_class=num_class, pretrain=pretrain)\n', (4404, 4444), False, 'from model.resnet import resnet50\n'), ((4740, 4788), 'model.resnet.resnet50', 'resnet50', ([], {'num_class': 'num_class', 'pretrain': 'pretrain'}), '(num_class=num_class, pretrain=pretrain)\n', (4748, 4788), False, 'from model.resnet import resnet50\n'), ((5405, 5458), 'model.vib.VIB', 'VIB', ([], {'in_ch': 'pool_dim', 'z_dim': 'z_dim', 'num_class': 'num_class'}), '(in_ch=pool_dim, z_dim=z_dim, num_class=num_class)\n', (5408, 5458), False, 'from 
model.vib import VIB\n'), ((5488, 5541), 'model.vib.VIB', 'VIB', ([], {'in_ch': 'pool_dim', 'z_dim': 'z_dim', 'num_class': 'num_class'}), '(in_ch=pool_dim, z_dim=z_dim, num_class=num_class)\n', (5491, 5541), False, 'from model.vib import VIB\n'), ((5575, 5628), 'model.vib.VIB', 'VIB', ([], {'in_ch': 'pool_dim', 'z_dim': 'z_dim', 'num_class': 'num_class'}), '(in_ch=pool_dim, z_dim=z_dim, num_class=num_class)\n', (5578, 5628), False, 'from model.vib import VIB\n'), ((5718, 5746), 'mindspore.ops.ReduceMean', 'P.ReduceMean', ([], {'keep_dims': '(True)'}), '(keep_dims=True)\n', (5730, 5746), True, 'import mindspore.ops as P\n'), ((5766, 5776), 'mindspore.ops.Concat', 'P.Concat', ([], {}), '()\n', (5774, 5776), True, 'import mindspore.ops as P\n'), ((5801, 5817), 'mindspore.ops.Concat', 'P.Concat', ([], {'axis': '(1)'}), '(axis=1)\n', (5809, 5817), True, 'import mindspore.ops as P\n'), ((2008, 2045), 'numpy.zeros', 'np.zeros', (['arr.shape'], {'dtype': 'np.float32'}), '(arr.shape, dtype=np.float32)\n', (2016, 2045), True, 'import numpy as np\n'), ((2467, 2513), 'mindspore.common.initializer.HeNormal', 'init.HeNormal', ([], {'negative_slope': '(0)', 'mode': '"""fan_in"""'}), "(negative_slope=0, mode='fan_in')\n", (2480, 2513), True, 'import mindspore.common.initializer as init\n'), ((3232, 3256), 'mindspore.common.initializer.Normal', 'init.Normal', ([], {'sigma': '(0.001)'}), '(sigma=0.001)\n', (3243, 3256), True, 'import mindspore.common.initializer as init\n'), ((2048, 2084), 'numpy.ones', 'np.ones', (['arr.shape'], {'dtype': 'np.float32'}), '(arr.shape, dtype=np.float32)\n', (2055, 2084), True, 'import numpy as np\n'), ((2645, 2692), 'mindspore.common.initializer.HeNormal', 'init.HeNormal', ([], {'negative_slope': '(0)', 'mode': '"""fan_out"""'}), "(negative_slope=0, mode='fan_out')\n", (2658, 2692), True, 'import mindspore.common.initializer as init\n'), ((2768, 2779), 'mindspore.common.initializer.Zero', 'init.Zero', ([], {}), '()\n', (2777, 2779), True, 'import 
mindspore.common.initializer as init\n'), ((3353, 3364), 'mindspore.common.initializer.Zero', 'init.Zero', ([], {}), '()\n', (3362, 3364), True, 'import mindspore.common.initializer as init\n'), ((3004, 3015), 'mindspore.common.initializer.Zero', 'init.Zero', ([], {}), '()\n', (3013, 3015), True, 'import mindspore.common.initializer as init\n')] |
"""
An example training an SGDClassifier, performing grid search
using TuneGridSearchCV.
This example uses early stopping to further improve runtimes
by eliminating worse hyperparameter choices early based off
of its average test score from cross validation.
"""
from tune_sklearn import TuneGridSearchCV
from sklearn.linear_model import SGDClassifier
from sklearn import datasets
from sklearn.model_selection import train_test_split
from ray.tune.schedulers import MedianStoppingRule
import numpy as np
# Load the 8x8 digits dataset and hold out 20% of samples for evaluation.
digits = datasets.load_digits()
x = digits.data
y = digits.target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.2)
# Linear classifier trained with stochastic gradient descent.
clf = SGDClassifier()
parameter_grid = {"alpha": [1e-4, 1e-1, 1], "epsilon": [0.01, 0.1]}
# Early-stop trials whose running score falls below the median of completed trials.
scheduler = MedianStoppingRule(grace_period=10.0)
tune_search = TuneGridSearchCV(
    clf,
    parameter_grid,
    early_stopping=scheduler,
    max_iters=10,
)
tune_search.fit(x_train, y_train)
pred = tune_search.predict(x_test)
# Fraction of exactly-matching predictions on the held-out split.
accuracy = np.count_nonzero(np.array(pred) == np.array(y_test)) / len(pred)
print(accuracy)
| [
"sklearn.linear_model.SGDClassifier",
"sklearn.model_selection.train_test_split",
"tune_sklearn.TuneGridSearchCV",
"ray.tune.schedulers.MedianStoppingRule",
"sklearn.datasets.load_digits",
"numpy.array"
] | [((516, 538), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {}), '()\n', (536, 538), False, 'from sklearn import datasets\n'), ((608, 645), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)'}), '(x, y, test_size=0.2)\n', (624, 645), False, 'from sklearn.model_selection import train_test_split\n'), ((652, 667), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '()\n', (665, 667), False, 'from sklearn.linear_model import SGDClassifier\n'), ((749, 786), 'ray.tune.schedulers.MedianStoppingRule', 'MedianStoppingRule', ([], {'grace_period': '(10.0)'}), '(grace_period=10.0)\n', (767, 786), False, 'from ray.tune.schedulers import MedianStoppingRule\n'), ((802, 879), 'tune_sklearn.TuneGridSearchCV', 'TuneGridSearchCV', (['clf', 'parameter_grid'], {'early_stopping': 'scheduler', 'max_iters': '(10)'}), '(clf, parameter_grid, early_stopping=scheduler, max_iters=10)\n', (818, 879), False, 'from tune_sklearn import TuneGridSearchCV\n'), ((997, 1011), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (1005, 1011), True, 'import numpy as np\n'), ((1015, 1031), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (1023, 1031), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import rdkit
from moses.metrics.metrics import get_all_metrics
from moses.script_utils import read_smiles_csv
# Silence RDKit log output; metric computation otherwise emits many benign warnings.
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
def main(config, print_metrics=True):
    """Compute MOSES metrics for a set of generated molecules.

    Loads the required generated SMILES plus any optional reference sets
    named in ``config`` and delegates to ``get_all_metrics``.

    Args:
        config: parsed argparse namespace (see ``get_parser``).
        print_metrics: when True, also print ``name,value`` lines to stdout.

    Returns:
        dict mapping metric name to metric value.
    """
    test = None
    test_scaffolds = None
    ptest = None
    ptest_scaffolds = None
    train = None
    # NOTE: this first check uses truthiness (an empty-string path is also
    # skipped), unlike the explicit ``is not None`` checks below; kept as-is
    # for backward compatibility.
    if config.test_path:
        test = read_smiles_csv(config.test_path)
    if config.test_scaffolds_path is not None:
        test_scaffolds = read_smiles_csv(config.test_scaffolds_path)
    if config.train_path is not None:
        train = read_smiles_csv(config.train_path)
    if config.ptest_path is not None:
        # Precomputed statistics are stored under the 'stats' key of the npz.
        ptest = np.load(
            config.ptest_path,
            allow_pickle=True)['stats'].item()
    if config.ptest_scaffolds_path is not None:
        ptest_scaffolds = np.load(
            config.ptest_scaffolds_path,
            allow_pickle=True)['stats'].item()
    gen = read_smiles_csv(config.gen_path)
    metrics = get_all_metrics(gen=gen, k=config.ks, n_jobs=config.n_jobs,
                              device=config.device,
                              test_scaffolds=test_scaffolds,
                              ptest=ptest, ptest_scaffolds=ptest_scaffolds,
                              test=test, train=train)
    if print_metrics:
        for name, value in metrics.items():
            print('{},{}'.format(name, value))
    # FIX: previously returned None when printing; returning the metrics in
    # both cases is backward compatible and useful to programmatic callers.
    return metrics
def get_parser():
    """Build the command-line argument parser for metric evaluation."""
    parser = argparse.ArgumentParser()
    # All optional path arguments share the same (type, required) signature.
    optional_paths = [
        ('--test_path', 'Path to test molecules csv'),
        ('--test_scaffolds_path', 'Path to scaffold test molecules csv'),
        ('--train_path', 'Path to train molecules csv'),
        ('--ptest_path', 'Path to precalculated test npz'),
        ('--ptest_scaffolds_path', 'Path to precalculated scaffold test npz'),
    ]
    for flag, help_text in optional_paths:
        parser.add_argument(flag, type=str, required=False, help=help_text)
    parser.add_argument('--gen_path', type=str, required=True,
                        help='Path to generated molecules csv')
    parser.add_argument('--ks', '--unique_k', nargs='+', type=int,
                        default=[1000, 10000],
                        help='Number of molecules to calculate uniqueness at.'
                             'Multiple values are possible. Defaults to '
                             '--unique_k 1000 10000')
    parser.add_argument('--n_jobs', type=int, default=1,
                        help='Number of processes to run metrics')
    parser.add_argument('--device', type=str, default='cpu',
                        help='GPU device id (`cpu` or `cuda:n`)')
    return parser
if __name__ == "__main__":
    # parse_known_args ignores unrecognized CLI flags (e.g. from launchers).
    parser = get_parser()
    config = parser.parse_known_args()[0]
    main(config)
| [
"moses.script_utils.read_smiles_csv",
"argparse.ArgumentParser",
"rdkit.RDLogger.logger",
"moses.metrics.metrics.get_all_metrics",
"numpy.load"
] | [((152, 175), 'rdkit.RDLogger.logger', 'rdkit.RDLogger.logger', ([], {}), '()\n', (173, 175), False, 'import rdkit\n'), ((957, 989), 'moses.script_utils.read_smiles_csv', 'read_smiles_csv', (['config.gen_path'], {}), '(config.gen_path)\n', (972, 989), False, 'from moses.script_utils import read_smiles_csv\n'), ((1004, 1196), 'moses.metrics.metrics.get_all_metrics', 'get_all_metrics', ([], {'gen': 'gen', 'k': 'config.ks', 'n_jobs': 'config.n_jobs', 'device': 'config.device', 'test_scaffolds': 'test_scaffolds', 'ptest': 'ptest', 'ptest_scaffolds': 'ptest_scaffolds', 'test': 'test', 'train': 'train'}), '(gen=gen, k=config.ks, n_jobs=config.n_jobs, device=config.\n device, test_scaffolds=test_scaffolds, ptest=ptest, ptest_scaffolds=\n ptest_scaffolds, test=test, train=train)\n', (1019, 1196), False, 'from moses.metrics.metrics import get_all_metrics\n'), ((1487, 1512), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1510, 1512), False, 'import argparse\n'), ((396, 429), 'moses.script_utils.read_smiles_csv', 'read_smiles_csv', (['config.test_path'], {}), '(config.test_path)\n', (411, 429), False, 'from moses.script_utils import read_smiles_csv\n'), ((502, 545), 'moses.script_utils.read_smiles_csv', 'read_smiles_csv', (['config.test_scaffolds_path'], {}), '(config.test_scaffolds_path)\n', (517, 545), False, 'from moses.script_utils import read_smiles_csv\n'), ((600, 634), 'moses.script_utils.read_smiles_csv', 'read_smiles_csv', (['config.train_path'], {}), '(config.train_path)\n', (615, 634), False, 'from moses.script_utils import read_smiles_csv\n'), ((689, 734), 'numpy.load', 'np.load', (['config.ptest_path'], {'allow_pickle': '(True)'}), '(config.ptest_path, allow_pickle=True)\n', (696, 734), True, 'import numpy as np\n'), ((850, 905), 'numpy.load', 'np.load', (['config.ptest_scaffolds_path'], {'allow_pickle': '(True)'}), '(config.ptest_scaffolds_path, allow_pickle=True)\n', (857, 905), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The Ravens Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Insertion Tasks."""
import numpy as np
from ravens.tasks.task import Task
from ravens.utils import utils
import pybullet as p
class BlockInsertion(Task):
    """Insertion Task - Base Variant."""

    def __init__(self):
        super().__init__()
        self.max_steps = 3

    def reset(self, env):
        """Spawn one L-shaped block and one fixture, then record the goal."""
        super().reset(env)
        block_id = self.add_block(env)
        targ_pose = self.add_fixture(env)
        goal_objs = [(block_id, (2 * np.pi, None))]
        self.goals.append(
            (goal_objs, np.int32([[1]]), [targ_pose], False, True, 'pose', None, 1))

    def add_block(self, env):
        """Add an L-shaped block at a random pose; return its object id."""
        block_size = (0.1, 0.1, 0.04)
        block_urdf = 'insertion/ell.urdf'
        random_pose = self.get_random_pose(env, block_size)
        return env.add_object(block_urdf, random_pose)

    def add_fixture(self, env):
        """Add an L-shaped fixture at a random pose; return that pose."""
        fixture_size = (0.1, 0.1, 0.04)
        fixture_urdf = 'insertion/fixture.urdf'
        fixture_pose = self.get_random_pose(env, fixture_size)
        env.add_object(fixture_urdf, fixture_pose, 'fixed')
        return fixture_pose
class BlockInsertionTranslation(BlockInsertion):
    """Insertion Task - Translation Variant."""

    def get_random_pose(self, env, obj_size):
        """Sample a random position but pin the rotation to 90 deg about z."""
        position, _ = super(BlockInsertionTranslation, self).get_random_pose(env, obj_size)
        fixed_rotation = utils.eulerXYZ_to_quatXYZW((0, 0, np.pi / 2))
        return position, fixed_rotation
# Visualization positions.
# block_pos = (0.40, -0.15, 0.02)
# fixture_pos = (0.65, 0.10, 0.02)
class BlockInsertionEasy(BlockInsertionTranslation):
    """Insertion Task - Easy Variant."""

    def add_block(self, env):
        """Add the L-shaped block at a fixed, known pose."""
        ell_urdf = 'insertion/ell.urdf'
        fixed_position = (0.5, 0, 0.02)
        fixed_rotation = p.getQuaternionFromEuler((0, 0, np.pi / 2))
        return env.add_object(ell_urdf, (fixed_position, fixed_rotation))
class BlockInsertionSixDof(BlockInsertion):
    """Insertion Task - 6DOF Variant."""
    def __init__(self):
        super().__init__()
        self.sixdof = True
        self.pos_eps = 0.02
    def add_fixture(self, env):
        """Add L-shaped fixture to place block."""
        size = (0.1, 0.1, 0.04)
        urdf = 'insertion/fixture.urdf'
        pose = self.get_random_pose_6dof(env, size)
        env.add_object(urdf, pose, 'fixed')
        return pose
    def get_random_pose_6dof(self, env, obj_size):
        """Sample a random 6-DOF pose: lifted position plus random roll/pitch/yaw.

        NOTE: the order of np.random.rand() calls below is load-bearing for
        reproducibility with a seeded RNG — do not reorder.
        """
        pos, rot = super(BlockInsertionSixDof, self).get_random_pose(env, obj_size)
        # Lift the fixture by z in [0.03, 0.13) above the half-height.
        z = (np.random.rand() / 10) + 0.03
        pos = (pos[0], pos[1], obj_size[2] / 2 + z)
        # roll, pitch in [-pi/4, pi/4); yaw in [0, 2*pi).
        roll = (np.random.rand() - 0.5) * np.pi / 2
        pitch = (np.random.rand() - 0.5) * np.pi / 2
        yaw = np.random.rand() * 2 * np.pi
        rot = utils.eulerXYZ_to_quatXYZW((roll, pitch, yaw))
        return pos, rot
class BlockInsertionNoFixture(BlockInsertion):
    """Insertion Task - No Fixture Variant."""

    def add_fixture(self, env):
        """Sample and return a target pose without adding any fixture object."""
        target_size = (0.1, 0.1, 0.04)
        return self.get_random_pose(env, target_size)
# def reset(self, env, last_info=None):
# self.num_steps = 1
# self.goal = {'places': {}, 'steps': []}
# # Add L-shaped block.
# block_size = (0.1, 0.1, 0.04)
# block_urdf = 'insertion/ell.urdf'
# block_pose = self.get_random_pose(env, block_size)
# block_id = env.add_object(block_urdf, block_pose)
# self.goal['steps'].append({block_id: (2 * np.pi, [0])})
# # Add L-shaped target pose, but without actually adding it.
# if self.goal_cond_testing:
# assert last_info is not None
# self.goal['places'][0] = self._get_goal_info(last_info)
# # print('\nin insertion reset, goal: {}'.format(self.goal['places'][0]))
# else:
# hole_pose = self.get_random_pose(env, block_size)
# self.goal['places'][0] = hole_pose
# # print('\nin insertion reset, goal: {}'.format(hole_pose))
# def _get_goal_info(self, last_info):
# """Used to determine the goal given the last `info` dict."""
# position, rotation, _ = last_info[4] # block ID=4
# return (position, rotation)
| [
"pybullet.getQuaternionFromEuler",
"numpy.int32",
"ravens.utils.utils.eulerXYZ_to_quatXYZW",
"numpy.random.rand"
] | [((1923, 1968), 'ravens.utils.utils.eulerXYZ_to_quatXYZW', 'utils.eulerXYZ_to_quatXYZW', (['(0, 0, np.pi / 2)'], {}), '((0, 0, np.pi / 2))\n', (1949, 1968), False, 'from ravens.utils import utils\n'), ((3223, 3269), 'ravens.utils.utils.eulerXYZ_to_quatXYZW', 'utils.eulerXYZ_to_quatXYZW', (['(roll, pitch, yaw)'], {}), '((roll, pitch, yaw))\n', (3249, 3269), False, 'from ravens.utils import utils\n'), ((2353, 2396), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['(0, 0, np.pi / 2)'], {}), '((0, 0, np.pi / 2))\n', (2377, 2396), True, 'import pybullet as p\n'), ((1145, 1160), 'numpy.int32', 'np.int32', (['[[1]]'], {}), '([[1]])\n', (1153, 1160), True, 'import numpy as np\n'), ((2999, 3015), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3013, 3015), True, 'import numpy as np\n'), ((3184, 3200), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3198, 3200), True, 'import numpy as np\n'), ((3089, 3105), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3103, 3105), True, 'import numpy as np\n'), ((3138, 3154), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3152, 3154), True, 'import numpy as np\n')] |
"""
Blending module.
Check Blending_ section of W3C recommendation for blending mode definitions.
.. _Blending: https://www.w3.org/TR/compositing/#blending
"""
from __future__ import absolute_import, unicode_literals
import logging
from psd_tools.utils import new_registry
from psd_tools.constants import BlendMode
from psd_tools.terminology import Enum
# Module logger and the registry mapping blend-mode keys to blend functions;
# ``register`` is the decorator that populates BLEND_FUNCTIONS.
logger = logging.getLogger(__name__)
BLEND_FUNCTIONS, register = new_registry()
def blend(backdrop, image, offset, mode=None):
    """Blend ``image`` onto ``backdrop`` at ``offset`` with the given mode.

    Negative offsets crop the image; if it is pushed fully off-canvas the
    backdrop is returned unchanged.  ``mode`` is looked up in
    BLEND_FUNCTIONS (normal blending when None/NORMAL); the result is
    alpha-composited and converted back to the backdrop's original mode.
    """
    # FIX: removed unused ImageChops / ImageMath imports.
    from PIL import Image
    # Align the canvas size.
    if offset[0] < 0:
        if image.width <= -offset[0]:
            return backdrop
        image = image.crop((-offset[0], 0, image.width, image.height))
        offset = (0, offset[1])
    if offset[1] < 0:
        if image.height <= -offset[1]:
            return backdrop
        image = image.crop((0, -offset[1], image.width, image.height))
        offset = (offset[0], 0)
    # Operations must happen in RGBA in Pillow.
    image_ = Image.new(image.mode, backdrop.size)
    image_.paste(image, offset)
    image = image_.convert('RGBA')
    target_mode = backdrop.mode
    if target_mode != 'RGBA':
        backdrop = backdrop.convert('RGBA')
    # Composite blended image.
    if mode not in (BlendMode.NORMAL, Enum.Normal, None):
        blend_func = BLEND_FUNCTIONS.get(mode, _normal)
        image = _blend_image(backdrop, image, blend_func)
    backdrop = Image.alpha_composite(backdrop, image)
    if target_mode != 'RGBA':
        backdrop = backdrop.convert(target_mode)
    return backdrop
def _blend_image(backdrop, source, blend_fn):
    """Compose ``source`` over ``backdrop`` using ``blend_fn``.

    Implements Cr = (1 - Ab) * Cs + Ab * B(Cs, Cb) from the W3C compositing
    model, with channels scaled to [0, 1]; the source alpha channel is
    carried over unchanged.
    """
    from PIL import Image
    import numpy as np
    # FIX: np.float was removed in NumPy 1.24; np.float64 is the same dtype.
    Cb = np.asarray(backdrop.convert('RGB')).astype(np.float64) / 255.
    Cs = np.asarray(source.convert('RGB')).astype(np.float64) / 255.
    Ab = np.asarray(backdrop.getchannel('A')).astype(np.float64) / 255.
    Ab = np.expand_dims(Ab, axis=2)
    Cr = (1. - Ab) * Cs + Ab * blend_fn(Cs, Cb)
    result = Image.fromarray((Cr * 255).round().astype(np.uint8), mode='RGB')
    result.putalpha(source.getchannel('A'))
    return result
@register(BlendMode.NORMAL)
@register(Enum.Normal)
def _normal(Cs, Cb):
    """Normal blend: the source color replaces the backdrop, B = Cs."""
    return Cs
@register(BlendMode.MULTIPLY)
@register(Enum.Multiply)
def _multiply(Cs, Cb):
    """Multiply blend: B = Cs * Cb (never lightens)."""
    return Cs * Cb
@register(BlendMode.SCREEN)
@register(Enum.Screen)
def _screen(Cs, Cb):
    """Screen blend: B = Cb + Cs - Cb*Cs (never darkens)."""
    return Cb + Cs - (Cb * Cs)
@register(BlendMode.OVERLAY)
@register(Enum.Overlay)
def _overlay(Cs, Cb):
    """Overlay blend: hard-light with source and backdrop swapped."""
    return _hard_light(Cb, Cs)
@register(BlendMode.DARKEN)
@register(Enum.Darken)
def _darken(Cs, Cb):
    """Darken blend: element-wise minimum of backdrop and source."""
    import numpy as np
    return np.minimum(Cb, Cs)
@register(BlendMode.LIGHTEN)
@register(Enum.Lighten)
def _lighten(Cs, Cb):
    """Lighten blend: element-wise maximum of backdrop and source."""
    import numpy as np
    return np.maximum(Cb, Cs)
@register(BlendMode.COLOR_DODGE)
@register(Enum.ColorDodge)
def _color_dodge(Cs, Cb, s=1.0):
    """Color-dodge blend: brightens the backdrop to reflect the source.

    B = min(1, Cb / (s * (1 - Cs))) where defined; elsewhere B = 1 for
    Cs == 1, except that Cb == 0 forces B = 0 (the later assignment wins).
    """
    import numpy as np
    result = np.zeros_like(Cs)
    result[Cs == 1] = 1
    result[Cb == 0] = 0
    defined = (Cs != 1) & (Cb != 0)
    ratio = Cb[defined] / (s * (1 - Cs[defined]))
    result[defined] = np.minimum(1, ratio)
    return result
@register(BlendMode.LINEAR_DODGE)
@register(b'linearDodge')
def _linear_dodge(Cs, Cb):
    """Linear-dodge blend: B = min(1, Cb + Cs)."""
    import numpy as np
    return np.minimum(1, Cb + Cs)
@register(BlendMode.COLOR_BURN)
@register(Enum.ColorBurn)
def _color_burn(Cs, Cb, s=1.0):
    """Color-burn blend: darkens the backdrop to reflect the source.

    B = 1 - min(1, (1 - Cb) / (s * Cs)) where defined; B = 1 where Cb == 1
    and 0 elsewhere (the zero-initialized default).
    """
    import numpy as np
    result = np.zeros_like(Cb)
    result[Cb == 1] = 1
    defined = (Cb != 1) & (Cs != 0)
    ratio = (1 - Cb[defined]) / (s * Cs[defined])
    result[defined] = 1 - np.minimum(1, ratio)
    return result
@register(BlendMode.LINEAR_BURN)
@register(b'linearBurn')
def _linear_burn(Cs, Cb):
    """Linear-burn blend: B = max(0, Cb + Cs - 1)."""
    import numpy as np
    return np.maximum(0, Cb + Cs - 1)
@register(BlendMode.HARD_LIGHT)
@register(Enum.HardLight)
def _hard_light(Cs, Cb):
    """Hard-light blend: multiply where the source is dark, screen where bright."""
    bright = Cs > 0.5
    result = _multiply(Cs, Cb)
    result[bright] = _screen(Cs, Cb)[bright]
    return result
@register(BlendMode.SOFT_LIGHT)
@register(Enum.SoftLight)
def _soft_light(Cs, Cb):
    """Soft-light blend per the W3C compositing spec.

    For Cs <= 0.5: B = Cb - (1 - 2*Cs) * Cb * (1 - Cb).
    For Cs >  0.5: B = Cb + (2*Cs - 1) * (D(Cb) - Cb), where
    D(x) = ((16x - 12)x + 4)x for x <= 0.25, else sqrt(x).
    """
    import numpy as np
    # FIX: D is a function of the *backdrop*; the polynomial branch applies
    # where Cb <= 0.25 (previous code selected on Cs <= 0.25, so the
    # polynomial never took effect in the Cs > 0.5 branch that uses D).
    index = Cb <= 0.25
    D = np.sqrt(Cb)
    D[index] = ((16 * Cb[index] - 12) * Cb[index] + 4) * Cb[index]
    index = Cs <= 0.5
    B = Cb + (2 * Cs - 1) * (D - Cb)
    B[index] = Cb[index] - (1 - 2 * Cs[index]) * Cb[index] * (1 - Cb[index])
    return B
@register(BlendMode.VIVID_LIGHT)
@register(b'vividLight')
def _vivid_light(Cs, Cb):
    """
    Burns or dodges the colors by increasing or decreasing the contrast,
    depending on the blend color. If the blend color (light source) is lighter
    than 50% gray, the image is lightened by decreasing the contrast. If the
    blend color is darker than 50% gray, the image is darkened by increasing
    the contrast.
    """
    # TODO: Still inaccurate.
    # Bright source pixels use color-burn, dark ones color-dodge, each with
    # an s=128 scale factor.
    index = Cs > 0.5
    B = _color_dodge(Cs, Cb, 128)
    B[index] = _color_burn(Cs, Cb, 128)[index]
    return B
@register(BlendMode.LINEAR_LIGHT)
@register(b'linearLight')
def _linear_light(Cs, Cb):
    """Linear-light blend: linear-burn for dark source, linear-dodge for bright."""
    bright = Cs > 0.5
    result = _linear_burn(Cs, Cb)
    result[bright] = _linear_dodge(Cs, Cb)[bright]
    return result
@register(BlendMode.PIN_LIGHT)
@register(b'pinLight')
def _pin_light(Cs, Cb):
    """Pin-light blend: darken for dark source, lighten for bright source."""
    bright = Cs > 0.5
    result = _darken(Cs, Cb)
    result[bright] = _lighten(Cs, Cb)[bright]
    return result
@register(BlendMode.DIFFERENCE)
@register(Enum.Difference)
def _difference(Cs, Cb):
    """Difference blend: B = |Cb - Cs|."""
    import numpy as np
    return np.abs(Cb - Cs)
@register(BlendMode.EXCLUSION)
@register(Enum.Exclusion)
def _exclusion(Cs, Cb):
    """Exclusion blend: B = Cb + Cs - 2*Cb*Cs (lower contrast than difference)."""
    return Cb + Cs - 2 * Cb * Cs
@register(BlendMode.SUBTRACT)
@register(b'blendSubtraction')
def _subtract(Cs, Cb):
    """Subtract blend: B = max(0, Cb - Cs)."""
    import numpy as np
    return np.maximum(0, Cb - Cs)
@register(BlendMode.HARD_MIX)
@register(b'hardMix')
def _hard_mix(Cs, Cb):
    """Hard-mix blend: 0 where Cs + Cb < 1, otherwise the backdrop value.

    NOTE(review): the conventional hard-mix outputs 1 (not Cb) where
    Cs + Cb >= 1 — confirm whether keeping Cb here is intentional.
    """
    B = Cb.copy()
    B[(Cs + Cb) < 1] = 0
    return B
@register(BlendMode.DIVIDE)
@register(b'blendDivide')
def _divide(Cs, Cb):
    """Divide blend: B = Cb / Cs where Cs > 0, otherwise the backdrop value.

    NOTE(review): result is not clipped to [0, 1] here.
    """
    B = Cb.copy()
    index = Cs > 0
    B[index] = Cb[index] / Cs[index]  # Seems incorrect...
    return B
@register(BlendMode.HUE)
@register(Enum.Hue)
def _hue(Cs, Cb):
    """Hue blend: source hue with the backdrop's luminosity and saturation."""
    # FIX: dropped the unused numpy import and unused unpacked components.
    hs, _, _ = rgb_to_hls(Cs)
    _, lb, sb = rgb_to_hls(Cb)
    return hls_to_rgb(hs, lb, sb)
@register(BlendMode.SATURATION)
@register(Enum.Saturation)
def _saturation(Cs, Cb):
    """Saturation blend: source saturation with backdrop hue and luminosity."""
    # FIX: dropped the unused numpy import and unused unpacked components.
    _, _, ss = rgb_to_hls(Cs)
    hb, lb, _ = rgb_to_hls(Cb)
    return hls_to_rgb(hb, lb, ss)
@register(BlendMode.COLOR)
@register(Enum.Color)
def _color(Cs, Cb):
    """Color blend: source hue and saturation with backdrop luminosity."""
    # FIX: dropped the unused numpy import and unused unpacked components.
    hs, _, ss = rgb_to_hls(Cs)
    _, lb, _ = rgb_to_hls(Cb)
    return hls_to_rgb(hs, lb, ss)
@register(BlendMode.LUMINOSITY)
@register(Enum.Luminosity)
def _luminosity(Cs, Cb):
    """Luminosity blend: source luminosity with backdrop hue and saturation.

    FIX: renamed from a duplicate ``_saturation`` definition that shadowed
    the saturation blend at module level; registration via the decorators
    happens at definition time, so registry behavior is unchanged.  Also
    dropped the unused numpy import and unused unpacked components.
    """
    _, ls, _ = rgb_to_hls(Cs)
    hb, _, sb = rgb_to_hls(Cb)
    return hls_to_rgb(hb, ls, sb)
# BlendMode.DISSOLVE: _dissolve,
# BlendMode.DARKER_COLOR: _darker_color,
# BlendMode.LIGHTER_COLOR: _lighter_color,
# Enum.Dissolve: _dissolve,
# b'darkerColor': _darker_color,
# b'lighterColor': _lighter_color,
def rgb_to_hls(rgb):
    """RGB to HLS conversion for an (H, W, 3) float array in [0, 1].

    Vectorized port of the scalar algorithm in the stdlib ``colorsys``
    module; returns the (h, l, s) channel arrays.
    """
    import numpy as np
    maxc = np.max(rgb, axis=2)
    minc = np.min(rgb, axis=2)
    nonzero_index = (minc < maxc)
    c_diff = maxc - minc
    l = (minc + maxc) / 2.0
    s = np.zeros_like(l)
    h = np.zeros_like(l)
    index = nonzero_index
    s[index] = c_diff[index] / (2.0 - maxc[index] - minc[index])
    index = (l <= 0.5) & nonzero_index
    s[index] = c_diff[index] / (maxc[index] + minc[index])
    # Per-channel distance from the max channel, normalized by the chroma.
    # BUGFIX: the previous expression computed ``maxc - rgb / c_diff`` due to
    # operator precedence; colorsys requires (maxc - c) / (maxc - minc).
    rc, gc, bc = (
        (maxc[nonzero_index] - rgb[:, :, i][nonzero_index]) / c_diff[nonzero_index]
        for i in range(3)
    )
    hc = 4.0 + gc - rc  # default: blue is the max channel
    index = (rgb[:, :, 1][nonzero_index] == maxc[nonzero_index])
    hc[index] = 2.0 + rc[index] - bc[index]  # green is the max channel
    index = (rgb[:, :, 0][nonzero_index] == maxc[nonzero_index])
    hc[index] = bc[index] - gc[index]  # red is the max channel
    h[nonzero_index] = (hc / 6.0) % 1.0
    return h, l, s
def hls_to_rgb(h, l, s):
    """HSL to RGB conversion for matching-shape channel arrays.

    Vectorized port of the scalar algorithm in the stdlib ``colorsys``
    module; returns an array stacked as (..., 3).
    """
    import numpy as np
    ONE_THIRD = 1. / 3.
    TWO_THIRD = 2. / 3.
    ONE_SIXTH = 1. / 6.
    # Zero saturation yields a gray pixel: r = g = b = l.
    r, g, b = np.copy(l), np.copy(l), np.copy(l)
    nonzero_index = (s != 0.)
    m2 = l + s - (l * s)
    index = l <= 0.5
    m2[index] = l[index] * (1.0 + s[index])
    m1 = 2.0 * l - m2
    def _v(m1, m2, hue):
        # Vectorized colorsys._v: the masks are applied from the *last*
        # branch to the first so later (narrower) masks override — the
        # assignment order is load-bearing.
        hue = hue % 1.0
        c = np.copy(m1)
        index = hue < TWO_THIRD
        c[index] = m1[index] + (m2[index] -
                                m1[index]) * (TWO_THIRD - hue[index]) * 6.0
        index = hue < 0.5
        c[index] = m2[index]
        index = hue < ONE_SIXTH
        c[index] = m1[index] + (m2[index] - m1[index]) * hue[index] * 6.0
        return c
    r[nonzero_index] = _v(m1, m2, h + ONE_THIRD)[nonzero_index]
    g[nonzero_index] = _v(m1, m2, h)[nonzero_index]
    b[nonzero_index] = _v(m1, m2, h - ONE_THIRD)[nonzero_index]
    return np.stack((r, g, b), axis=2)
| [
"logging.getLogger",
"numpy.abs",
"numpy.copy",
"numpy.sqrt",
"numpy.minimum",
"PIL.Image.new",
"numpy.min",
"numpy.max",
"PIL.Image.alpha_composite",
"numpy.stack",
"numpy.expand_dims",
"psd_tools.utils.new_registry",
"numpy.maximum",
"numpy.zeros_like"
] | [((367, 394), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (384, 394), False, 'import logging\n'), ((424, 438), 'psd_tools.utils.new_registry', 'new_registry', ([], {}), '()\n', (436, 438), False, 'from psd_tools.utils import new_registry\n'), ((1013, 1049), 'PIL.Image.new', 'Image.new', (['image.mode', 'backdrop.size'], {}), '(image.mode, backdrop.size)\n', (1022, 1049), False, 'from PIL import Image\n'), ((1443, 1481), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['backdrop', 'image'], {}), '(backdrop, image)\n', (1464, 1481), False, 'from PIL import Image\n'), ((1894, 1920), 'numpy.expand_dims', 'np.expand_dims', (['Ab'], {'axis': '(2)'}), '(Ab, axis=2)\n', (1908, 1920), True, 'import numpy as np\n'), ((2617, 2635), 'numpy.minimum', 'np.minimum', (['Cb', 'Cs'], {}), '(Cb, Cs)\n', (2627, 2635), True, 'import numpy as np\n'), ((2747, 2765), 'numpy.maximum', 'np.maximum', (['Cb', 'Cs'], {}), '(Cb, Cs)\n', (2757, 2765), True, 'import numpy as np\n'), ((2892, 2909), 'numpy.zeros_like', 'np.zeros_like', (['Cs'], {}), '(Cs)\n', (2905, 2909), True, 'import numpy as np\n'), ((2997, 3045), 'numpy.minimum', 'np.minimum', (['(1)', '(Cb[index] / (s * (1 - Cs[index])))'], {}), '(1, Cb[index] / (s * (1 - Cs[index])))\n', (3007, 3045), True, 'import numpy as np\n'), ((3182, 3204), 'numpy.minimum', 'np.minimum', (['(1)', '(Cb + Cs)'], {}), '(1, Cb + Cs)\n', (3192, 3204), True, 'import numpy as np\n'), ((3328, 3345), 'numpy.zeros_like', 'np.zeros_like', (['Cb'], {}), '(Cb)\n', (3341, 3345), True, 'import numpy as np\n'), ((3600, 3626), 'numpy.maximum', 'np.maximum', (['(0)', '(Cb + Cs - 1)'], {}), '(0, Cb + Cs - 1)\n', (3610, 3626), True, 'import numpy as np\n'), ((3949, 3960), 'numpy.sqrt', 'np.sqrt', (['Cb'], {}), '(Cb)\n', (3956, 3960), True, 'import numpy as np\n'), ((5241, 5256), 'numpy.abs', 'np.abs', (['(Cb - Cs)'], {}), '(Cb - Cs)\n', (5247, 5256), True, 'import numpy as np\n'), ((5493, 5515), 'numpy.maximum', 'np.maximum', 
(['(0)', '(Cb - Cs)'], {}), '(0, Cb - Cs)\n', (5503, 5515), True, 'import numpy as np\n'), ((6965, 6984), 'numpy.max', 'np.max', (['rgb'], {'axis': '(2)'}), '(rgb, axis=2)\n', (6971, 6984), True, 'import numpy as np\n'), ((6996, 7015), 'numpy.min', 'np.min', (['rgb'], {'axis': '(2)'}), '(rgb, axis=2)\n', (7002, 7015), True, 'import numpy as np\n'), ((7112, 7128), 'numpy.zeros_like', 'np.zeros_like', (['l'], {}), '(l)\n', (7125, 7128), True, 'import numpy as np\n'), ((7137, 7153), 'numpy.zeros_like', 'np.zeros_like', (['l'], {}), '(l)\n', (7150, 7153), True, 'import numpy as np\n'), ((8787, 8814), 'numpy.stack', 'np.stack', (['(r, g, b)'], {'axis': '(2)'}), '((r, g, b), axis=2)\n', (8795, 8814), True, 'import numpy as np\n'), ((3418, 3466), 'numpy.minimum', 'np.minimum', (['(1)', '((1 - Cb[index]) / (s * Cs[index]))'], {}), '(1, (1 - Cb[index]) / (s * Cs[index]))\n', (3428, 3466), True, 'import numpy as np\n'), ((8013, 8023), 'numpy.copy', 'np.copy', (['l'], {}), '(l)\n', (8020, 8023), True, 'import numpy as np\n'), ((8025, 8035), 'numpy.copy', 'np.copy', (['l'], {}), '(l)\n', (8032, 8035), True, 'import numpy as np\n'), ((8037, 8047), 'numpy.copy', 'np.copy', (['l'], {}), '(l)\n', (8044, 8047), True, 'import numpy as np\n'), ((8253, 8264), 'numpy.copy', 'np.copy', (['m1'], {}), '(m1)\n', (8260, 8264), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as la
# RGB color palette (floats in [0, 1]) used by the plotting code below.
bear_black = (0.141, 0.11, 0.11)
bear_white = (0.89, 0.856, 0.856)
magenta = (0xfc / 255, 0x75 / 255, 0xdb / 255)  # Brighter magenta
orange = (218 / 255, 171 / 255, 115 / 255)
green = (175 / 255, 219 / 255, 133 / 255)
white = (240 / 255, 245 / 255, 250 / 255)
blue1 = (70 / 255, 101 / 255, 137 / 255)
blue2 = (122 / 255, 174 / 255, 215 / 255)
def gsBasis(A):
    """Gram-Schmidt orthonormalization of the two columns of a 2-column matrix.

    Returns a new float array whose first column is A[:, 0] normalized and
    whose second column is the unit component of A[:, 1] orthogonal to the
    first, or the zero vector when A[:, 1] is (numerically) linearly
    dependent on A[:, 0].
    """
    # FIX: np.float_ was removed in NumPy 2.0; np.float64 is the same dtype.
    B = np.array(A, dtype=np.float64)
    B[:, 0] = B[:, 0] / la.norm(B[:, 0])
    # Subtract the projection of the second column onto the first.
    B[:, 1] = B[:, 1] - B[:, 1] @ B[:, 0] * B[:, 0]
    if la.norm(B[:, 1]) > 1e-14:
        B[:, 1] = B[:, 1] / la.norm(B[:, 1])
    else:
        # Dependent column: no orthogonal direction remains.
        B[:, 1] = np.zeros_like(B[:, 1])
    return B
def draw_mirror(bearVectors):
fig, ax = plt.subplots(figsize=(12, 12), dpi=80)
ax.set_xlim([-3.50, 3.50])
ax.set_ylim([-3.50, 3.50])
ax.set_aspect(1)
# ax.set_axis_bgcolor(blue1)
ax.set_facecolor(blue1)
gs = gsBasis(bearVectors)
ax.plot([gs[0, 0] * -5, gs[0, 0] * 5], [gs[1, 0] * -5, gs[1, 0] * 5], lw=2, color=green, zorder=4)
ax.fill([
-5 * gs[0, 0], -5 * gs[0, 0] - 5 * gs[0, 1], 5 * gs[0, 0] - 5 * gs[0, 1], 5 * gs[0, 0]
], [
-5 * gs[1, 0], -5 * gs[1, 0] - 5 * gs[1, 1], 5 * gs[1, 0] - 5 * gs[1, 1], 5 * gs[1, 0]
], color=blue2, zorder=0)
ax.arrow(0, 0, bearVectors[0, 0], bearVectors[1, 0], lw=3, color=orange, zorder=5, head_width=0.1)
ax.arrow(0, 0, bearVectors[0, 1], bearVectors[1, 1], lw=3, color=orange, zorder=5, head_width=0.1)
ax.arrow(0, 0, gs[0, 0], gs[1, 0], lw=3, color=magenta, zorder=6, head_width=0.1)
ax.arrow(0, 0, gs[0, 1], gs[1, 1], lw=3, color=magenta, zorder=6, head_width=0.1)
return ax
bear_black_fur = np.array(
[[2.0030351, 2.229253, 2.1639012, 2.0809546, 1.9728726,
1.8974666, 1.8924396, 2.0030351, np.nan, 2.7017972,
2.8500957, 2.9707453, 3.0159889, 2.94561, 2.8299874,
2.7017972, np.nan, 2.1639012, 2.2317666, 2.3147132,
2.299632, 2.2493613, 2.1890365, 2.1211711, 2.1337387,
2.1639012, np.nan, 2.4982011, 2.5610936, 2.6213642,
2.633986, 2.5536071, 2.5057417, 2.4982011, np.nan,
2.2468478, 2.3247673, 2.4429034, 2.4303357, 2.3448755,
2.2820372, 2.2468478, np.nan, 2.1966706, 2.2722074,
2.4055076, 2.481933, 2.449941, 2.4001756, 2.3237501,
2.222442, 2.1984479, 2.1966706, np.nan, 1.847196,
1.7818441, 1.7290599, 1.6310321, 1.4575984, 1.3369488,
1.2791375, 1.3671112, 1.8044659, 1.9577914, 2.2367936,
2.5962289, 2.7520679, 2.9028799, 3.4005595, 3.3150993,
3.0511783, 2.9531506, 2.8676905, 2.7746897, 2.4052003,
2.2795237, 2.1639012, 1.847196, np.nan, 2.0491517,
2.5112591, 2.3175294, 2.1326865, 2.0491517],
[-1.3186252, -1.0902537, -0.99238015, -0.96477475, -0.99488975,
-1.1153494, -1.2408283, -1.3186252, np.nan, -1.1881273,
-1.0852346, -1.1454645, -1.3286636, -1.4666904, -1.4641808,
-1.1881273, np.nan, -1.5545256, -1.5219011, -1.4014413,
-1.3512497, -1.3412115, -1.3989317, -1.4917862, -1.5419777,
-1.5545256, np.nan, -1.4265371, -1.3964222, -1.4968054,
-1.6097363, -1.64738, -1.5545256, -1.4265371, np.nan,
-1.6423608, -1.6699662, -1.677495, -1.7176483, -1.7477632,
-1.7176483, -1.6423608, np.nan, -1.7223509, -1.7622781,
-1.7764744, -1.7613908, -1.8767359, -1.9805465, -1.9991791,
-1.9672374, -1.913114, -1.7223509, np.nan, -1.5043341,
-1.5444873, -1.486767, -1.1504836, -1.0626484, -1.11284,
-1.2558858, -1.7452537, -2.3902152, -2.4378972, -2.3575907,
-2.1467861, -2.2446597, -2.5527822, -2.5527822, -2.1919586,
-1.7828973, -1.6850238, -1.677495, -1.8431272, -2.028836,
-2.0363647, -1.9485295, -1.5043341, np.nan, -2.5527822,
-2.5527822, -2.4570104, -2.4463632, -2.5527822]])
bear_white_fur = np.array(
[[2.229253, 2.4680387, 2.7017972, 2.8299874, 2.8676905,
2.7746897, 2.4052003, 2.2795237, 2.1639012, 1.847196,
2.0030351, 2.229253, np.nan, 1.8044659, 1.8974666,
2.0491517, 2.1326865, 2.3175294, 2.5112591, 2.9028799,
2.7520679, 2.5962289, 2.2367936, 1.9577914, 1.8044659],
[-1.0902537, -1.0601388, -1.1881273, -1.4641809, -1.677495,
-1.8431272, -2.028836, -2.0363647, -1.9485295, -1.5043341,
-1.3186252, -1.0902537, np.nan, -2.3902152, -2.5527822,
-2.5527822, -2.4463632, -2.4570104, -2.5527822, -2.5527822,
-2.2446597, -2.1467861, -2.3575907, -2.4378972, -2.3902152]])
bear_face = np.array(
[[2.2419927, 2.2526567, 2.3015334, 2.3477442, 2.441943,
np.nan, 2.5258499, 2.5113971, 2.5327621, 2.5632387,
2.5780058, 2.5726645, 2.5475292, 2.5258499, np.nan,
2.2858075, 2.2704121, 2.2402497, 2.2283105, 2.2484187,
2.273554, 2.2858075],
[-1.7605035, -1.9432811, -1.9707865, -1.9654629, -1.781798,
np.nan, -1.4688862, -1.4942957, -1.5099806, -1.5112354,
-1.4877081, -1.466063, -1.4588479, -1.4688862, np.nan,
-1.4346933, -1.4506918, -1.4463002, -1.418381, -1.4055194,
-1.4083427, -1.4346933]])
| [
"matplotlib.use",
"numpy.array",
"numpy.linalg.norm",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] | [((19, 40), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (33, 40), False, 'import matplotlib\n'), ((1770, 3781), 'numpy.array', 'np.array', (['[[2.0030351, 2.229253, 2.1639012, 2.0809546, 1.9728726, 1.8974666, \n 1.8924396, 2.0030351, np.nan, 2.7017972, 2.8500957, 2.9707453, \n 3.0159889, 2.94561, 2.8299874, 2.7017972, np.nan, 2.1639012, 2.2317666,\n 2.3147132, 2.299632, 2.2493613, 2.1890365, 2.1211711, 2.1337387, \n 2.1639012, np.nan, 2.4982011, 2.5610936, 2.6213642, 2.633986, 2.5536071,\n 2.5057417, 2.4982011, np.nan, 2.2468478, 2.3247673, 2.4429034, \n 2.4303357, 2.3448755, 2.2820372, 2.2468478, np.nan, 2.1966706, \n 2.2722074, 2.4055076, 2.481933, 2.449941, 2.4001756, 2.3237501, \n 2.222442, 2.1984479, 2.1966706, np.nan, 1.847196, 1.7818441, 1.7290599,\n 1.6310321, 1.4575984, 1.3369488, 1.2791375, 1.3671112, 1.8044659, \n 1.9577914, 2.2367936, 2.5962289, 2.7520679, 2.9028799, 3.4005595, \n 3.3150993, 3.0511783, 2.9531506, 2.8676905, 2.7746897, 2.4052003, \n 2.2795237, 2.1639012, 1.847196, np.nan, 2.0491517, 2.5112591, 2.3175294,\n 2.1326865, 2.0491517], [-1.3186252, -1.0902537, -0.99238015, -\n 0.96477475, -0.99488975, -1.1153494, -1.2408283, -1.3186252, np.nan, -\n 1.1881273, -1.0852346, -1.1454645, -1.3286636, -1.4666904, -1.4641808, \n -1.1881273, np.nan, -1.5545256, -1.5219011, -1.4014413, -1.3512497, -\n 1.3412115, -1.3989317, -1.4917862, -1.5419777, -1.5545256, np.nan, -\n 1.4265371, -1.3964222, -1.4968054, -1.6097363, -1.64738, -1.5545256, -\n 1.4265371, np.nan, -1.6423608, -1.6699662, -1.677495, -1.7176483, -\n 1.7477632, -1.7176483, -1.6423608, np.nan, -1.7223509, -1.7622781, -\n 1.7764744, -1.7613908, -1.8767359, -1.9805465, -1.9991791, -1.9672374, \n -1.913114, -1.7223509, np.nan, -1.5043341, -1.5444873, -1.486767, -\n 1.1504836, -1.0626484, -1.11284, -1.2558858, -1.7452537, -2.3902152, -\n 2.4378972, -2.3575907, -2.1467861, -2.2446597, -2.5527822, -2.5527822, \n -2.1919586, -1.7828973, -1.6850238, -1.677495, -1.8431272, 
-2.028836, -\n 2.0363647, -1.9485295, -1.5043341, np.nan, -2.5527822, -2.5527822, -\n 2.4570104, -2.4463632, -2.5527822]]'], {}), '([[2.0030351, 2.229253, 2.1639012, 2.0809546, 1.9728726, 1.8974666,\n 1.8924396, 2.0030351, np.nan, 2.7017972, 2.8500957, 2.9707453, \n 3.0159889, 2.94561, 2.8299874, 2.7017972, np.nan, 2.1639012, 2.2317666,\n 2.3147132, 2.299632, 2.2493613, 2.1890365, 2.1211711, 2.1337387, \n 2.1639012, np.nan, 2.4982011, 2.5610936, 2.6213642, 2.633986, 2.5536071,\n 2.5057417, 2.4982011, np.nan, 2.2468478, 2.3247673, 2.4429034, \n 2.4303357, 2.3448755, 2.2820372, 2.2468478, np.nan, 2.1966706, \n 2.2722074, 2.4055076, 2.481933, 2.449941, 2.4001756, 2.3237501, \n 2.222442, 2.1984479, 2.1966706, np.nan, 1.847196, 1.7818441, 1.7290599,\n 1.6310321, 1.4575984, 1.3369488, 1.2791375, 1.3671112, 1.8044659, \n 1.9577914, 2.2367936, 2.5962289, 2.7520679, 2.9028799, 3.4005595, \n 3.3150993, 3.0511783, 2.9531506, 2.8676905, 2.7746897, 2.4052003, \n 2.2795237, 2.1639012, 1.847196, np.nan, 2.0491517, 2.5112591, 2.3175294,\n 2.1326865, 2.0491517], [-1.3186252, -1.0902537, -0.99238015, -\n 0.96477475, -0.99488975, -1.1153494, -1.2408283, -1.3186252, np.nan, -\n 1.1881273, -1.0852346, -1.1454645, -1.3286636, -1.4666904, -1.4641808, \n -1.1881273, np.nan, -1.5545256, -1.5219011, -1.4014413, -1.3512497, -\n 1.3412115, -1.3989317, -1.4917862, -1.5419777, -1.5545256, np.nan, -\n 1.4265371, -1.3964222, -1.4968054, -1.6097363, -1.64738, -1.5545256, -\n 1.4265371, np.nan, -1.6423608, -1.6699662, -1.677495, -1.7176483, -\n 1.7477632, -1.7176483, -1.6423608, np.nan, -1.7223509, -1.7622781, -\n 1.7764744, -1.7613908, -1.8767359, -1.9805465, -1.9991791, -1.9672374, \n -1.913114, -1.7223509, np.nan, -1.5043341, -1.5444873, -1.486767, -\n 1.1504836, -1.0626484, -1.11284, -1.2558858, -1.7452537, -2.3902152, -\n 2.4378972, -2.3575907, -2.1467861, -2.2446597, -2.5527822, -2.5527822, \n -2.1919586, -1.7828973, -1.6850238, -1.677495, -1.8431272, -2.028836, -\n 2.0363647, -1.9485295, 
-1.5043341, np.nan, -2.5527822, -2.5527822, -\n 2.4570104, -2.4463632, -2.5527822]])\n', (1778, 3781), True, 'import numpy as np\n'), ((3872, 4486), 'numpy.array', 'np.array', (['[[2.229253, 2.4680387, 2.7017972, 2.8299874, 2.8676905, 2.7746897, \n 2.4052003, 2.2795237, 2.1639012, 1.847196, 2.0030351, 2.229253, np.nan,\n 1.8044659, 1.8974666, 2.0491517, 2.1326865, 2.3175294, 2.5112591, \n 2.9028799, 2.7520679, 2.5962289, 2.2367936, 1.9577914, 1.8044659], [-\n 1.0902537, -1.0601388, -1.1881273, -1.4641809, -1.677495, -1.8431272, -\n 2.028836, -2.0363647, -1.9485295, -1.5043341, -1.3186252, -1.0902537,\n np.nan, -2.3902152, -2.5527822, -2.5527822, -2.4463632, -2.4570104, -\n 2.5527822, -2.5527822, -2.2446597, -2.1467861, -2.3575907, -2.4378972, \n -2.3902152]]'], {}), '([[2.229253, 2.4680387, 2.7017972, 2.8299874, 2.8676905, 2.7746897,\n 2.4052003, 2.2795237, 2.1639012, 1.847196, 2.0030351, 2.229253, np.nan,\n 1.8044659, 1.8974666, 2.0491517, 2.1326865, 2.3175294, 2.5112591, \n 2.9028799, 2.7520679, 2.5962289, 2.2367936, 1.9577914, 1.8044659], [-\n 1.0902537, -1.0601388, -1.1881273, -1.4641809, -1.677495, -1.8431272, -\n 2.028836, -2.0363647, -1.9485295, -1.5043341, -1.3186252, -1.0902537,\n np.nan, -2.3902152, -2.5527822, -2.5527822, -2.4463632, -2.4570104, -\n 2.5527822, -2.5527822, -2.2446597, -2.1467861, -2.3575907, -2.4378972, \n -2.3902152]])\n', (3880, 4486), True, 'import numpy as np\n'), ((4521, 5056), 'numpy.array', 'np.array', (['[[2.2419927, 2.2526567, 2.3015334, 2.3477442, 2.441943, np.nan, 2.5258499, \n 2.5113971, 2.5327621, 2.5632387, 2.5780058, 2.5726645, 2.5475292, \n 2.5258499, np.nan, 2.2858075, 2.2704121, 2.2402497, 2.2283105, \n 2.2484187, 2.273554, 2.2858075], [-1.7605035, -1.9432811, -1.9707865, -\n 1.9654629, -1.781798, np.nan, -1.4688862, -1.4942957, -1.5099806, -\n 1.5112354, -1.4877081, -1.466063, -1.4588479, -1.4688862, np.nan, -\n 1.4346933, -1.4506918, -1.4463002, -1.418381, -1.4055194, -1.4083427, -\n 1.4346933]]'], {}), '([[2.2419927, 
2.2526567, 2.3015334, 2.3477442, 2.441943, np.nan, \n 2.5258499, 2.5113971, 2.5327621, 2.5632387, 2.5780058, 2.5726645, \n 2.5475292, 2.5258499, np.nan, 2.2858075, 2.2704121, 2.2402497, \n 2.2283105, 2.2484187, 2.273554, 2.2858075], [-1.7605035, -1.9432811, -\n 1.9707865, -1.9654629, -1.781798, np.nan, -1.4688862, -1.4942957, -\n 1.5099806, -1.5112354, -1.4877081, -1.466063, -1.4588479, -1.4688862,\n np.nan, -1.4346933, -1.4506918, -1.4463002, -1.418381, -1.4055194, -\n 1.4083427, -1.4346933]])\n', (4529, 5056), True, 'import numpy as np\n'), ((489, 517), 'numpy.array', 'np.array', (['A'], {'dtype': 'np.float_'}), '(A, dtype=np.float_)\n', (497, 517), True, 'import numpy as np\n'), ((799, 837), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 12)', 'dpi': '(80)'}), '(figsize=(12, 12), dpi=80)\n', (811, 837), True, 'import matplotlib.pyplot as plt\n'), ((542, 558), 'numpy.linalg.norm', 'la.norm', (['B[:, 0]'], {}), '(B[:, 0])\n', (549, 558), True, 'import numpy.linalg as la\n'), ((618, 634), 'numpy.linalg.norm', 'la.norm', (['B[:, 1]'], {}), '(B[:, 1])\n', (625, 634), True, 'import numpy.linalg as la\n'), ((717, 739), 'numpy.zeros_like', 'np.zeros_like', (['B[:, 1]'], {}), '(B[:, 1])\n', (730, 739), True, 'import numpy as np\n'), ((672, 688), 'numpy.linalg.norm', 'la.norm', (['B[:, 1]'], {}), '(B[:, 1])\n', (679, 688), True, 'import numpy.linalg as la\n')] |
import cv2, time
#TODO: fix ipcam
#import urllib2, base64
import numpy as np
class ipCamera(object):
def __init__(self,url, user = None, password = None):
self.url = url
auth_encoded = base64.encodestring('%s:%s' % (user, password))[:-1]
self.req = urllib2.Request(self.url)
self.req.add_header('Authorization', 'Basic %s' % auth_encoded)
def get_frame(self):
response = urllib2.urlopen(self.req)
img_array = np.asarray(bytearray(response.read()), dtype=np.uint8)
frame = cv2.imdecode(img_array, 1)
return frame
class Camera(object):
def __init__(self, camera = 0):
self.cam = cv2.VideoCapture(camera)
self.valid = False
try:
resp = self.cam.read()
self.shape = resp[1].shape
self.valid = True
except:
self.shape = None
def get_frame(self):
if self.valid:
_,frame = self.cam.read()
else:
frame = np.ones((480,640,3), dtype=np.uint8)
col = (0,256,256)
cv2.putText(frame, "(Error: Camera not accessible)",
(65,220), cv2.FONT_HERSHEY_PLAIN, 2, col)
return frame
def release(self):
self.cam.release() | [
"numpy.ones",
"cv2.VideoCapture",
"cv2.imdecode",
"cv2.putText"
] | [((540, 566), 'cv2.imdecode', 'cv2.imdecode', (['img_array', '(1)'], {}), '(img_array, 1)\n', (552, 566), False, 'import cv2, time\n'), ((667, 691), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera'], {}), '(camera)\n', (683, 691), False, 'import cv2, time\n'), ((1003, 1041), 'numpy.ones', 'np.ones', (['(480, 640, 3)'], {'dtype': 'np.uint8'}), '((480, 640, 3), dtype=np.uint8)\n', (1010, 1041), True, 'import numpy as np\n'), ((1082, 1182), 'cv2.putText', 'cv2.putText', (['frame', '"""(Error: Camera not accessible)"""', '(65, 220)', 'cv2.FONT_HERSHEY_PLAIN', '(2)', 'col'], {}), "(frame, '(Error: Camera not accessible)', (65, 220), cv2.\n FONT_HERSHEY_PLAIN, 2, col)\n", (1093, 1182), False, 'import cv2, time\n')] |
"""
Test the multi-PCA module
"""
import numpy as np
from nose.tools import assert_raises
import nibabel
from nilearn.decomposition.multi_pca import MultiPCA
from nilearn.input_data import MultiNiftiMasker
def test_multi_pca():
# Smoke test the MultiPCA
# XXX: this is mostly a smoke test
shape = (6, 8, 10, 5)
affine = np.eye(4)
rng = np.random.RandomState(0)
# Create a "multi-subject" dataset
data = []
for i in range(8):
this_data = rng.normal(size=shape)
# Create fake activation to get non empty mask
this_data[2:4, 2:4, 2:4, :] += 10
data.append(nibabel.Nifti1Image(this_data, affine))
mask_img = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine)
multi_pca = MultiPCA(mask=mask_img, n_components=3)
# Test that the components are the same if we put twice the same data
components1 = multi_pca.fit(data).components_
components2 = multi_pca.fit(2 * data).components_
np.testing.assert_array_almost_equal(components1, components2)
# Smoke test fit with 'confounds' argument
confounds = [np.arange(10).reshape(5, 2)] * 8
multi_pca.fit(data, confounds=confounds)
# Smoke test that multi_pca also works with single subject data
multi_pca.fit(data[0])
# Check that asking for too little components raises a ValueError
multi_pca = MultiPCA()
assert_raises(ValueError, multi_pca.fit, data[:2])
# Smoke test the use of a masker and without CCA
multi_pca = MultiPCA(mask=MultiNiftiMasker(mask_args=dict(opening=0)),
do_cca=False, n_components=3)
multi_pca.fit(data[:2])
# Smoke test the transform and inverse_transform
multi_pca.inverse_transform(multi_pca.transform(data[-2:]))
# Smoke test to fit with no img
assert_raises(TypeError, multi_pca.fit)
| [
"numpy.eye",
"numpy.testing.assert_array_almost_equal",
"numpy.ones",
"numpy.arange",
"nose.tools.assert_raises",
"nilearn.decomposition.multi_pca.MultiPCA",
"nibabel.Nifti1Image",
"numpy.random.RandomState"
] | [((341, 350), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (347, 350), True, 'import numpy as np\n'), ((361, 385), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (382, 385), True, 'import numpy as np\n'), ((758, 797), 'nilearn.decomposition.multi_pca.MultiPCA', 'MultiPCA', ([], {'mask': 'mask_img', 'n_components': '(3)'}), '(mask=mask_img, n_components=3)\n', (766, 797), False, 'from nilearn.decomposition.multi_pca import MultiPCA\n'), ((981, 1043), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['components1', 'components2'], {}), '(components1, components2)\n', (1017, 1043), True, 'import numpy as np\n'), ((1370, 1380), 'nilearn.decomposition.multi_pca.MultiPCA', 'MultiPCA', ([], {}), '()\n', (1378, 1380), False, 'from nilearn.decomposition.multi_pca import MultiPCA\n'), ((1385, 1435), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'multi_pca.fit', 'data[:2]'], {}), '(ValueError, multi_pca.fit, data[:2])\n', (1398, 1435), False, 'from nose.tools import assert_raises\n'), ((1807, 1846), 'nose.tools.assert_raises', 'assert_raises', (['TypeError', 'multi_pca.fit'], {}), '(TypeError, multi_pca.fit)\n', (1820, 1846), False, 'from nose.tools import assert_raises\n'), ((699, 732), 'numpy.ones', 'np.ones', (['shape[:3]'], {'dtype': 'np.int8'}), '(shape[:3], dtype=np.int8)\n', (706, 732), True, 'import numpy as np\n'), ((623, 661), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['this_data', 'affine'], {}), '(this_data, affine)\n', (642, 661), False, 'import nibabel\n'), ((1109, 1122), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1118, 1122), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Generic code for a classifier
#
# Subscribes to a feature vector (custom_msgs/Float32MultiArray) and a label (custom_msgs/String)
# Uses upcoming feature data to fit a classifier to predict the label
# Interface with topic command (Start/Stop learning)
import rospy
import numpy as np
import signal
import sys
import threading
import os
from EpicToolbox import FileManager,mkdirfile
from std_msgs.msg import String as StdString
from std_msgs.msg import Header
from custom_msgs.msg import String, Float32MultiArray
from datetime import date
# MODEL DEPENDENT CODE ? WRAP TO CLASS?
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from joblib import dump, load
from copy import deepcopy
from scipy import io
##################### ROS MESSAGES AND PUBLISHERS ##############################
stringmsg=String()
std_stringmsg=StdString()
labelpub = rospy.Publisher('prediction',String,queue_size=1)
logpub = rospy.Publisher('log', StdString, queue_size=50)
################################################################################
labels=[]
label=None
active_model=None
lock=threading.Lock()
learning = False
MAX_SAMPLES=100 #Number of samples per class to hold in memory
size=None #Size of the feature vector
memory=dict() #Sample data
numSamples=dict() #Number of samples
VERBOSE=2
# Setup a Rosbag
path=os.path.join(os.environ['HOME'],date.today().strftime('%m_%d_%y'))
mkdirfile(path)
f=FileManager(path,PathStructure=['Type','File'])
#rosparam=Rosparam('/')
############################ ROS CALLBACKS #####################################
def learning_callback(msg):
'''Enable or disable learning'''
global learning
if msg.data=='START':
printAndLog('Learning enabled')
learning=True
elif msg.data=='STOP':
printAndLog('Learning disabled')
learning=False
def label_callback(msg):
global labels,label,size,memory,numSamples,active_model
print('Label:{}'.format(msg.data))
lock.acquire()
label=msg.data
if label in labels:
pass
else:
print('\t New label to the classifier')
if size==None:
lock.release()
return
labels.append(label)
memory[label]=np.zeros((MAX_SAMPLES,size))
numSamples[label]=0
active_model=None #Reset the model since the number of labels changed
lock.release()
def labelstd_callback(msg):
stringmsg.header.stamp=rospy.Time.now()
stringmsg.data=msg.data
label_callback(stringmsg)
def features_callback(msg):
''' Get a new feature sample and incorporate the sample in memory'''
global active_model,labels,label,memory,numSamples,size,learning
if learning == False:
size=msg.layout.dim[0].size
if learning == True:
lock.acquire()
if label==None:
size=msg.layout.dim[0].size
lock.release()
return
# Add the sample to the buffers for the corresponding label
x=memory[label]
idx=numSamples[label]
if idx<MAX_SAMPLES:
x[idx,:]=msg.data
numSamples[label]=numSamples[label]+1
else:
x=np.roll(x,1,axis=0)
x[0,:]=msg.data
memory[label]=x
numSamples[label]=numSamples[label]+1
lock.release()
# Compute the prediction from the active model
if active_model==None:
return
lock.acquire()
out=active_model.predict(np.array([msg.data]))
lock.release()
stringmsg.header.stamp=rospy.Time.now()
stringmsg.data=out[0]
labelpub.publish(stringmsg)
#publish output
######################## HELPER FUNCTIONS ######################################
def signal_handler(sig,frame):
''' Terminate the connection to eris and close the node'''
print('Ctrl+c')
rosbag.stop()
sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
def printAndLog(strdata):
''' Print and publish string data to the log '''
print(strdata)
std_stringmsg.data=strdata
logpub.publish(std_stringmsg)
def memory2xy(memory):
'''Convert the data from memory to a x,y tables for fitting a model'''
labels=memory.keys()
x=[]
y=[]
for l in labels:
x.append(memory[l])
y.append([l]*memory[l].shape[0])
x=np.concatenate(x)
y=np.concatenate(y)
return x,y
def retrain(memory):
global active_model
mdl = deepcopy(active_model)
x,y=memory2xy(memory)
mdl.partial_fit(x,y)
return mdl
def train(memory):
lr=0.05
tol=0.001
mdl=MLPClassifier(hidden_layer_sizes=(10,10),max_iter=300,learning_rate_init=lr,tol=tol,verbose=VERBOSE)
x,y=memory2xy(memory)
mdl.fit(x,y)
return mdl
def CheckPoint():
global active_model,memory
'''Save a copy of the current model and the data in memory'''
modelfiles=f.fileList({'Type':'model','File':'*.mdl'})
nextmodel=f.genList({'Type':'model','File':'{:01d}.mdl'.format(len(modelfiles)+1)})
nextmemory=f.modFileList(nextmodel,{'Type':'memory','ext':'mat'})
#Save the model
printAndLog('Model checkpoint ({})'.format(nextmodel[0]))
print(nextmemory)
mkdirfile(nextmemory[0])
mkdirfile(nextmodel[0])
dump(active_model,nextmodel[0])
io.savemat(nextmemory[0],{'data':memory})
################################################################################
''' Main loop'''
rospy.init_node('classifier', anonymous=True)
labelsub = rospy.Subscriber('label',String,label_callback)
labelstdsub = rospy.Subscriber('labelstd',StdString,labelstd_callback)
learningsub = rospy.Subscriber('train',String,learning_callback)
learningstdsub = rospy.Subscriber('trainstd',StdString,learning_callback)
featuressub = rospy.Subscriber('features',Float32MultiArray,features_callback)
ROSRATE= 1 #Hz
rate = rospy.Rate(ROSRATE)
rate.sleep()
elapsed=0
lasttime=rospy.Time.now()
# Restore from previous model
models=f.fileList({'Type':'model','File':'*.mdl'})
if len(models)>0:
print('Previous models found:\n\t{}'.format(models))
memoryfiles=f.modFileList(models,{'Type':'memory','ext':'mat'})
active_model=load(models[-1])
memory=io.loadmat(memoryfiles[-1])
print(memory.keys())
count=0
while True:
time=rospy.Time.now()
if ((not label==None) and (label in numSamples.keys())):
printAndLog('label={}({}samples)'.format(label,numSamples[label]))
if labels==[]:
rate.sleep()
continue
n=[numSamples[l] for l in labels]
print(n)
if (learning and (count % 5 == 0)):
if (active_model==None) and (np.all(np.greater(n,MAX_SAMPLES))):
mdl=train(memory)
lock.acquire()
active_model=deepcopy(mdl)
lock.release()
CheckPoint()
elif active_model!=None:
mdl=retrain(memory)
lock.acquire()
active_model=mdl
lock.release()
CheckPoint()
count=count+1
#Wait a second
rate.sleep()
| [
"scipy.io.savemat",
"rospy.init_node",
"scipy.io.loadmat",
"numpy.array",
"rospy.Rate",
"copy.deepcopy",
"sys.exit",
"numpy.greater",
"threading.Lock",
"numpy.concatenate",
"joblib.load",
"rospy.Subscriber",
"joblib.dump",
"rospy.Time.now",
"EpicToolbox.mkdirfile",
"rospy.Publisher",
... | [((914, 922), 'custom_msgs.msg.String', 'String', ([], {}), '()\n', (920, 922), False, 'from custom_msgs.msg import String, Float32MultiArray\n'), ((937, 948), 'std_msgs.msg.String', 'StdString', ([], {}), '()\n', (946, 948), True, 'from std_msgs.msg import String as StdString\n'), ((960, 1011), 'rospy.Publisher', 'rospy.Publisher', (['"""prediction"""', 'String'], {'queue_size': '(1)'}), "('prediction', String, queue_size=1)\n", (975, 1011), False, 'import rospy\n'), ((1019, 1067), 'rospy.Publisher', 'rospy.Publisher', (['"""log"""', 'StdString'], {'queue_size': '(50)'}), "('log', StdString, queue_size=50)\n", (1034, 1067), False, 'import rospy\n'), ((1193, 1209), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1207, 1209), False, 'import threading\n'), ((1493, 1508), 'EpicToolbox.mkdirfile', 'mkdirfile', (['path'], {}), '(path)\n', (1502, 1508), False, 'from EpicToolbox import FileManager, mkdirfile\n'), ((1511, 1560), 'EpicToolbox.FileManager', 'FileManager', (['path'], {'PathStructure': "['Type', 'File']"}), "(path, PathStructure=['Type', 'File'])\n", (1522, 1560), False, 'from EpicToolbox import FileManager, mkdirfile\n'), ((3931, 3975), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (3944, 3975), False, 'import signal\n'), ((5468, 5513), 'rospy.init_node', 'rospy.init_node', (['"""classifier"""'], {'anonymous': '(True)'}), "('classifier', anonymous=True)\n", (5483, 5513), False, 'import rospy\n'), ((5525, 5574), 'rospy.Subscriber', 'rospy.Subscriber', (['"""label"""', 'String', 'label_callback'], {}), "('label', String, label_callback)\n", (5541, 5574), False, 'import rospy\n'), ((5587, 5645), 'rospy.Subscriber', 'rospy.Subscriber', (['"""labelstd"""', 'StdString', 'labelstd_callback'], {}), "('labelstd', StdString, labelstd_callback)\n", (5603, 5645), False, 'import rospy\n'), ((5658, 5710), 'rospy.Subscriber', 'rospy.Subscriber', (['"""train"""', 'String', 'learning_callback'], 
{}), "('train', String, learning_callback)\n", (5674, 5710), False, 'import rospy\n'), ((5726, 5784), 'rospy.Subscriber', 'rospy.Subscriber', (['"""trainstd"""', 'StdString', 'learning_callback'], {}), "('trainstd', StdString, learning_callback)\n", (5742, 5784), False, 'import rospy\n'), ((5797, 5863), 'rospy.Subscriber', 'rospy.Subscriber', (['"""features"""', 'Float32MultiArray', 'features_callback'], {}), "('features', Float32MultiArray, features_callback)\n", (5813, 5863), False, 'import rospy\n'), ((5885, 5904), 'rospy.Rate', 'rospy.Rate', (['ROSRATE'], {}), '(ROSRATE)\n', (5895, 5904), False, 'import rospy\n'), ((5938, 5954), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (5952, 5954), False, 'import rospy\n'), ((2517, 2533), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2531, 2533), False, 'import rospy\n'), ((3604, 3620), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3618, 3620), False, 'import rospy\n'), ((3919, 3930), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3927, 3930), False, 'import sys\n'), ((4377, 4394), 'numpy.concatenate', 'np.concatenate', (['x'], {}), '(x)\n', (4391, 4394), True, 'import numpy as np\n'), ((4401, 4418), 'numpy.concatenate', 'np.concatenate', (['y'], {}), '(y)\n', (4415, 4418), True, 'import numpy as np\n'), ((4490, 4512), 'copy.deepcopy', 'deepcopy', (['active_model'], {}), '(active_model)\n', (4498, 4512), False, 'from copy import deepcopy\n'), ((4633, 4743), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(10, 10)', 'max_iter': '(300)', 'learning_rate_init': 'lr', 'tol': 'tol', 'verbose': 'VERBOSE'}), '(hidden_layer_sizes=(10, 10), max_iter=300, learning_rate_init\n =lr, tol=tol, verbose=VERBOSE)\n', (4646, 4743), False, 'from sklearn.neural_network import MLPClassifier\n'), ((5234, 5258), 'EpicToolbox.mkdirfile', 'mkdirfile', (['nextmemory[0]'], {}), '(nextmemory[0])\n', (5243, 5258), False, 'from EpicToolbox import FileManager, mkdirfile\n'), ((5263, 
5286), 'EpicToolbox.mkdirfile', 'mkdirfile', (['nextmodel[0]'], {}), '(nextmodel[0])\n', (5272, 5286), False, 'from EpicToolbox import FileManager, mkdirfile\n'), ((5291, 5323), 'joblib.dump', 'dump', (['active_model', 'nextmodel[0]'], {}), '(active_model, nextmodel[0])\n', (5295, 5323), False, 'from joblib import dump, load\n'), ((5327, 5370), 'scipy.io.savemat', 'io.savemat', (['nextmemory[0]', "{'data': memory}"], {}), "(nextmemory[0], {'data': memory})\n", (5337, 5370), False, 'from scipy import io\n'), ((6195, 6211), 'joblib.load', 'load', (['models[-1]'], {}), '(models[-1])\n', (6199, 6211), False, 'from joblib import dump, load\n'), ((6222, 6249), 'scipy.io.loadmat', 'io.loadmat', (['memoryfiles[-1]'], {}), '(memoryfiles[-1])\n', (6232, 6249), False, 'from scipy import io\n'), ((6305, 6321), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (6319, 6321), False, 'import rospy\n'), ((2307, 2336), 'numpy.zeros', 'np.zeros', (['(MAX_SAMPLES, size)'], {}), '((MAX_SAMPLES, size))\n', (2315, 2336), True, 'import numpy as np\n'), ((3536, 3556), 'numpy.array', 'np.array', (['[msg.data]'], {}), '([msg.data])\n', (3544, 3556), True, 'import numpy as np\n'), ((1458, 1470), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1468, 1470), False, 'from datetime import date\n'), ((3244, 3265), 'numpy.roll', 'np.roll', (['x', '(1)'], {'axis': '(0)'}), '(x, 1, axis=0)\n', (3251, 3265), True, 'import numpy as np\n'), ((6763, 6776), 'copy.deepcopy', 'deepcopy', (['mdl'], {}), '(mdl)\n', (6771, 6776), False, 'from copy import deepcopy\n'), ((6652, 6678), 'numpy.greater', 'np.greater', (['n', 'MAX_SAMPLES'], {}), '(n, MAX_SAMPLES)\n', (6662, 6678), True, 'import numpy as np\n')] |
import unittest
from rcwa import Source, Layer, Plotter, Crystal, Solver, LayerStack
from rcwa.shorthand import *
from rcwa.testing import *
from rcwa.matrices import *
from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile
import numpy as np
class TestSolver(unittest.TestCase):
def testSetupSource(self):
kIncidentActual = complexArray([1.0607, 0.61237, 0.70711])
kIncidentCalculated = self.solver.source.kIncident
assertAlmostEqual(kIncidentActual, kIncidentCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: kIncident")
def testSetupKMatrices(self):
KxActual = self.Kx
KxCalculated = self.solver.Kx
assertAlmostEqual(KxActual, KxCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: Kx")
KyActual = self.Ky
KyCalculated = self.solver.Ky
assertAlmostEqual(KyActual, KyCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: Ky")
KzActual = self.KzReflectionRegion
KzCalculated = self.solver.KzReflectionRegion
assertAlmostEqual(KzActual, KzCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: KzReflectionRegion")
KzActual = self.KzTransmissionRegion
KzCalculated = self.solver.KzTransmissionRegion
assertAlmostEqual(KzActual, KzCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: KzTransmissionRegion")
KzActual = self.KzGapRegion
KzCalculated = self.solver.KzGapRegion
assertAlmostEqual(KzActual, KzCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: KzGapRegion")
def testEdgeSMatrices(self):
self.solver.Solve()
SActual = self.SReflectionRegion
SCalculated = self.solver.SReflection
assertAlmostEqual(SActual, SCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: SReflection")
self.solver.Solve()
SActual = self.STransmissionRegion
SCalculated = self.solver.STransmission
assertAlmostEqual(SActual, SCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: STransmission")
def testInternalSMatrices(self):
self.solver.Solve()
SActual = self.SLayer1
SCalculated = self.solver.Si[0]
assertAlmostEqual(SActual, SCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: Si[0]")
self.solver.Solve()
SActual = self.SLayer2
SCalculated = self.solver.Si[1]
assertAlmostEqual(SActual, SCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: Si[1]")
def testrtAmplitudeCoefficients(self):
self.solver.Solve()
# HACK - FOR SOME REASON MY PHASE IS OFF BY PI.
rxActual = -self.rx
ryActual = -self.ry
rzActual = -self.rz
(rxCalculated, ryCalculated, rzCalculated) = (self.solver.rx, self.solver.ry, self.solver.rz)
(txCalculated, tyCalculated, tzCalculated) = (self.solver.tx, self.solver.ty, self.solver.tz)
assertAlmostEqual(rxActual, rxCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: rx")
assertAlmostEqual(ryActual, ryCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: ry")
assertAlmostEqual(rzActual, rzCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: rz")
txActual = -self.tx
tyActual = -self.ty
tzActual = -self.tz
(rxCalculated, ryCalculated, rzCalculated) = (self.solver.rx, self.solver.ry, self.solver.rz)
(txCalculated, tyCalculated, tzCalculated) = (self.solver.tx, self.solver.ty, self.solver.tz)
(R, T) = (self.solver.R, self.solver.T)
assertAlmostEqual(txActual, txCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: tx")
assertAlmostEqual(tyActual, tyCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: ty")
assertAlmostEqual(tzActual, tzCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: tz")
def testDiffractionEfficiencies(self):
self.solver.Solve()
RActual = self.R
TActual = self.T
(RCalculated, TCalculated) = (self.solver.R, self.solver.T)
assertAlmostEqual(RActual, RCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: R")
assertAlmostEqual(TActual, TCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: T")
RTotActual = self.RTot
TTotActual = self.TTot
CTotActual = 1.0
RTotCalculated = self.solver.RTot
TTotCalculated = self.solver.TTot
CTotCalculated = self.solver.conservation
assertAlmostEqual(RTotActual, RTotCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: RTot")
assertAlmostEqual(TTotActual, TTotCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: TTot")
assertAlmostEqual(CTotActual, CTotCalculated, 1e-7, 1e-7, "testSolver: Conservation Violated")
def testIntegrationMultiWavelength(self):
testWavelengths = self.solver.source.wavelength*np.arange(0.2,2,0.01)
self.solver.Solve(testWavelengths)
#Plotter.plotReflectionSpectra(self.solver.results)
    def setUp(self):
        """Build a fresh two-layer crystal device and solver for each test."""
        self.absoluteTolerance = 1e-4
        self.relativeTolerance = 1e-3
        # Permittivity pattern is loaded from CSV; permeability is uniform 1.
        devicePermittivityCellData = np.transpose(np.loadtxt(testLocation + '/triangleData.csv', delimiter=','))
        devicePermeabilityCellData = 1 + 0 * devicePermittivityCellData
        reflectionLayer = Layer(er=2.0, ur=1.0)
        transmissionLayer = Layer(er=9.0, ur=1.0)
        # NOTE: t1 AND t2 MUST BE NORMALIZED BY MULTIPLYING BY k0, OTHERWISE THIS WILL NOT WORK, AS
        # EVERYTHING WAS FORMULATED IN TERMS OF NORMALIZED WAVEVECTORS. I DON'T KNOW OF AN ELEGANT WAY
        # TO DO THIS OTHER THAN REQUIRING A CRYSTAL TO HAVE A SOURCE AS THE INPUT. I DON'T KNOW OF
        # AN EASY WAY TO FIX THIS. I'M GOING TO FUDGE IT FOR NOW.
        # Oblique-incidence source: theta=60deg, phi=30deg, circular-ish pTEM.
        wavelength = 2
        k0 = 2*pi/wavelength
        theta = 60 * deg
        phi = 30*deg
        pTEM = 1/sqrt(2)*complexArray([1,1j])
        source = Source(wavelength=wavelength, theta=theta, phi=phi, pTEM=pTEM, layer=reflectionLayer)
        # Lattice vectors and layer thicknesses of the reference device.
        t1, t2 = complexArray([1.75, 0, 0]), complexArray([0, 1.5, 0])
        thicknessLayer1 = 0.5 # should be 0.5
        thicknessLayer2 = 0.3 # should be 0.3
        numberHarmonics = (3, 3)
        deviceCrystal = Crystal(devicePermittivityCellData, devicePermeabilityCellData, t1, t2)
        layer1 = Layer(crystal=deviceCrystal, L=thicknessLayer1, numberHarmonics=numberHarmonics)
        layer2 = Layer(er=6.0, ur=1.0, L=thicknessLayer2)
        layerStack = LayerStack(reflectionLayer, layer1, layer2, transmissionLayer)
        self.solver = Solver(layerStack, source, numberHarmonics)
    @classmethod
    def setUpClass(self):
        """
        Test fixture for loading in all the external test data.

        Loads the reference wavevector matrices, scattering matrices for
        each region/layer, amplitude coefficients, and diffraction
        efficiencies from files under testLocation; the tests compare the
        solver's output against these.
        """
        # Diagonal x/y wavevector component matrices for the 9-harmonic expansion.
        self.Kx = np.diag(complexArray(
            [2.2035, 1.0607, -0.0822, 2.2035, 1.0607, -0.0822, 2.2035, 1.0607, -0.0822]))
        self.Ky = np.diag(complexArray(
            [1.9457, 1.9457, 1.9457, 0.6124, 0.6124, 0.6124, -0.7210, -0.7210, -0.7210]))
        # z-components in the reflection, transmission and gap (free-space) regions.
        self.KzReflectionRegion = numpyArrayFromFile(
            testLocation + "/matrixDataOblique/reflectionRegion/KzReflectionRegion.txt")
        self.KzTransmissionRegion = np.diag(complexArray(
            [0.5989, 2.0222, 2.2820, 1.9415, 2.7386, 2.9357, 1.9039, 2.7121, 2.9109]))
        self.KzGapRegion = numpyArrayFromFile(
            testLocation + "/matrixDataOblique/freeSpace/KzFreeSpace.txt")
        # Reference global scattering matrix, assembled from its four quadrants.
        self.SGlobal11= numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/SGlobal11.txt")
        self.SGlobal12= numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/SGlobal12.txt")
        self.SGlobal21= numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/SGlobal21.txt")
        self.SGlobal22= numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/SGlobal22.txt")
        self.SGlobal = complexArray([
            [self.SGlobal11, self.SGlobal12],
            [self.SGlobal21, self.SGlobal22]])
        # Reflection-region scattering matrix quadrants.
        self.S11ReflectionRegion = numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/reflectionRegion/S11ReflectionRegion.txt")
        self.S12ReflectionRegion = numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/reflectionRegion/S12ReflectionRegion.txt")
        self.S21ReflectionRegion = numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/reflectionRegion/S21ReflectionRegion.txt")
        self.S22ReflectionRegion = numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/reflectionRegion/S22ReflectionRegion.txt")
        self.SReflectionRegion = complexArray([
            [self.S11ReflectionRegion, self.S12ReflectionRegion],
            [self.S21ReflectionRegion, self.S22ReflectionRegion]])
        # Transmission-region scattering matrix quadrants.
        self.S11TransmissionRegion = numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/transmissionRegion/S11TransmissionRegion.txt")
        self.S12TransmissionRegion = numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/transmissionRegion/S12TransmissionRegion.txt")
        self.S21TransmissionRegion = numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/transmissionRegion/S21TransmissionRegion.txt")
        self.S22TransmissionRegion = numpyArrayFromSeparatedColumnsFile(
            testLocation + "/matrixDataOblique/transmissionRegion/S22TransmissionRegion.txt")
        self.STransmissionRegion = complexArray([
            [self.S11TransmissionRegion, self.S12TransmissionRegion],
            [self.S21TransmissionRegion, self.S22TransmissionRegion]])
        # Per-layer scattering matrices for the two internal layers.
        self.S11Layer1 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer1/S11Layer1.txt")
        self.S12Layer1 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer1/S12Layer1.txt")
        self.S21Layer1 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer1/S21Layer1.txt")
        self.S22Layer1 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer1/S22Layer1.txt")
        self.SLayer1 = complexArray([
            [self.S11Layer1, self.S12Layer1],
            [self.S21Layer1, self.S22Layer1]])
        self.S11Layer2 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer2/S11Layer2.txt")
        self.S12Layer2 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer2/S12Layer2.txt")
        self.S21Layer2 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer2/S21Layer2.txt")
        self.S22Layer2 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer2/S22Layer2.txt")
        self.SLayer2 = complexArray([
            [self.S11Layer2, self.S12Layer2],
            [self.S21Layer2, self.S22Layer2]])
        self.DLayer12= np.loadtxt(testLocation + '/matrixDataOblique/layer2/D12.csv', dtype=np.cdouble)
        self.FLayer12= np.loadtxt(testLocation + '/matrixDataOblique/layer2/F12.csv', dtype=np.cdouble)
        # Reference reflection/transmission amplitude coefficients per harmonic.
        self.rx = complexArray([-0.0187- 0.0155j, 0.0486 - 0.0467j, 0.0016 + 0.0012j,
            0.0324 - 0.0229j, -0.1606 - 0.0348j, -0.0089 + 0.0156j,
            0.0020 + 0.0105j, 0.0076 + 0.0187j, -0.0027 - 0.0129j])
        self.ry = complexArray([-0.0077 - 0.0106j, 0.0184 + 0.0323j, -0.0267 - 0.0070j,
            -0.0286 + 0.0472j, 0.2335 + 0.0138j, 0.0243 + 0.0164j,
            0.0435 - 0.018j, 0.0183 + 0.0146j, -0.0062 + 0.0011j])
        self.rz = complexArray([0.0213 - 0.0218j, -0.0078 + 0.0512j, 0.0103 - 0.0388j,
            0.0120 + 0.0300j, -0.0386 - 0.0403j, 0.0123 + 0.0069j,
            -0.0197 - 0.0147j, -0.0087 + 0.0157j, 0.0039 + 0.0002j])
        self.tx = complexArray([0.0015 - 0.0016j, -0.0583 + 0.0256j, -0.0245 - 0.0098j,
            0.0060 + 0.0210j, 0.3040 + 0.0664j, -0.0054 - 0.0632j,
            -0.0123 - 0.0262j, -0.0323 - 0.0534j, 0.0169 + 0.0455j])
        self.ty = complexArray([-0.0024 + 0.0011j, 0.0356 + 0.0282j, -0.0230 - 0.0071j,
            0.0610 - 0.0011j, 0.0523 - 0.2913j, -0.0645 - 0.0027j,
            -0.0170 - 0.0165j, -0.0420 + 0.0298j, 0.0258 - 0.0234j])
        self.tz = complexArray([0.0023 + 0.0021j, - 0.0036 - 0.0406j, 0.0187 + 0.0057j,
            -0.0261 - 0.0235j, -0.1294 + 0.0394j, 0.0133 - 0.0012j,
            0.0078 + 0.0241j, 0.0014 + 0.0288j, 0.0069 - 0.0045j])
        # Reference per-order diffraction efficiencies and their totals.
        self.R = np.array([
            [0,0,0],
            [0,0.0848, 0.0011],
            [0, 0.0025, 0.0004]])
        self.T = np.array([
            [0, 0.0149, 0.0055],
            [0.0222, 0.7851, 0.0283],
            [0.0053, 0.0348, 0.0150]])
        self.R = np.transpose(self.R)
        self.T = np.transpose(self.T)
        self.RTot = 0.088768
        self.TTot = 0.91123
| [
"numpy.arange",
"rcwa.Source",
"rcwa.Crystal",
"rcwa.Solver",
"numpy.array",
"numpy.loadtxt",
"rcwa.LayerStack",
"rcwa.numpyArrayFromSeparatedColumnsFile",
"rcwa.Layer",
"numpy.transpose",
"rcwa.numpyArrayFromFile"
] | [((5963, 5984), 'rcwa.Layer', 'Layer', ([], {'er': '(2.0)', 'ur': '(1.0)'}), '(er=2.0, ur=1.0)\n', (5968, 5984), False, 'from rcwa import Source, Layer, Plotter, Crystal, Solver, LayerStack\n'), ((6013, 6034), 'rcwa.Layer', 'Layer', ([], {'er': '(9.0)', 'ur': '(1.0)'}), '(er=9.0, ur=1.0)\n', (6018, 6034), False, 'from rcwa import Source, Layer, Plotter, Crystal, Solver, LayerStack\n'), ((6565, 6655), 'rcwa.Source', 'Source', ([], {'wavelength': 'wavelength', 'theta': 'theta', 'phi': 'phi', 'pTEM': 'pTEM', 'layer': 'reflectionLayer'}), '(wavelength=wavelength, theta=theta, phi=phi, pTEM=pTEM, layer=\n reflectionLayer)\n', (6571, 6655), False, 'from rcwa import Source, Layer, Plotter, Crystal, Solver, LayerStack\n'), ((6874, 6945), 'rcwa.Crystal', 'Crystal', (['devicePermittivityCellData', 'devicePermeabilityCellData', 't1', 't2'], {}), '(devicePermittivityCellData, devicePermeabilityCellData, t1, t2)\n', (6881, 6945), False, 'from rcwa import Source, Layer, Plotter, Crystal, Solver, LayerStack\n'), ((6963, 7048), 'rcwa.Layer', 'Layer', ([], {'crystal': 'deviceCrystal', 'L': 'thicknessLayer1', 'numberHarmonics': 'numberHarmonics'}), '(crystal=deviceCrystal, L=thicknessLayer1, numberHarmonics=numberHarmonics\n )\n', (6968, 7048), False, 'from rcwa import Source, Layer, Plotter, Crystal, Solver, LayerStack\n'), ((7061, 7101), 'rcwa.Layer', 'Layer', ([], {'er': '(6.0)', 'ur': '(1.0)', 'L': 'thicknessLayer2'}), '(er=6.0, ur=1.0, L=thicknessLayer2)\n', (7066, 7101), False, 'from rcwa import Source, Layer, Plotter, Crystal, Solver, LayerStack\n'), ((7123, 7185), 'rcwa.LayerStack', 'LayerStack', (['reflectionLayer', 'layer1', 'layer2', 'transmissionLayer'], {}), '(reflectionLayer, layer1, layer2, transmissionLayer)\n', (7133, 7185), False, 'from rcwa import Source, Layer, Plotter, Crystal, Solver, LayerStack\n'), ((7210, 7253), 'rcwa.Solver', 'Solver', (['layerStack', 'source', 'numberHarmonics'], {}), '(layerStack, source, numberHarmonics)\n', (7216, 7253), False, 
'from rcwa import Source, Layer, Plotter, Crystal, Solver, LayerStack\n'), ((7680, 7779), 'rcwa.numpyArrayFromFile', 'numpyArrayFromFile', (["(testLocation + '/matrixDataOblique/reflectionRegion/KzReflectionRegion.txt')"], {}), "(testLocation +\n '/matrixDataOblique/reflectionRegion/KzReflectionRegion.txt')\n", (7698, 7779), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((7965, 8050), 'rcwa.numpyArrayFromFile', 'numpyArrayFromFile', (["(testLocation + '/matrixDataOblique/freeSpace/KzFreeSpace.txt')"], {}), "(testLocation +\n '/matrixDataOblique/freeSpace/KzFreeSpace.txt')\n", (7983, 8050), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((8089, 8178), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/SGlobal11.txt')"], {}), "(testLocation +\n '/matrixDataOblique/SGlobal11.txt')\n", (8123, 8178), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((8216, 8305), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/SGlobal12.txt')"], {}), "(testLocation +\n '/matrixDataOblique/SGlobal12.txt')\n", (8250, 8305), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((8343, 8432), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/SGlobal21.txt')"], {}), "(testLocation +\n '/matrixDataOblique/SGlobal21.txt')\n", (8377, 8432), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((8470, 8559), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/SGlobal22.txt')"], {}), "(testLocation +\n '/matrixDataOblique/SGlobal22.txt')\n", (8504, 8559), False, 'from rcwa import 
numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((8740, 8856), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/reflectionRegion/S11ReflectionRegion.txt')"], {}), "(testLocation +\n '/matrixDataOblique/reflectionRegion/S11ReflectionRegion.txt')\n", (8774, 8856), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((8905, 9021), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/reflectionRegion/S12ReflectionRegion.txt')"], {}), "(testLocation +\n '/matrixDataOblique/reflectionRegion/S12ReflectionRegion.txt')\n", (8939, 9021), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((9070, 9186), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/reflectionRegion/S21ReflectionRegion.txt')"], {}), "(testLocation +\n '/matrixDataOblique/reflectionRegion/S21ReflectionRegion.txt')\n", (9104, 9186), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((9235, 9351), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/reflectionRegion/S22ReflectionRegion.txt')"], {}), "(testLocation +\n '/matrixDataOblique/reflectionRegion/S22ReflectionRegion.txt')\n", (9269, 9351), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((9584, 9704), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation +\n '/matrixDataOblique/transmissionRegion/S11TransmissionRegion.txt')"], {}), "(testLocation +\n '/matrixDataOblique/transmissionRegion/S11TransmissionRegion.txt')\n", (9618, 9704), False, 'from rcwa import numpyArrayFromFile, testLocation, 
numpyArrayFromSeparatedColumnsFile\n'), ((9755, 9875), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation +\n '/matrixDataOblique/transmissionRegion/S12TransmissionRegion.txt')"], {}), "(testLocation +\n '/matrixDataOblique/transmissionRegion/S12TransmissionRegion.txt')\n", (9789, 9875), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((9926, 10046), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation +\n '/matrixDataOblique/transmissionRegion/S21TransmissionRegion.txt')"], {}), "(testLocation +\n '/matrixDataOblique/transmissionRegion/S21TransmissionRegion.txt')\n", (9960, 10046), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((10097, 10217), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation +\n '/matrixDataOblique/transmissionRegion/S22TransmissionRegion.txt')"], {}), "(testLocation +\n '/matrixDataOblique/transmissionRegion/S22TransmissionRegion.txt')\n", (10131, 10217), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((10448, 10544), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/layer1/S11Layer1.txt')"], {}), "(testLocation +\n '/matrixDataOblique/layer1/S11Layer1.txt')\n", (10482, 10544), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((10566, 10662), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/layer1/S12Layer1.txt')"], {}), "(testLocation +\n '/matrixDataOblique/layer1/S12Layer1.txt')\n", (10600, 10662), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((10684, 10780), 'rcwa.numpyArrayFromSeparatedColumnsFile', 
'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/layer1/S21Layer1.txt')"], {}), "(testLocation +\n '/matrixDataOblique/layer1/S21Layer1.txt')\n", (10718, 10780), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((10802, 10898), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/layer1/S22Layer1.txt')"], {}), "(testLocation +\n '/matrixDataOblique/layer1/S22Layer1.txt')\n", (10836, 10898), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((11052, 11148), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/layer2/S11Layer2.txt')"], {}), "(testLocation +\n '/matrixDataOblique/layer2/S11Layer2.txt')\n", (11086, 11148), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((11170, 11266), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/layer2/S12Layer2.txt')"], {}), "(testLocation +\n '/matrixDataOblique/layer2/S12Layer2.txt')\n", (11204, 11266), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((11288, 11384), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/layer2/S21Layer2.txt')"], {}), "(testLocation +\n '/matrixDataOblique/layer2/S21Layer2.txt')\n", (11322, 11384), False, 'from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile\n'), ((11406, 11502), 'rcwa.numpyArrayFromSeparatedColumnsFile', 'numpyArrayFromSeparatedColumnsFile', (["(testLocation + '/matrixDataOblique/layer2/S22Layer2.txt')"], {}), "(testLocation +\n '/matrixDataOblique/layer2/S22Layer2.txt')\n", (11440, 11502), False, 'from rcwa import numpyArrayFromFile, testLocation, 
numpyArrayFromSeparatedColumnsFile\n'), ((11653, 11738), 'numpy.loadtxt', 'np.loadtxt', (["(testLocation + '/matrixDataOblique/layer2/D12.csv')"], {'dtype': 'np.cdouble'}), "(testLocation + '/matrixDataOblique/layer2/D12.csv', dtype=np.cdouble\n )\n", (11663, 11738), True, 'import numpy as np\n'), ((11757, 11842), 'numpy.loadtxt', 'np.loadtxt', (["(testLocation + '/matrixDataOblique/layer2/F12.csv')"], {'dtype': 'np.cdouble'}), "(testLocation + '/matrixDataOblique/layer2/F12.csv', dtype=np.cdouble\n )\n", (11767, 11842), True, 'import numpy as np\n'), ((13195, 13258), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0.0848, 0.0011], [0, 0.0025, 0.0004]]'], {}), '([[0, 0, 0], [0, 0.0848, 0.0011], [0, 0.0025, 0.0004]])\n', (13203, 13258), True, 'import numpy as np\n'), ((13310, 13397), 'numpy.array', 'np.array', (['[[0, 0.0149, 0.0055], [0.0222, 0.7851, 0.0283], [0.0053, 0.0348, 0.015]]'], {}), '([[0, 0.0149, 0.0055], [0.0222, 0.7851, 0.0283], [0.0053, 0.0348, \n 0.015]])\n', (13318, 13397), True, 'import numpy as np\n'), ((13448, 13468), 'numpy.transpose', 'np.transpose', (['self.R'], {}), '(self.R)\n', (13460, 13468), True, 'import numpy as np\n'), ((13486, 13506), 'numpy.transpose', 'np.transpose', (['self.T'], {}), '(self.T)\n', (13498, 13506), True, 'import numpy as np\n'), ((5527, 5550), 'numpy.arange', 'np.arange', (['(0.2)', '(2)', '(0.01)'], {}), '(0.2, 2, 0.01)\n', (5536, 5550), True, 'import numpy as np\n'), ((5801, 5862), 'numpy.loadtxt', 'np.loadtxt', (["(testLocation + '/triangleData.csv')"], {'delimiter': '""","""'}), "(testLocation + '/triangleData.csv', delimiter=',')\n", (5811, 5862), True, 'import numpy as np\n')] |
from os import path
import numpy as np
from PIL import Image
from scipy import ndimage
from glob import glob
from medraw_handler import medraw2mask
from skimage.color import label2rgb
for f_idx in [1, 2]:  # process two images
    # Automatic segmentation labels and the original H&E image for this index.
    auto_label = np.array(Image.open('auto_segs/{}_auto_seg.png'.format(f_idx)))
    he_image = np.array(Image.open('auto_segs/{}_original.png'.format(f_idx)))
    # Human corrections (if any) drawn with the medraw tool; -1 marks removal.
    fn = 'upload/mask/{}_mask.png'.format(f_idx)
    if path.exists(fn):
        human_label = medraw2mask(fn)
    else:
        human_label = np.zeros((auto_label.shape[0], auto_label.shape[1]), dtype=np.int16)
    human_remove = (human_label == -1)
    # Union of all human-drawn nuclei (holes filled), as a boolean mask.
    human_overwrite = np.zeros_like(human_label)
    for color_idx in range(1, human_label.max() + 1):
        human_overwrite += ndimage.binary_fill_holes(human_label == color_idx)
    human_overwrite = (human_overwrite > 0)
    # Drop automatic nuclei that the human removed, that overlap a
    # human-drawn nucleus by more than 40%, or that are too small.
    for nuc_idx in range(1, auto_label.max() + 1):
        nuc_mask = (auto_label == nuc_idx)
        if (nuc_mask * human_remove).sum() > 0:
            auto_label[nuc_mask] = 0
        if (nuc_mask * human_overwrite).sum() > nuc_mask.sum() * 0.40:
            auto_label[nuc_mask] = 0
        if nuc_mask.sum() < 30:
            # Some false auto-segmentation results are very small and hard to spot by eyes.
            auto_label[nuc_mask] = 0
    # Add each human-drawn connected component as a new nucleus label.
    nuc_add = auto_label.max() + 1
    for color_idx in range(1, human_label.max() + 1):
        color_mask = (human_label == color_idx)
        if color_mask.sum() == 0:
            continue
        nucs, n_nucs = ndimage.measurements.label(color_mask)
        for nuc_idx in range(1, n_nucs + 1):
            nuc_mask = (nucs == nuc_idx)
            nuc_mask = ndimage.binary_fill_holes(nuc_mask)
            auto_label[nuc_mask] = nuc_add
            nuc_add += 1
    # Save the merged label image and a color overlay for visual inspection.
    Image.fromarray(auto_label).save('output/{}_final_seg.png'.format(f_idx))
    label_rgb = (label2rgb(auto_label, image=he_image, alpha=0.6, bg_label=0) * 255).astype(np.uint8)
    Image.fromarray(label_rgb).save('output/{}_final_seg_visual.png'.format(f_idx))
| [
"os.path.exists",
"PIL.Image.fromarray",
"medraw_handler.medraw2mask",
"scipy.ndimage.measurements.label",
"scipy.ndimage.binary_fill_holes",
"numpy.zeros",
"skimage.color.label2rgb",
"numpy.zeros_like"
] | [((445, 460), 'os.path.exists', 'path.exists', (['fn'], {}), '(fn)\n', (456, 460), False, 'from os import path\n'), ((664, 690), 'numpy.zeros_like', 'np.zeros_like', (['human_label'], {}), '(human_label)\n', (677, 690), True, 'import numpy as np\n'), ((484, 499), 'medraw_handler.medraw2mask', 'medraw2mask', (['fn'], {}), '(fn)\n', (495, 499), False, 'from medraw_handler import medraw2mask\n'), ((532, 600), 'numpy.zeros', 'np.zeros', (['(auto_label.shape[0], auto_label.shape[1])'], {'dtype': 'np.int16'}), '((auto_label.shape[0], auto_label.shape[1]), dtype=np.int16)\n', (540, 600), True, 'import numpy as np\n'), ((772, 823), 'scipy.ndimage.binary_fill_holes', 'ndimage.binary_fill_holes', (['(human_label == color_idx)'], {}), '(human_label == color_idx)\n', (797, 823), False, 'from scipy import ndimage\n'), ((1534, 1572), 'scipy.ndimage.measurements.label', 'ndimage.measurements.label', (['color_mask'], {}), '(color_mask)\n', (1560, 1572), False, 'from scipy import ndimage\n'), ((1682, 1717), 'scipy.ndimage.binary_fill_holes', 'ndimage.binary_fill_holes', (['nuc_mask'], {}), '(nuc_mask)\n', (1707, 1717), False, 'from scipy import ndimage\n'), ((1791, 1818), 'PIL.Image.fromarray', 'Image.fromarray', (['auto_label'], {}), '(auto_label)\n', (1806, 1818), False, 'from PIL import Image\n'), ((1971, 1997), 'PIL.Image.fromarray', 'Image.fromarray', (['label_rgb'], {}), '(label_rgb)\n', (1986, 1997), False, 'from PIL import Image\n'), ((1882, 1942), 'skimage.color.label2rgb', 'label2rgb', (['auto_label'], {'image': 'he_image', 'alpha': '(0.6)', 'bg_label': '(0)'}), '(auto_label, image=he_image, alpha=0.6, bg_label=0)\n', (1891, 1942), False, 'from skimage.color import label2rgb\n')] |
# Copyright (c) 2017 OpenAI (http://openai.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import scipy.signal
def discount(x, gamma):
    """Compute discounted cumulative sums along the 0th axis of x.

    inputs
    ------
    x: ndarray
    gamma: float

    outputs
    -------
    y: ndarray with same shape as x, satisfying
        y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
            where k = len(x) - t - 1
    """
    assert x.ndim >= 1
    # Run a first-order IIR filter over the reversed sequence, then flip back.
    reversed_x = x[::-1]
    filtered = scipy.signal.lfilter([1], [1, -gamma], reversed_x, axis=0)
    return filtered[::-1]
def explained_variance(ypred, y):
    """Fraction of the variance of y that ypred explains: 1 - Var[y-ypred]/Var[y].

    interpretation:
        ev=0  => might as well have predicted zero
        ev=1  => perfect prediction
        ev<0  => worse than just predicting zero
    Returns NaN when Var[y] is zero.
    """
    assert y.ndim == 1 and ypred.ndim == 1
    target_variance = np.var(y)
    if target_variance == 0:
        return np.nan
    residual_variance = np.var(y - ypred)
    return 1 - residual_variance / target_variance
def explained_variance_2d(ypred, y):
    """Column-wise explained variance for 2d arrays (rows = samples).

    For each column j, returns 1 - Var[y[:,j] - ypred[:,j]] / Var[y[:,j]];
    columns whose target variance is (near) zero are reported as 0.

    Fix: the residual variance was previously computed over the whole array
    (a scalar, no axis argument), which mixed errors across columns; it is
    now computed per column with axis=0, matching vary.
    """
    assert y.ndim == 2 and ypred.ndim == 2
    vary = np.var(y, axis=0)
    out = 1 - np.var(y - ypred, axis=0) / vary
    out[vary < 1e-10] = 0
    return out
def ncc(ypred, y):
    """Pearson correlation coefficient between ypred and y."""
    correlation_matrix = np.corrcoef(ypred, y)
    return correlation_matrix[1, 0]
def flatten_arrays(arrs):
    """Concatenate the flattened (C-order) contents of arrs into one 1d array."""
    flat_parts = [np.ravel(arr) for arr in arrs]
    return np.concatenate(flat_parts)
def unflatten_vector(vec, shapes):
    """Split the flat vector vec into a list of arrays with the given shapes."""
    arrs = []
    offset = 0
    for shape in shapes:
        count = np.prod(shape)
        arrs.append(vec[offset:offset + count].reshape(shape))
        offset += count
    return arrs
def discount_with_boundaries(X, New, gamma):
    """Discounted sums over X, resetting at episode boundaries.

    X: 2d array of floats, time x features
    New: 2d array of bools, indicating when a new episode has started;
         no discounted value flows backward across a boundary.
    """
    num_steps = X.shape[0]
    out = np.zeros_like(X)
    out[num_steps - 1] = X[num_steps - 1]
    for t in reversed(range(num_steps - 1)):
        # The discounted tail is zeroed when the next step begins a new episode.
        out[t] = X[t] + gamma * out[t + 1] * (1 - New[t + 1])
    return out
def test_discount_with_boundaries():
    """Sanity-check discount_with_boundaries on a hand-worked example."""
    gamma = 0.9
    rewards = np.array([1.0, 2.0, 3.0, 4.0], 'float32')
    episode_starts = [1.0, 0.0, 0.0, 1.0]
    discounted = discount_with_boundaries(rewards, episode_starts, gamma)
    # The boundary at t=3 stops discounting from flowing backwards.
    assert np.allclose(discounted, [
        1 + gamma * 2 + gamma ** 2 * 3,
        2 + gamma * 3,
        3,
        4,
    ])
"numpy.prod",
"numpy.allclose",
"numpy.corrcoef",
"numpy.array",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.var"
] | [((1948, 1957), 'numpy.var', 'np.var', (['y'], {}), '(y)\n', (1954, 1957), True, 'import numpy as np\n'), ((2109, 2126), 'numpy.var', 'np.var', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (2115, 2126), True, 'import numpy as np\n'), ((2299, 2341), 'numpy.concatenate', 'np.concatenate', (['[arr.flat for arr in arrs]'], {}), '([arr.flat for arr in arrs])\n', (2313, 2341), True, 'import numpy as np\n'), ((2740, 2756), 'numpy.zeros_like', 'np.zeros_like', (['X'], {}), '(X)\n', (2753, 2756), True, 'import numpy as np\n'), ((2956, 2997), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0]', '"""float32"""'], {}), "([1.0, 2.0, 3.0, 4.0], 'float32')\n", (2964, 2997), True, 'import numpy as np\n'), ((3094, 3163), 'numpy.allclose', 'np.allclose', (['y', '[1 + gamma * 2 + gamma ** 2 * 3, 2 + gamma * 3, 3, 4]'], {}), '(y, [1 + gamma * 2 + gamma ** 2 * 3, 2 + gamma * 3, 3, 4])\n', (3105, 3163), True, 'import numpy as np\n'), ((2234, 2255), 'numpy.corrcoef', 'np.corrcoef', (['ypred', 'y'], {}), '(ypred, y)\n', (2245, 2255), True, 'import numpy as np\n'), ((2440, 2454), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (2447, 2454), True, 'import numpy as np\n'), ((2141, 2158), 'numpy.var', 'np.var', (['(y - ypred)'], {}), '(y - ypred)\n', (2147, 2158), True, 'import numpy as np\n'), ((1996, 2013), 'numpy.var', 'np.var', (['(y - ypred)'], {}), '(y - ypred)\n', (2002, 2013), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import json
import math
import os
import six
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
import paddlehub as hub
from paddlehub.common.paddle_helper import get_variable_info
from paddlehub.common.utils import sys_stdin_encoding
from paddlehub.io.parser import txt_parser
from paddlehub.module.module import serving
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from simnet_bow.processor import load_vocab, preprocess, postprocess
class DataFormatError(Exception):
    """Raised when prediction input does not match the expected format."""

    def __init__(self, *args):
        # Exception.__init__ stores args on self.args, as the original did
        # by direct assignment.
        super(DataFormatError, self).__init__(*args)
@moduleinfo(
name="simnet_bow",
version="1.1.0",
summary=
"Baidu's open-source similarity network model based on bow_pairwise.",
author="baidu-nlp",
author_email="",
type="nlp/sentiment_analysis")
class SimnetBow(hub.Module):
def _initialize(self):
"""
initialize with the necessary elements
"""
self.pretrained_model_path = os.path.join(self.directory, "infer_model")
self.vocab_path = os.path.join(self.directory, "assets", "vocab.txt")
self.vocab = load_vocab(self.vocab_path)
self.param_file = os.path.join(self.directory, "assets", "params.txt")
self._word_seg_module = None
self._set_config()
    @property
    def word_seg_module(self):
        """
        LAC word segmentation module, created on first access and cached.
        """
        # Lazy initialization: _word_seg_module starts as None in _initialize.
        if not self._word_seg_module:
            self._word_seg_module = hub.Module(name="lac")
        return self._word_seg_module
def _set_config(self):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
cpu_config.switch_ir_optim(False)
self.cpu_predictor = create_paddle_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
int(_places[0])
use_gpu = True
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=500, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
    def context(self, trainable=False):
        """
        Get the inputs, outputs and program of the pretrained simnet_bow.
        Args:
            trainable(bool): whether to fine-tune the pretrained parameters of simnet_bow or not
        Returns:
            inputs(dict): the input variables of simnet_bow (text_1, text_2)
            outputs(dict): the output variables of simnet_bow (left_feature, similarity)
            program(Program): the main program of simnet_bow with pretrained parameters
        """
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        program, feed_target_names, fetch_targets = fluid.io.load_inference_model(
            dirname=self.pretrained_model_path, executor=exe)
        # params.txt lists the persistable variables; re-register each one as a
        # parameter so it can be fine-tuned.
        with open(self.param_file, 'r') as file:
            params_list = file.readlines()
        for param in params_list:
            param = param.strip()
            var = program.global_block().var(param)
            var_info = get_variable_info(var)
            program.global_block().create_parameter(
                shape=var_info['shape'],
                dtype=var_info['dtype'],
                name=var_info['name'])
        for param in program.global_block().iter_parameters():
            param.trainable = trainable
        # Map the model's feed variables to the two text inputs by position.
        inputs = {}
        for name, var in program.global_block().vars.items():
            if name == feed_target_names[0]:
                inputs["text_1"] = var
            if name == feed_target_names[1]:
                inputs["text_2"] = var
        # output of second layer from the end prediction layer (fc-softmax)
        outputs = {
            "left_feature": fetch_targets[0],
            "similarity": fetch_targets[1]
        }
        return inputs, outputs, program
def texts2tensor(self, texts):
"""
Tranform the texts(dict) to PaddleTensor
Args:
texts(dict): texts
Returns:
tensor(PaddleTensor): tensor with texts data
"""
lod = [0]
data = []
for i, text in enumerate(texts):
data += text['processed']
lod.append(len(text['processed']) + lod[i])
tensor = PaddleTensor(np.array(data).astype('int64'))
tensor.name = "words"
tensor.lod = [lod]
tensor.shape = [lod[-1], 1]
return tensor
    def to_unicode(self, texts):
        """
        Convert each element's type(str) of texts(list) to unicode in python2.7
        Args:
            texts(list): each element's type is str in python2.7
        Returns:
            texts(list): each element's type is unicode in python2.7
        """
        # On Python 3 this is a no-op; `unicode` only exists under Python 2.
        if six.PY2:
            unicode_texts = []
            for text in texts:
                if not isinstance(text, unicode):
                    # NOTE(review): decoding twice (stdin encoding, then utf8)
                    # looks suspicious — presumably intentional for doubly
                    # encoded console input; confirm before changing.
                    unicode_texts.append(
                        text.decode(sys_stdin_encoding()).decode("utf8"))
                else:
                    unicode_texts.append(text)
            texts = unicode_texts
        return texts
def check_data(self, texts=[], data={}):
"""
check input data
Args:
texts(list): the input texts to be predicted which the first element is text_1(list)
and the second element is text_2(list), such as [['这道题很难'], ['这道题不简单']]
if texts not data.
data(dict): key must be 'text_1' and 'text_2', value is the texts(list) to be predicted
Returns:
results(dict): predicted data
"""
predicted_data = {'text_1': [], 'text_2': []}
if texts != [] and isinstance(texts, list) and len(texts) == 2 and (len(
texts[0]) == len(
texts[1])) and texts[0] and texts[1] and data == {}:
predicted_data['text_1'] = texts[0]
predicted_data['text_2'] = texts[1]
elif texts == [] and isinstance(data, dict) and isinstance(
data.get('text_1', None), list) and isinstance(
data.get('text_2', None),
list) and (len(data['text_1']) == len(
data['text_2'])) and data['text_1'] and data['text_2']:
predicted_data = data
else:
raise ValueError(
"The input data is inconsistent with expectations.")
return predicted_data
@serving
def similarity(self, texts=[], data={}, use_gpu=False, batch_size=1):
"""
Get the sentiment prediction results results with the texts as input
Args:
texts(list): the input texts to be predicted which the first element is text_1(list)
and the second element is text_2(list), such as [['这道题很难'], ['这道题不简单']]
if texts not data.
data(dict): key must be 'text_1' and 'text_2', value is the texts(list) to be predicted
use_gpu(bool): whether use gpu to predict or not
batch_size(int): the program deals once with one batch
Returns:
results(list): the word segmentation results
"""
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
int(_places[0])
except:
use_gpu = False
data = self.check_data(texts, data)
start_idx = 0
iteration = int(math.ceil(len(data['text_1']) / batch_size))
results = []
for i in range(iteration):
batch_data = {'text_1': [], 'text_2': []}
if i < (iteration - 1):
batch_data['text_1'] = data['text_1'][start_idx:(
start_idx + batch_size)]
batch_data['text_2'] = data['text_2'][start_idx:(
start_idx + batch_size)]
else:
batch_data['text_1'] = data['text_1'][start_idx:(
start_idx + batch_size)]
batch_data['text_2'] = data['text_2'][start_idx:(
start_idx + batch_size)]
start_idx = start_idx + batch_size
processed_results = preprocess(self.word_seg_module, self.vocab,
batch_data, use_gpu, batch_size)
tensor_words_1 = self.texts2tensor(processed_results["text_1"])
tensor_words_2 = self.texts2tensor(processed_results["text_2"])
if use_gpu:
batch_out = self.gpu_predictor.run(
[tensor_words_1, tensor_words_2])
else:
batch_out = self.cpu_predictor.run(
[tensor_words_1, tensor_words_2])
batch_result = postprocess(batch_out[1], processed_results)
results += batch_result
return results
@runnable
def run_cmd(self, argvs):
"""
Run as a command
"""
self.parser = argparse.ArgumentParser(
description="Run the simnet_bow module.",
prog='hub run simnet_bow',
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(
title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options",
description=
"Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
try:
input_data = self.check_input_data(args)
except DataFormatError and RuntimeError:
self.parser.print_help()
return None
results = self.similarity(
data=input_data, use_gpu=args.use_gpu, batch_size=args.batch_size)
return results
def add_module_config_arg(self):
"""
Add the command config options
"""
self.arg_config_group.add_argument(
'--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU for prediction")
self.arg_config_group.add_argument(
'--batch_size',
type=int,
default=1,
help="batch size for prediction")
def add_module_input_arg(self):
"""
Add the command input options
"""
self.arg_input_group.add_argument(
'--input_file',
type=str,
default=None,
help="file contain input data")
self.arg_input_group.add_argument(
'--text_1', type=str, default=None, help="text to predict")
self.arg_input_group.add_argument(
'--text_2', type=str, default=None, help="text to predict")
    def check_input_data(self, args):
        """
        Build the prediction input dict from parsed command-line args.

        Args:
            args: argparse Namespace with input_file, text_1 and text_2.

        Returns:
            input_data(dict): {'text_1': [...], 'text_2': [...]}.

        Raises:
            RuntimeError: if --input_file points to a missing file.
            DataFormatError: if no usable input was supplied.
        """
        input_data = {}
        # A file takes precedence over inline --text_1/--text_2 options.
        if args.input_file:
            if not os.path.exists(args.input_file):
                print("File %s is not exist." % args.input_file)
                raise RuntimeError
            else:
                input_data = txt_parser.parse(args.input_file, use_strip=True)
        elif args.text_1 and args.text_2:
            if args.text_1.strip() != '' and args.text_2.strip() != '':
                # Python 2.7 delivers bytes on the command line; decode them
                # via the terminal encoding, then utf8.
                if six.PY2:
                    input_data = {
                        "text_1": [
                            args.text_1.strip().decode(
                                sys_stdin_encoding()).decode("utf8")
                        ],
                        "text_2": [
                            args.text_2.strip().decode(
                                sys_stdin_encoding()).decode("utf8")
                        ]
                    }
                else:
                    input_data = {
                        "text_1": [args.text_1],
                        "text_2": [args.text_2]
                    }
            else:
                print(
                    "ERROR: The input data is inconsistent with expectations.")
        # Still empty here means neither input path produced data.
        if input_data == {}:
            print("ERROR: The input data is inconsistent with expectations.")
            raise DataFormatError
        return input_data
def get_vocab_path(self):
"""
Get the path to the vocabulary whih was used to pretrain
Returns:
self.vocab_path(str): the path to vocabulary
"""
return self.vocab_path
if __name__ == "__main__":
    # Smoke-test the module: predict pairwise similarity for three pairs
    # and report the best match for the first query text.
    module = SimnetBow()
    module.context()
    # Data to be predicted
    test_text_1 = ["这道题太难了", "这道题太难了", "这道题太难了"]
    test_text_2 = ["这道题是上一年的考题", "这道题不简单", "这道题很有意思"]
    inputs = {"text_1": test_text_1, "text_2": test_text_2}
    results = module.similarity(data=inputs, batch_size=2)
    print(results)
    best_score, best_text = -1, ""
    for entry in results:
        score = entry['similarity']
        # Strict '>' keeps the earliest entry on ties, as before.
        if score > best_score:
            best_score, best_text = score, entry['text_2']
    print("The most matching with the %s is %s" % (test_text_1[0], best_text))
| [
"os.path.exists",
"paddle.fluid.io.load_inference_model",
"paddlehub.module.module.moduleinfo",
"simnet_bow.processor.postprocess",
"argparse.ArgumentParser",
"simnet_bow.processor.preprocess",
"os.path.join",
"paddle.fluid.CPUPlace",
"paddlehub.io.parser.txt_parser.parse",
"simnet_bow.processor.l... | [((821, 1023), 'paddlehub.module.module.moduleinfo', 'moduleinfo', ([], {'name': '"""simnet_bow"""', 'version': '"""1.1.0"""', 'summary': '"""Baidu\'s open-source similarity network model based on bow_pairwise."""', 'author': '"""baidu-nlp"""', 'author_email': '""""""', 'type': '"""nlp/sentiment_analysis"""'}), '(name=\'simnet_bow\', version=\'1.1.0\', summary=\n "Baidu\'s open-source similarity network model based on bow_pairwise.",\n author=\'baidu-nlp\', author_email=\'\', type=\'nlp/sentiment_analysis\')\n', (831, 1023), False, 'from paddlehub.module.module import moduleinfo\n'), ((1209, 1252), 'os.path.join', 'os.path.join', (['self.directory', '"""infer_model"""'], {}), "(self.directory, 'infer_model')\n", (1221, 1252), False, 'import os\n'), ((1279, 1330), 'os.path.join', 'os.path.join', (['self.directory', '"""assets"""', '"""vocab.txt"""'], {}), "(self.directory, 'assets', 'vocab.txt')\n", (1291, 1330), False, 'import os\n'), ((1352, 1379), 'simnet_bow.processor.load_vocab', 'load_vocab', (['self.vocab_path'], {}), '(self.vocab_path)\n', (1362, 1379), False, 'from simnet_bow.processor import load_vocab, preprocess, postprocess\n'), ((1406, 1458), 'os.path.join', 'os.path.join', (['self.directory', '"""assets"""', '"""params.txt"""'], {}), "(self.directory, 'assets', 'params.txt')\n", (1418, 1458), False, 'import os\n'), ((1853, 1895), 'paddle.fluid.core.AnalysisConfig', 'AnalysisConfig', (['self.pretrained_model_path'], {}), '(self.pretrained_model_path)\n', (1867, 1895), False, 'from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor\n'), ((2039, 2074), 'paddle.fluid.core.create_paddle_predictor', 'create_paddle_predictor', (['cpu_config'], {}), '(cpu_config)\n', (2062, 2074), False, 'from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor\n'), ((3057, 3073), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (3071, 3073), True, 'import paddle.fluid as 
fluid\n'), ((3088, 3109), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (3102, 3109), True, 'import paddle.fluid as fluid\n'), ((3163, 3242), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'self.pretrained_model_path', 'executor': 'exe'}), '(dirname=self.pretrained_model_path, executor=exe)\n', (3192, 3242), True, 'import paddle.fluid as fluid\n'), ((9379, 9509), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run the simnet_bow module."""', 'prog': '"""hub run simnet_bow"""', 'usage': '"""%(prog)s"""', 'add_help': '(True)'}), "(description='Run the simnet_bow module.', prog=\n 'hub run simnet_bow', usage='%(prog)s', add_help=True)\n", (9402, 9509), False, 'import argparse\n'), ((1687, 1709), 'paddlehub.Module', 'hub.Module', ([], {'name': '"""lac"""'}), "(name='lac')\n", (1697, 1709), True, 'import paddlehub as hub\n'), ((2290, 2332), 'paddle.fluid.core.AnalysisConfig', 'AnalysisConfig', (['self.pretrained_model_path'], {}), '(self.pretrained_model_path)\n', (2304, 2332), False, 'from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor\n'), ((2490, 2525), 'paddle.fluid.core.create_paddle_predictor', 'create_paddle_predictor', (['gpu_config'], {}), '(gpu_config)\n', (2513, 2525), False, 'from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor\n'), ((3491, 3513), 'paddlehub.common.paddle_helper.get_variable_info', 'get_variable_info', (['var'], {}), '(var)\n', (3508, 3513), False, 'from paddlehub.common.paddle_helper import get_variable_info\n'), ((8603, 8680), 'simnet_bow.processor.preprocess', 'preprocess', (['self.word_seg_module', 'self.vocab', 'batch_data', 'use_gpu', 'batch_size'], {}), '(self.word_seg_module, self.vocab, batch_data, use_gpu, batch_size)\n', (8613, 8680), False, 'from simnet_bow.processor import load_vocab, preprocess, postprocess\n'), ((9159, 9203), 'simnet_bow.processor.postprocess', 
'postprocess', (['batch_out[1]', 'processed_results'], {}), '(batch_out[1], processed_results)\n', (9170, 9203), False, 'from simnet_bow.processor import load_vocab, preprocess, postprocess\n'), ((11379, 11410), 'os.path.exists', 'os.path.exists', (['args.input_file'], {}), '(args.input_file)\n', (11393, 11410), False, 'import os\n'), ((11559, 11608), 'paddlehub.io.parser.txt_parser.parse', 'txt_parser.parse', (['args.input_file'], {'use_strip': '(True)'}), '(args.input_file, use_strip=True)\n', (11575, 11608), False, 'from paddlehub.io.parser import txt_parser\n'), ((4714, 4728), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4722, 4728), True, 'import numpy as np\n'), ((5377, 5397), 'paddlehub.common.utils.sys_stdin_encoding', 'sys_stdin_encoding', ([], {}), '()\n', (5395, 5397), False, 'from paddlehub.common.utils import sys_stdin_encoding\n'), ((11910, 11930), 'paddlehub.common.utils.sys_stdin_encoding', 'sys_stdin_encoding', ([], {}), '()\n', (11928, 11930), False, 'from paddlehub.common.utils import sys_stdin_encoding\n'), ((12098, 12118), 'paddlehub.common.utils.sys_stdin_encoding', 'sys_stdin_encoding', ([], {}), '()\n', (12116, 12118), False, 'from paddlehub.common.utils import sys_stdin_encoding\n')] |
from itertools import combinations
import numpy as np
import utility
def sol2(vet1, indice, vet_in):
    """
    Find the largest set of pairwise-disjoint binary rows of *vet1*.

    Starting from combination size *indice* and shrinking by one each
    round, try every combination of rows; a combination is valid when the
    column-wise sum never reaches 2 (no bit is covered twice). All valid
    combinations of the first size that yields any are decoded via
    utility.bin_to_int2 and returned.

    Args:
        vet1: sequence of binary rows (each a sequence of 0/1 values).
        indice (int): largest combination size to try first.
        vet_in: original input, forwarded to utility.bin_to_int2.

    Returns:
        list: decoded results for the largest size with at least one
        valid combination (empty when none exists at any size).
    """
    out = []
    while indice >= 1:
        # materialize the combinations iterator into a list
        vet2 = list(combinations(vet1, indice))
        for riga in vet2:
            # turn the selected rows into a numpy array
            tmp = np.array(riga)
            # column-wise sum across the selected rows, stored back in tmp
            tmp = tmp.sum(axis=0)
            if (all(x < 2 for x in tmp)):
                # convert from binary to integer
                out.append(utility.bin_to_int2(vet_in, vet1, riga, len(vet2[0])))
        # no solution at this size: retry with one row fewer
        if (not out):
            indice = indice - 1
        else:
            break
return out | [
"itertools.combinations",
"numpy.array"
] | [((203, 229), 'itertools.combinations', 'combinations', (['vet1', 'indice'], {}), '(vet1, indice)\n', (215, 229), False, 'from itertools import combinations\n'), ((331, 345), 'numpy.array', 'np.array', (['riga'], {}), '(riga)\n', (339, 345), True, 'import numpy as np\n')] |
import os
import pytest
import taichi as ti
from taichi import approx
def run_mpm88_test():
    """Run a short MPM88 (material point method) fluid simulation and
    check the first four statistical moments of the particle positions
    against recorded regression values."""
    # Simulation constants: 2D, 64x64 particles on a 128x128 grid.
    dim = 2
    N = 64
    n_particles = N * N
    n_grid = 128
    dx = 1 / n_grid
    inv_dx = 1 / dx
    dt = 2.0e-4
    p_vol = (dx * 0.5)**2
    p_rho = 1
    p_mass = p_vol * p_rho
    E = 400
    # Particle state: position x, velocity v, affine velocity matrix C,
    # volume ratio J; grid state: momentum/velocity grid_v and mass grid_m.
    x = ti.Vector.field(dim, dtype=ti.f32, shape=n_particles)
    v = ti.Vector.field(dim, dtype=ti.f32, shape=n_particles)
    C = ti.Matrix.field(dim, dim, dtype=ti.f32, shape=n_particles)
    J = ti.field(dtype=ti.f32, shape=n_particles)
    grid_v = ti.Vector.field(dim, dtype=ti.f32, shape=(n_grid, n_grid))
    grid_m = ti.field(dtype=ti.f32, shape=(n_grid, n_grid))
    @ti.kernel
    def substep():
        # Particle-to-grid (P2G): scatter mass and momentum to the 3x3
        # neighbourhood with quadratic B-spline weights.
        for p in x:
            base = (x[p] * inv_dx - 0.5).cast(int)
            fx = x[p] * inv_dx - base.cast(float)
            w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
            stress = -dt * p_vol * (J[p] - 1) * 4 * inv_dx * inv_dx * E
            affine = ti.Matrix([[stress, 0], [0, stress]]) + p_mass * C[p]
            for i in ti.static(range(3)):
                for j in ti.static(range(3)):
                    offset = ti.Vector([i, j])
                    dpos = (offset.cast(float) - fx) * dx
                    weight = w[i][0] * w[j][1]
                    grid_v[base + offset].atomic_add(
                        weight * (p_mass * v[p] + affine @ dpos))
                    grid_m[base + offset].atomic_add(weight * p_mass)
        # Grid update: normalize momentum to velocity, apply gravity, and
        # zero out velocity components pointing into the domain walls.
        for i, j in grid_m:
            if grid_m[i, j] > 0:
                bound = 3
                inv_m = 1 / grid_m[i, j]
                grid_v[i, j] = inv_m * grid_v[i, j]
                grid_v[i, j][1] -= dt * 9.8
                if i < bound and grid_v[i, j][0] < 0:
                    grid_v[i, j][0] = 0
                if i > n_grid - bound and grid_v[i, j][0] > 0:
                    grid_v[i, j][0] = 0
                if j < bound and grid_v[i, j][1] < 0:
                    grid_v[i, j][1] = 0
                if j > n_grid - bound and grid_v[i, j][1] > 0:
                    grid_v[i, j][1] = 0
        # Grid-to-particle (G2P): gather velocities, advect particles and
        # update the affine matrix and volume ratio.
        for p in x:
            base = (x[p] * inv_dx - 0.5).cast(int)
            fx = x[p] * inv_dx - base.cast(float)
            w = [
                0.5 * (1.5 - fx)**2, 0.75 - (fx - 1.0)**2, 0.5 * (fx - 0.5)**2
            ]
            new_v = ti.Vector.zero(ti.f32, 2)
            new_C = ti.Matrix.zero(ti.f32, 2, 2)
            for i in ti.static(range(3)):
                for j in ti.static(range(3)):
                    dpos = ti.Vector([i, j]).cast(float) - fx
                    g_v = grid_v[base + ti.Vector([i, j])]
                    weight = w[i][0] * w[j][1]
                    new_v += weight * g_v
                    new_C += 4 * weight * g_v.outer_product(dpos) * inv_dx
            v[p] = new_v
            x[p] += dt * v[p]
            J[p] *= 1 + dt * new_C.trace()
            C[p] = new_C
    # gui = ti._lib.core.GUI("MPM88", ti.core_veci(512, 512))
    # canvas = gui.get_canvas()
    # Initialize particles in a falling block and run 10 frames of 50 steps.
    for i in range(n_particles):
        x[i] = [i % N / N * 0.4 + 0.2, i / N / N * 0.4 + 0.05]
        v[i] = [0, -3]
        J[i] = 1
    for frame in range(10):
        for s in range(50):
            grid_v.fill([0, 0])
            grid_m.fill(0)
            substep()
    pos = x.to_numpy()
    pos[:, 1] *= 2
    # Reference moments recorded from a known-good run.
    regression = [
        0.31722742,
        0.15826741,
        0.10224003,
        0.07810827,
    ]
    for i in range(4):
        assert (pos**(i + 1)).mean() == approx(regression[i], rel=1e-2)
@ti.test()
def test_mpm88():
    """Run the MPM88 regression under taichi's default test configuration."""
    run_mpm88_test()
def _is_appveyor():
# AppVeyor adds `APPVEYOR=True` ('true' on Ubuntu)
# https://www.appveyor.com/docs/environment-variables/
return os.getenv('APPVEYOR', '').lower() == 'true'
#TODO: Remove exclude of ti.metal
@pytest.mark.skipif(_is_appveyor(), reason='Stuck on Appveyor.')
@ti.test(require=ti.extension.async_mode, exclude=[ti.metal], async_mode=True)
def test_mpm88_async():
    """Run the MPM88 regression with taichi's async execution mode."""
    # It seems that all async tests on Appveyor run super slow. For example,
    # on Appveyor, 10+ tests have passed during the execution of
    # test_fuse_dense_x2y2z. Maybe thread synchronizations are expensive?
    run_mpm88_test()
@ti.test(arch=[ti.cpu, ti.cuda, ti.opengl])
def test_mpm88_numpy_and_ndarray():
    """Run the MPM88 regression twice, backing the fields with plain numpy
    arrays and with taichi ndarrays, via the ti.any_arr kernel interface."""
    import numpy as np
    # Same simulation constants as run_mpm88_test.
    dim = 2
    N = 64
    n_particles = N * N
    n_grid = 128
    dx = 1 / n_grid
    inv_dx = 1 / dx
    dt = 2.0e-4
    p_vol = (dx * 0.5)**2
    p_rho = 1
    p_mass = p_vol * p_rho
    E = 400
    # Unlike the field-based version, all state is passed in as ti.any_arr
    # arguments so the same kernel accepts numpy arrays and taichi ndarrays.
    @ti.kernel
    def substep(x: ti.any_arr(element_dim=1), v: ti.any_arr(element_dim=1),
                C: ti.any_arr(element_dim=2), J: ti.any_arr(),
                grid_v: ti.any_arr(element_dim=1), grid_m: ti.any_arr()):
        # Particle-to-grid (P2G) scatter.
        for p in x:
            base = (x[p] * inv_dx - 0.5).cast(int)
            fx = x[p] * inv_dx - base.cast(float)
            w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
            stress = -dt * p_vol * (J[p] - 1) * 4 * inv_dx * inv_dx * E
            affine = ti.Matrix([[stress, 0], [0, stress]]) + p_mass * C[p]
            for i in ti.static(range(3)):
                for j in ti.static(range(3)):
                    offset = ti.Vector([i, j])
                    dpos = (offset.cast(float) - fx) * dx
                    weight = w[i][0] * w[j][1]
                    grid_v[base + offset].atomic_add(
                        weight * (p_mass * v[p] + affine @ dpos))
                    grid_m[base + offset].atomic_add(weight * p_mass)
        # Grid normalization, gravity and wall boundary conditions.
        for i, j in grid_m:
            if grid_m[i, j] > 0:
                bound = 3
                inv_m = 1 / grid_m[i, j]
                grid_v[i, j] = inv_m * grid_v[i, j]
                grid_v[i, j][1] -= dt * 9.8
                if i < bound and grid_v[i, j][0] < 0:
                    grid_v[i, j][0] = 0
                if i > n_grid - bound and grid_v[i, j][0] > 0:
                    grid_v[i, j][0] = 0
                if j < bound and grid_v[i, j][1] < 0:
                    grid_v[i, j][1] = 0
                if j > n_grid - bound and grid_v[i, j][1] > 0:
                    grid_v[i, j][1] = 0
        # Grid-to-particle (G2P) gather and advection.
        for p in x:
            base = (x[p] * inv_dx - 0.5).cast(int)
            fx = x[p] * inv_dx - base.cast(float)
            w = [
                0.5 * (1.5 - fx)**2, 0.75 - (fx - 1.0)**2, 0.5 * (fx - 0.5)**2
            ]
            new_v = ti.Vector.zero(ti.f32, 2)
            new_C = ti.Matrix.zero(ti.f32, 2, 2)
            for i in ti.static(range(3)):
                for j in ti.static(range(3)):
                    dpos = ti.Vector([i, j]).cast(float) - fx
                    g_v = grid_v[base + ti.Vector([i, j])]
                    weight = w[i][0] * w[j][1]
                    new_v += weight * g_v
                    new_C += 4 * weight * g_v.outer_product(dpos) * inv_dx
            v[p] = new_v
            x[p] += dt * v[p]
            J[p] *= 1 + dt * new_C.trace()
            C[p] = new_C
    def run_test(x, v, C, J, grid_v, grid_m):
        # Shared driver: initialize, simulate, and compare moments.
        for i in range(n_particles):
            x[i] = [i % N / N * 0.4 + 0.2, i / N / N * 0.4 + 0.05]
            v[i] = [0, -3]
            J[i] = 1
        for frame in range(10):
            for s in range(50):
                grid_v.fill(0)
                grid_m.fill(0)
                substep(x, v, C, J, grid_v, grid_m)
        # numpy arrays are used directly; ndarrays are converted first.
        pos = x if isinstance(x, np.ndarray) else x.to_numpy()
        pos[:, 1] *= 2
        regression = [
            0.31722742,
            0.15826741,
            0.10224003,
            0.07810827,
        ]
        for i in range(4):
            assert (pos**(i + 1)).mean() == approx(regression[i], rel=1e-2)
    def test_numpy():
        # Back every field with a plain numpy array.
        x = np.zeros((n_particles, dim), dtype=np.float32)
        v = np.zeros((n_particles, dim), dtype=np.float32)
        C = np.zeros((n_particles, dim, dim), dtype=np.float32)
        J = np.zeros(n_particles, dtype=np.float32)
        grid_v = np.zeros((n_grid, n_grid, dim), dtype=np.float32)
        grid_m = np.zeros((n_grid, n_grid), dtype=np.float32)
        run_test(x, v, C, J, grid_v, grid_m)
    def test_ndarray():
        # Back every field with a taichi ndarray.
        x = ti.Vector.ndarray(dim, ti.f32, n_particles)
        v = ti.Vector.ndarray(dim, ti.f32, n_particles)
        C = ti.Matrix.ndarray(dim, dim, ti.f32, n_particles)
        J = ti.ndarray(ti.f32, n_particles)
        grid_v = ti.Vector.ndarray(dim, ti.f32, (n_grid, n_grid))
        grid_m = ti.ndarray(ti.f32, (n_grid, n_grid))
        run_test(x, v, C, J, grid_v, grid_m)
    test_numpy()
    test_ndarray()
| [
"taichi.Vector.zero",
"taichi.test",
"taichi.ndarray",
"os.getenv",
"taichi.Matrix.ndarray",
"taichi.approx",
"taichi.Matrix",
"taichi.Vector.field",
"taichi.field",
"numpy.zeros",
"taichi.Matrix.zero",
"taichi.Vector",
"taichi.any_arr",
"taichi.Vector.ndarray",
"taichi.Matrix.field"
] | [((3540, 3549), 'taichi.test', 'ti.test', ([], {}), '()\n', (3547, 3549), True, 'import taichi as ti\n'), ((3882, 3959), 'taichi.test', 'ti.test', ([], {'require': 'ti.extension.async_mode', 'exclude': '[ti.metal]', 'async_mode': '(True)'}), '(require=ti.extension.async_mode, exclude=[ti.metal], async_mode=True)\n', (3889, 3959), True, 'import taichi as ti\n'), ((4224, 4266), 'taichi.test', 'ti.test', ([], {'arch': '[ti.cpu, ti.cuda, ti.opengl]'}), '(arch=[ti.cpu, ti.cuda, ti.opengl])\n', (4231, 4266), True, 'import taichi as ti\n'), ((304, 357), 'taichi.Vector.field', 'ti.Vector.field', (['dim'], {'dtype': 'ti.f32', 'shape': 'n_particles'}), '(dim, dtype=ti.f32, shape=n_particles)\n', (319, 357), True, 'import taichi as ti\n'), ((366, 419), 'taichi.Vector.field', 'ti.Vector.field', (['dim'], {'dtype': 'ti.f32', 'shape': 'n_particles'}), '(dim, dtype=ti.f32, shape=n_particles)\n', (381, 419), True, 'import taichi as ti\n'), ((428, 486), 'taichi.Matrix.field', 'ti.Matrix.field', (['dim', 'dim'], {'dtype': 'ti.f32', 'shape': 'n_particles'}), '(dim, dim, dtype=ti.f32, shape=n_particles)\n', (443, 486), True, 'import taichi as ti\n'), ((495, 536), 'taichi.field', 'ti.field', ([], {'dtype': 'ti.f32', 'shape': 'n_particles'}), '(dtype=ti.f32, shape=n_particles)\n', (503, 536), True, 'import taichi as ti\n'), ((550, 608), 'taichi.Vector.field', 'ti.Vector.field', (['dim'], {'dtype': 'ti.f32', 'shape': '(n_grid, n_grid)'}), '(dim, dtype=ti.f32, shape=(n_grid, n_grid))\n', (565, 608), True, 'import taichi as ti\n'), ((622, 668), 'taichi.field', 'ti.field', ([], {'dtype': 'ti.f32', 'shape': '(n_grid, n_grid)'}), '(dtype=ti.f32, shape=(n_grid, n_grid))\n', (630, 668), True, 'import taichi as ti\n'), ((7707, 7753), 'numpy.zeros', 'np.zeros', (['(n_particles, dim)'], {'dtype': 'np.float32'}), '((n_particles, dim), dtype=np.float32)\n', (7715, 7753), True, 'import numpy as np\n'), ((7766, 7812), 'numpy.zeros', 'np.zeros', (['(n_particles, dim)'], {'dtype': 'np.float32'}), 
'((n_particles, dim), dtype=np.float32)\n', (7774, 7812), True, 'import numpy as np\n'), ((7825, 7876), 'numpy.zeros', 'np.zeros', (['(n_particles, dim, dim)'], {'dtype': 'np.float32'}), '((n_particles, dim, dim), dtype=np.float32)\n', (7833, 7876), True, 'import numpy as np\n'), ((7889, 7928), 'numpy.zeros', 'np.zeros', (['n_particles'], {'dtype': 'np.float32'}), '(n_particles, dtype=np.float32)\n', (7897, 7928), True, 'import numpy as np\n'), ((7946, 7995), 'numpy.zeros', 'np.zeros', (['(n_grid, n_grid, dim)'], {'dtype': 'np.float32'}), '((n_grid, n_grid, dim), dtype=np.float32)\n', (7954, 7995), True, 'import numpy as np\n'), ((8013, 8057), 'numpy.zeros', 'np.zeros', (['(n_grid, n_grid)'], {'dtype': 'np.float32'}), '((n_grid, n_grid), dtype=np.float32)\n', (8021, 8057), True, 'import numpy as np\n'), ((8140, 8183), 'taichi.Vector.ndarray', 'ti.Vector.ndarray', (['dim', 'ti.f32', 'n_particles'], {}), '(dim, ti.f32, n_particles)\n', (8157, 8183), True, 'import taichi as ti\n'), ((8196, 8239), 'taichi.Vector.ndarray', 'ti.Vector.ndarray', (['dim', 'ti.f32', 'n_particles'], {}), '(dim, ti.f32, n_particles)\n', (8213, 8239), True, 'import taichi as ti\n'), ((8252, 8300), 'taichi.Matrix.ndarray', 'ti.Matrix.ndarray', (['dim', 'dim', 'ti.f32', 'n_particles'], {}), '(dim, dim, ti.f32, n_particles)\n', (8269, 8300), True, 'import taichi as ti\n'), ((8313, 8344), 'taichi.ndarray', 'ti.ndarray', (['ti.f32', 'n_particles'], {}), '(ti.f32, n_particles)\n', (8323, 8344), True, 'import taichi as ti\n'), ((8362, 8410), 'taichi.Vector.ndarray', 'ti.Vector.ndarray', (['dim', 'ti.f32', '(n_grid, n_grid)'], {}), '(dim, ti.f32, (n_grid, n_grid))\n', (8379, 8410), True, 'import taichi as ti\n'), ((8428, 8464), 'taichi.ndarray', 'ti.ndarray', (['ti.f32', '(n_grid, n_grid)'], {}), '(ti.f32, (n_grid, n_grid))\n', (8438, 8464), True, 'import taichi as ti\n'), ((2353, 2378), 'taichi.Vector.zero', 'ti.Vector.zero', (['ti.f32', '(2)'], {}), '(ti.f32, 2)\n', (2367, 2378), True, 'import 
taichi as ti\n'), ((2399, 2427), 'taichi.Matrix.zero', 'ti.Matrix.zero', (['ti.f32', '(2)', '(2)'], {}), '(ti.f32, 2, 2)\n', (2413, 2427), True, 'import taichi as ti\n'), ((3505, 3536), 'taichi.approx', 'approx', (['regression[i]'], {'rel': '(0.01)'}), '(regression[i], rel=0.01)\n', (3511, 3536), False, 'from taichi import approx\n'), ((4561, 4586), 'taichi.any_arr', 'ti.any_arr', ([], {'element_dim': '(1)'}), '(element_dim=1)\n', (4571, 4586), True, 'import taichi as ti\n'), ((4591, 4616), 'taichi.any_arr', 'ti.any_arr', ([], {'element_dim': '(1)'}), '(element_dim=1)\n', (4601, 4616), True, 'import taichi as ti\n'), ((4637, 4662), 'taichi.any_arr', 'ti.any_arr', ([], {'element_dim': '(2)'}), '(element_dim=2)\n', (4647, 4662), True, 'import taichi as ti\n'), ((4667, 4679), 'taichi.any_arr', 'ti.any_arr', ([], {}), '()\n', (4677, 4679), True, 'import taichi as ti\n'), ((4705, 4730), 'taichi.any_arr', 'ti.any_arr', ([], {'element_dim': '(1)'}), '(element_dim=1)\n', (4715, 4730), True, 'import taichi as ti\n'), ((4740, 4752), 'taichi.any_arr', 'ti.any_arr', ([], {}), '()\n', (4750, 4752), True, 'import taichi as ti\n'), ((6404, 6429), 'taichi.Vector.zero', 'ti.Vector.zero', (['ti.f32', '(2)'], {}), '(ti.f32, 2)\n', (6418, 6429), True, 'import taichi as ti\n'), ((6450, 6478), 'taichi.Matrix.zero', 'ti.Matrix.zero', (['ti.f32', '(2)', '(2)'], {}), '(ti.f32, 2, 2)\n', (6464, 6478), True, 'import taichi as ti\n'), ((997, 1034), 'taichi.Matrix', 'ti.Matrix', (['[[stress, 0], [0, stress]]'], {}), '([[stress, 0], [0, stress]])\n', (1006, 1034), True, 'import taichi as ti\n'), ((3736, 3761), 'os.getenv', 'os.getenv', (['"""APPVEYOR"""', '""""""'], {}), "('APPVEYOR', '')\n", (3745, 3761), False, 'import os\n'), ((5048, 5085), 'taichi.Matrix', 'ti.Matrix', (['[[stress, 0], [0, stress]]'], {}), '([[stress, 0], [0, stress]])\n', (5057, 5085), True, 'import taichi as ti\n'), ((7640, 7671), 'taichi.approx', 'approx', (['regression[i]'], {'rel': '(0.01)'}), '(regression[i], 
rel=0.01)\n', (7646, 7671), False, 'from taichi import approx\n'), ((1168, 1185), 'taichi.Vector', 'ti.Vector', (['[i, j]'], {}), '([i, j])\n', (1177, 1185), True, 'import taichi as ti\n'), ((5219, 5236), 'taichi.Vector', 'ti.Vector', (['[i, j]'], {}), '([i, j])\n', (5228, 5236), True, 'import taichi as ti\n'), ((2618, 2635), 'taichi.Vector', 'ti.Vector', (['[i, j]'], {}), '([i, j])\n', (2627, 2635), True, 'import taichi as ti\n'), ((6669, 6686), 'taichi.Vector', 'ti.Vector', (['[i, j]'], {}), '([i, j])\n', (6678, 6686), True, 'import taichi as ti\n'), ((2543, 2560), 'taichi.Vector', 'ti.Vector', (['[i, j]'], {}), '([i, j])\n', (2552, 2560), True, 'import taichi as ti\n'), ((6594, 6611), 'taichi.Vector', 'ti.Vector', (['[i, j]'], {}), '([i, j])\n', (6603, 6611), True, 'import taichi as ti\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.