file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
rastermanager.py | import os
import sys
import numpy as np
import time
import yaml
import calendar
from datetime import datetime, timedelta, date
from s3fs.core import S3FileSystem
import boto3
import fiona
import pandas as pd
import rasterio.mask
from rasterio.crs import CRS
from rasterio.enums import Resampling
from rasterio import shutil as rio_shutil
from rasterio.vrt import WarpedVRT
from timeit import default_timer as t_now
from .pathmanager import PathManager
from .box_poly import box_create_ugly_proprietary_shapefile_plus_json_from_tile
from .log_logger import log_make_logger
from .optimeister import OptiMeister
class RasterManager:
    """
    This class addresses all the data wrangling needs to get the data sets needed in the model
    to the extent and resolution chosen by the user.

    The user needs to input a sample shapefile (polygon) and geotiff file that is used to
    get the attributes for processing extent and resolution, etc.
    """
    # TODO - get large extent tif files for testing the RasterManager warping functions
    # TODO - make overlapping of 2 dataset work
    # TODO - rasterio either clips based on a sample raster and shapefile, or based on user defined geo info
    # #TODO - https://rasterio.readthedocs.io/en/latest/topics/masking-by-shapefile.html
    # Todo - test raster manager as a standalone module.
    # root of the output location (bucket or directory)
    out_root = None
    # --- geographic info for destination files ---
    # coordinate reference system of the destination grid (set by set_model_std_grid)
    crs = None
    # destination raster width in pixels
    cols = None
    # destination raster height in pixels
    rows = None
    # pixel size in x (map units per pixel)
    xres = None
    # pixel size in y (map units per pixel)
    yres = None
    # left geo coord 'e.g. Westernmost Longitude"
    left = None
    # top geo coord e.g highest Latitude extent
    top = None
    # NOTE: evaluated once at class-definition time, so this is only a
    # placeholder of [None, 0.0, None, 0.0, None, None]; the real affine
    # transform is assigned in set_model_std_grid().
    transform = [xres, 0.0, left, 0.0, yres, top]
    # sample geotiff whose metadata defines the model grid
    geoproperties_file = None
    # can contain multiple features of interest
    shapefile = None
    # local scratch directory for warped/temporary rasters
    temp_folder = None
def __init__(self, config_dict, shp=None):
    """Set up logging, the temp folder, the shapefile and the geoproperties file.

    :param config_dict: run configuration; must contain 'tile', 'optimize' and
        'geoproperties_file' (plus 'out_root'/'path_mode' for cloud output).
    :param shp: optional path to a user shapefile; when None, a bounding-box
        shapefile is generated from the tile name.
    """
    self.log = log_make_logger('RASTER MANAGER LOG')
    self.optimize = False
    self.config_dict = config_dict
    tile = self.config_dict['tile']
    self.log.info('tile name is - {}'.format(tile))
    if 'tile' in tile:
        self.log.info("using scalable tile names {}".format(tile))
    if self.config_dict['optimize']:
        self.optimize = True
        self.opti = OptiMeister(config_dict, shp)
    # local scratch directory named after the tile
    self.temp_folder = './' + tile
    # BUG FIX: the format string had no placeholder, so the folder name was
    # never logged ('temp folder is'.format(x) returns the bare string).
    self.log.info('temp folder is {}'.format(self.temp_folder))
    if not os.path.exists(self.temp_folder):
        os.makedirs(self.temp_folder)
    # if the user does not include a shapefile in VegET, a box based on the tile name will be created.
    if shp is None:
        self.shapefile = box_create_ugly_proprietary_shapefile_plus_json_from_tile(self.temp_folder, tile)
    else:
        self.shapefile = shp
    self.geoproperties_file = config_dict['geoproperties_file']
    if self.geoproperties_file is None or self.shapefile is None:
        print('Assuming the user entered values in the config_dict for boundaries of the AOI not implemented at thsi time')
        sys.exit(0)
def output_rasters_cloud(self, arr, outname):
    """
    This function creates geotiff files from the model output arrays
    and uploads them to cloud storage.

    :param arr: 2-D numpy array shaped (self.rows, self.cols) - TODO confirm
    :param outname: bucket-relative output name (may contain '/' separators)
    """
    if self.config_dict['path_mode'] == 'aws':
        # write locally first; s3_delete_local() removes the file after upload
        local_outname = outname.split('/')[-1]
        local_outpath = os.path.join(self.temp_folder, local_outname)
        self.log.debug('local_outpath {}'.format(local_outpath))
        t0 = t_now()
        band1 = arr
        # write to a temp folder
        with rasterio.open(local_outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,
                           count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:
            wrast.write(band1, indexes=1)
        # Buckets are not directories but you can treat them like they are:
        # the first path element is the bucket, the rest is the key prefix.
        bucket_name = self.config_dict['out_root'].split('/')[0]
        bucket_prefix_list = self.config_dict['out_root'].split('/')[1:]
        print(bucket_prefix_list)
        bucket_prefix = '/'.join(bucket_prefix_list)
        print("bucket prefix =", bucket_prefix)
        bucket_filepath = os.path.join(bucket_prefix, outname)
        # uploads to aws bucket with filepath, then deletes the local copy
        self.s3_delete_local(local_file=local_outpath, bucket=bucket_name, bucket_filepath=bucket_filepath)
        t_total = t_now() - t0
        self.log.info("OUTPUT - TIME - {} - {}".format(t_total, bucket_filepath))
    elif self.config_dict['path_mode'] == 'google':
        # restored from the corrupted hole in this copy (branch body appears
        # verbatim in the duplicate copy of this module below)
        print('google path mode not yet implemented')
        sys.exit(0)
    else:
        print('PATH MODE in config is not set properly for the cloud implementation of output_Rasters')
        sys.exit(0)
# ----------- create output rasters -----------------
def output_rasters(self, arr, outdir, outname):
    """
    This function creates geotiff files from the model output arrays.

    :param arr: 2-D numpy array shaped (self.rows, self.cols) - TODO confirm
    :param outdir: local directory for the geotiff (created if missing)
    :param outname: file name of the geotiff
    """
    # make the subdirectories if we need 'em
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    if self.config_dict['path_mode'] == 'local':
        outpath = os.path.join(outdir, outname)
        print('the outpath for file {} is {}'.format(outname, outpath))
        band1 = arr
        # single-band float64 GTiff on the grid set by set_model_std_grid()
        with rasterio.open(outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,
                           count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:
            wrast.write(band1, indexes=1)
    else:
        print('PATH MODE in config is not set properly for the local implementation of output_Rasters')
        sys.exit(0)
def set_model_std_grid(self, feat=0):
    """Clips and crops a tiff to the extent of a feature in a shapefile.

    Side effect: sets the destination-grid attributes (crs, transform, rows,
    cols, xres, yres, left, top) used by all later warping and output calls.

    :param feat: feat is the feature id of the shapefile from like a GeoJSON)
    # https://rasterio.readthedocs.io/en/latest/topics/virtual-warping.html
    """
    print(self.shapefile)
    with fiona.open(self.shapefile, 'r') as shapefile:
        # todo - set up an error if user has shapefile with more than one feature. GELP n STEFFI
        # default: collect every feature's geometry
        shapes = [feature["geometry"] for feature in shapefile]
        for feature in shapefile:
            # matching the FID of the given shapefile from a typical geoJSON (Not Ordered Dict nonsense)
            # NOTE(review): fiona feature ids are strings while the default
            # feat is the int 0, so this match may never fire - TODO confirm
            if feat == feature['id']:
                shapes = [feature['geometry']]
    print(f'geoproperties file {self.geoproperties_file}')
    print('This is the shape var:', shapes)
    with rasterio.open(self.geoproperties_file, 'r') as src:
        # crop the sample raster to the selected geometry
        out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True)
        out_meta = src.meta
    # once the image is cropped, the image metadata dictionary is updated with the cropped transform and bounds.
    out_meta.update({"driver": "GTiff",
                     "height": out_image.shape[1],
                     "width": out_image.shape[2],
                     "transform": out_transform})
    self.crs = out_meta['crs']
    # TODO - Set Blocksize for sample raster and other useful optimization thingys
    self.transform = out_meta['transform']
    # affine terms: transform[2] = left (west) edge, transform[5] = top edge
    self.left = self.transform[2]
    self.top = self.transform[5]
    self.cols = out_meta['width']
    self.rows = out_meta['height']
    # pixel sizes: transform[0] = x resolution, transform[4] = y resolution
    self.xres = self.transform[0]
    self.yres = self.transform[4]
    # return out_meta
def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):
    """
    Uses rasterio virtual raster to standardize grids of different crs, resolution,
    boundaries based on a shapefile geometry feature.

    Each input is warped through a WarpedVRT onto the grid fixed by
    set_model_std_grid(), written to a temp tiff, then read back as an array.

    :param inputs: a list of (daily) raster input files for the water balance.
    :param resamplemethod: resampling strategy; only 'nearest' is supported.
    :return: list of numpy arrays
    """
    outputs = []
    npy_outputs = []
    if resamplemethod == 'nearest':
        rs = Resampling.nearest
    else:
        print('only nearest neighbor resampling is supported at this time')
        sys.exit(0)
    for i, warpfile in enumerate(inputs):
        # print('warpfile', warpfile)
        with rasterio.open(warpfile) as src:
            # TODO - make the default configurable.
            # if src.crs == None:
            #     src.crs = CRS.from_epsg(4326)
            # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.
            with WarpedVRT(src, resampling=rs,
                           crs=self.crs,
                           transform=self.transform,
                           height=self.rows,
                           width=self.cols) as vrt:
                data = vrt.read()
                # print(type(vrt))
                # save the file as an enumerated tiff. reopen outside this loop with the outputs list
                outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))
                rio_shutil.copy(vrt, outwarp, driver='GTiff')
                outputs.append(outwarp)
    # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.
    # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays
    # from this method for us in the rest of the code.
    for ow in outputs:
        with rasterio.open(ow, 'r') as src:
            # band 1 only; single-band inputs assumed - TODO confirm
            arr = src.read(1)
            npy_outputs.append(arr)
    return npy_outputs
def _warp_one(self, warpfile, rs):
    """Warp a single raster onto the model grid, retrying transient IO errors.

    :param warpfile: path (possibly a remote /vsis3-style path) of the raster
    :param rs: a rasterio.enums.Resampling member
    :return: 2-D numpy array of band 1 on the model grid, or None if all
        10 attempts raise RasterioIOError (callers do not check for None).
    """
    t0 = t_now()
    cnt = 10
    # up to 10 attempts: cloud reads can fail transiently
    while (cnt > 0):
        try:
            with rasterio.open(warpfile) as src:
                # default to EPSG:4326 when the source has no CRS tagged
                # NOTE(review): assigning crs on a dataset opened read-only may
                # not persist / may raise in newer rasterio - TODO confirm
                if src.crs == None:
                    src.crs = CRS.from_epsg(4326)
                # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.
                with WarpedVRT(src, resampling=rs,
                               crs=self.crs,
                               transform=self.transform,
                               height=self.rows,
                               width=self.cols) as vrt:
                    data = vrt.read(1)
                    # print(type(vrt))
                    print("data shape =", data.shape)
                    self.log.info("_warp_one Completed {}".format(warpfile))
            t_total = t_now() - t0
            self.log.info("WARP - TIME - {} - {}".format(t_total, warpfile))
            return data
        except rasterio.errors.RasterioIOError:
            print("Unexpected error:", sys.exc_info()[0])
            print('oops', cnt)
            cnt = cnt - 1
            # back off briefly before retrying the read
            time.sleep(4)
    # falls through after exhausting retries -> implicitly returns None
def _warp_inputs(self, inputs, resamplemethod):
    """Warp every input raster onto the model grid and return numpy arrays.

    Dispatches each file either to OptiMeister (when optimization is enabled)
    or to the plain _warp_one() path.

    :param inputs: list of raster file paths
    :param resamplemethod: only 'nearest' is supported
    :return: list of 2-D numpy arrays, one per input file
    """
    self.log.info("_warp_inputs")
    if resamplemethod != 'nearest':
        print('only nearest neighbor resampling is supported at this time')
        sys.exit(0)
    rs = Resampling.nearest
    warped = []
    for path in inputs:
        print('warpfile', path)
        if self.optimize:
            grid = self.opti.o_warp_one(path, rs, self.crs, self.transform, self.rows, self.cols)
        else:
            grid = self._warp_one(path, rs)
        warped.append(grid)
    return warped
def scale_rasters(self, numpys, scalefactors):
    """Apply a multiplicative scale factor to each raster array, in place.

    :param numpys: list of numpy arrays (each is mutated via ``*=``)
    :param scalefactors: scalars paired positionally with ``numpys``
    :return: new list containing the same (now scaled) arrays
    """
    scaled = []
    for raster, factor in zip(numpys, scalefactors):
        raster *= factor
        scaled.append(raster)
    return scaled
def normalize_to_std_grid_fast(self, inputs, resamplemethod='nearest'):
    """Standardize input rasters onto the model grid (fast, in-memory path).

    Unlike normalize_to_std_grid(), no temporary tiffs are written; all
    warping is delegated to _warp_inputs().

    :param inputs: a list of (daily) raster input files for the water balance.
    :param resamplemethod: only 'nearest' is supported.
    :return: list of numpy arrays
    """
    return self._warp_inputs(inputs, resamplemethod)
def s3_delete_local(self, local_file, bucket, bucket_filepath):
    """
    This function will move the model outputs from a local folder to a cloud bucket.

    :param local_file: path to the local geo file (deleted after the upload)
    :param bucket: name of the cloud bucket e.g. 'dev-et-data'; a GDAL-style
        '/vsis3/...' name is tolerated and reduced to the bucket name
    :param bucket_filepath: object key ("path") inside the bucket
    :return: None
    """
    s3 = boto3.client('s3')
    with open(local_file, "rb") as f:
        # tolerate '/vsis3/bucket' names by keeping only the last path element
        if 'vsis3' in bucket:
            bucket = bucket.split('/')[-1]
        print(bucket, bucket_filepath)
        s3.upload_fileobj(f, bucket, bucket_filepath)
    # local copy is no longer needed once it lives in the bucket
    os.remove(local_file)
| print('google path mode not yet implemented')
sys.exit(0) | conditional_block |
rastermanager.py | import os
import sys
import numpy as np
import time
import yaml
import calendar
from datetime import datetime, timedelta, date
from s3fs.core import S3FileSystem
import boto3
import fiona
import pandas as pd
import rasterio.mask
from rasterio.crs import CRS
from rasterio.enums import Resampling
from rasterio import shutil as rio_shutil
from rasterio.vrt import WarpedVRT
from timeit import default_timer as t_now
from .pathmanager import PathManager
from .box_poly import box_create_ugly_proprietary_shapefile_plus_json_from_tile
from .log_logger import log_make_logger
from .optimeister import OptiMeister
class RasterManager:
"""
This class addresses all the data wrangling needs to get the data sets needed in the model
to the extent nad resolution chosen by the user.
The user needs to input a sample shapefile (polygon) and geotiff file that is used to
get the attributes for processing extent and resolution, etc.
"""
# TODO - get large extent tif files for testing the RasterManager warping functions
# TODO - make overlapping of 2 dataset work
# TODO - rasterio either clips based on a sample raster and shapefile, or based on user defined geo info
# #TODO - https://rasterio.readthedocs.io/en/latest/topics/masking-by-shapefile.html
# Todo - test raster manager as a standalone module.
out_root = None
# --- geographic info for destination files ---
crs = None
cols = None
rows = None
xres = None
yres = None
# left geo coord 'e.g. Westernmost Longitude"
left = None
# top geo coord e.g highest Latitude extent
top = None
transform = [xres, 0.0, left, 0.0, yres, top]
geoproperties_file = None
# can contain multiple features of interest
shapefile = None
temp_folder = None
def __init__(self, config_dict, shp=None):
self.log = log_make_logger('RASTER MANAGER LOG')
self.optimize = False
self.config_dict = config_dict
tile = self.config_dict['tile']
| self.log.info('tile name is - {}'.format(tile))
if 'tile' in tile:
self.log.info("using scalable tile names {}".format(tile))
#bucket_name = self.config_dict['out_root'].split('/')[0]
#today = date.today()
#print("Current date =", today)
#date_str=today.strftime("%m_%d_%Y")
#self.config_dict['out_root'] = bucket_name + '/out/DelawareRiverBasin/Run' + date_str + '/' + tile
if self.config_dict['optimize']:
self.optimize = True
self.opti=OptiMeister(config_dict,shp)
# self.geoproperties_file = config_dict.geoproperties_file
# self.shapefile = config_dict.shapefile
# self.temp_folder = os.path.join(config_dict.out_root, config_dict.temp_folder)
# self.temp_folder = config_dict['temp_folder']
self.temp_folder = './' + tile
self.log.info('temp folder is'.format(self.temp_folder))
if not os.path.exists(self.temp_folder):
os.makedirs(self.temp_folder)
# if the user does not include a shapefile in VegET, a box based on the tile name will be created.
if shp == None:
self.shapefile = box_create_ugly_proprietary_shapefile_plus_json_from_tile(self.temp_folder, tile)
else:
self.shapefile = shp
self.geoproperties_file = config_dict['geoproperties_file']
if self.geoproperties_file == None or self.shapefile==None:
print('Assuming the user entered values in the config_dict for boundaries of the AOI not implemented at thsi time')
sys.exit(0)
def output_rasters_cloud(self, arr, outname):
"""
This function creates geotiff files from the model output arrays.
"""
if self.config_dict['path_mode'] == 'aws':
# later on deleted by s3_delete_local()
# local_outpath = os.path.join(self.config_dict['temp_folder'], outname)
local_outname = outname.split('/')[-1]
local_outpath = os.path.join(self.temp_folder, local_outname)
self.log.debug('local_outpath {}'.format(local_outpath))
t0 = t_now()
band1 = arr
# write to a temp folder
with rasterio.open(local_outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,
count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:
wrast.write(band1, indexes=1)
# Buckets are not directories but you can treat them like they are
# bucket_name = os.path.split(self.config_dict['out_root'])[0] # dev-et-data
# bucket_prefix = os.path.split(self.config_dict['out_root'])[-1] # tile_modelrun1
bucket_name = self.config_dict['out_root'].split('/')[0]
bucket_prefix_list = self.config_dict['out_root'].split('/')[1:]
print(bucket_prefix_list)
bucket_prefix = '/'.join(bucket_prefix_list)
print("bucket prefix =", bucket_prefix)
bucket_filepath = os.path.join(bucket_prefix, outname) # os.path.join(dev-et-data/tile_modelrun1, outname)
# uploads to aws bucket with filepath
self.s3_delete_local(local_file=local_outpath, bucket=bucket_name, bucket_filepath=bucket_filepath)
t_total = t_now() - t0
self.log.info("OUTPUT - TIME - {} - {}".format(t_total, bucket_filepath))
elif self.config_dict['path_mode'] == 'google':
print('google path mode not yet implemented')
sys.exit(0)
else:
print('PATH MODE in config is not set properly for the cloud implementation of output_Rasters')
sys.exit(0)
# ----------- create output rasters -----------------
def output_rasters(self, arr, outdir, outname):
"""
This function creates geotiff files from the model output arrays.
"""
# make the subdirectories if we need 'em
if not os.path.exists(outdir):
os.makedirs(outdir)
if self.config_dict['path_mode'] == 'local':
outpath = os.path.join(outdir, outname)
print('the outpath for file {} is {}'.format(outname, outpath))
band1 = arr
with rasterio.open(outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,
count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:
wrast.write(band1, indexes=1)
else:
print('PATH MODE in config is not set properly for the local implementation of output_Rasters')
sys.exit(0)
def set_model_std_grid(self, feat=0):
"""Clips and crops a tiff to the extent of a feature in a shapefile
:param feat: feat is the feature id of the shapefile from like a GeoJSON)
# https://rasterio.readthedocs.io/en/latest/topics/virtual-warping.html
"""
print(self.shapefile)
with fiona.open(self.shapefile, 'r') as shapefile:
# todo - set up an error if user has shapefile with more than one feature. GELP n STEFFI
# shape = shapefile[0]['geometry']
shapes = [feature["geometry"] for feature in shapefile]
for feature in shapefile:
# matching the FID of the given shapefile from a typical geoJSON (Not Ordered Dict nonsense)
if feat == feature['id']:
shapes = [feature['geometry']]
print(f'geoproperties file {self.geoproperties_file}')
print('This is the shape var:', shapes)
with rasterio.open(self.geoproperties_file, 'r') as src:
out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True)
out_meta = src.meta
# once the image is cropped, the image metadata dictionary is updated with the cropped transform and bounds.
out_meta.update({"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform})
self.crs = out_meta['crs']
# TODO - Set Blocksize for sample raster and other useful optimization thingys
self.transform = out_meta['transform']
self.left = self.transform[2]
self.top = self .transform[5]
self.cols = out_meta['width']
self.rows = out_meta['height']
self.xres = self.transform[0]
self.yres = self.transform[4]
# return out_meta
def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):
"""
Uses rasterio virtual raster to standardize grids of different crs, resolution, boundaries based on a shapefile geometry feature
:param inputs: a list of (daily) raster input files for the water balance.
:param outloc: output locations 'temp' for the virtual files
:return: list of numpy arrays
"""
outputs = []
npy_outputs = []
if resamplemethod == 'nearest':
rs = Resampling.nearest
else:
print('only nearest neighbor resampling is supported at this time')
sys.exit(0)
for i, warpfile in enumerate(inputs):
# print('warpfile', warpfile)
with rasterio.open(warpfile) as src:
# TODO - make the default configurable.
# if src.crs == None:
# src.crs = CRS.from_epsg(4326)
# create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.
with WarpedVRT(src, resampling=rs,
crs=self.crs,
transform=self.transform,
height=self.rows,
width=self.cols) as vrt:
data = vrt.read()
# print(type(vrt))
# save the file as an enumerated tiff. reopen outside this loop with the outputs list
outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))
rio_shutil.copy(vrt, outwarp, driver='GTiff')
outputs.append(outwarp)
# output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.
# for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays
# from this method for us in the rest of the code.
for ow in outputs:
with rasterio.open(ow, 'r') as src:
arr = src.read(1)
npy_outputs.append(arr)
return npy_outputs
def _warp_one(self, warpfile, rs):
t0 = t_now()
cnt=10
while(cnt>0):
try:
with rasterio.open(warpfile) as src:
if src.crs == None:
src.crs = CRS.from_epsg(4326)
# create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.
with WarpedVRT(src, resampling=rs,
crs=self.crs,
transform=self.transform,
height=self.rows,
width=self.cols) as vrt:
data = vrt.read(1)
# print(type(vrt))
print("data shape =", data.shape)
self.log.info("_warp_one Completed {}".format(warpfile))
t_total = t_now() - t0
self.log.info("WARP - TIME - {} - {}".format(t_total, warpfile))
return data
except rasterio.errors.RasterioIOError:
print("Unexpected error:", sys.exc_info()[0])
print('oops',cnt)
cnt = cnt - 1
time.sleep(4)
def _warp_inputs(self, inputs, resamplemethod):
self.log.info("_warp_inputs")
outputs = []
npy_outputs = []
if resamplemethod == 'nearest':
rs = Resampling.nearest
else:
print('only nearest neighbor resampling is supported at this time')
sys.exit(0)
for i, warpfile in enumerate(inputs):
print('warpfile', warpfile)
if (self.optimize):
data = self.opti.o_warp_one(warpfile, rs, self.crs, self.transform, self.rows, self.cols)
else:
data = self._warp_one(warpfile, rs)
npy_outputs.append(data)
return npy_outputs
def scale_rasters(self, numpys, scalefactors):
vals = []
for arr, sc in zip(numpys, scalefactors):
arr *= sc
vals.append(arr)
return vals
def normalize_to_std_grid_fast(self, inputs, resamplemethod='nearest'):
"""
Uses rasterio virtual raster to standardize grids of different crs, resolution, boundaries based on a shapefile geometry feature
:param inputs: a list of (daily) raster input files for the water balance.
:param outloc: output locations 'temp' for the virtual files
:return: list of numpy arrays
"""
npy_outputs = self._warp_inputs(inputs, resamplemethod)
return npy_outputs
def s3_delete_local(self, local_file, bucket, bucket_filepath):
"""
This function will move the model outputs from a local folder to a cloud bucket.
:param local_file: path the the local geo file
:param outpath: path of a directory to be created in the cloud bucket
:param bucket: name of the cloud bucket = 'dev-et-data'
:param bucket_folder: "folder" in cloud bucket = 'v1DRB_outputs'
:return:
"""
s3 = boto3.client('s3')
with open(local_file, "rb") as f:
if 'vsis3' in bucket:
bucket = bucket.split('/')[-1]
print(bucket, bucket_filepath)
s3.upload_fileobj(f, bucket, bucket_filepath)
os.remove(local_file) | random_line_split | |
rastermanager.py | import os
import sys
import numpy as np
import time
import yaml
import calendar
from datetime import datetime, timedelta, date
from s3fs.core import S3FileSystem
import boto3
import fiona
import pandas as pd
import rasterio.mask
from rasterio.crs import CRS
from rasterio.enums import Resampling
from rasterio import shutil as rio_shutil
from rasterio.vrt import WarpedVRT
from timeit import default_timer as t_now
from .pathmanager import PathManager
from .box_poly import box_create_ugly_proprietary_shapefile_plus_json_from_tile
from .log_logger import log_make_logger
from .optimeister import OptiMeister
class RasterManager:
"""
This class addresses all the data wrangling needs to get the data sets needed in the model
to the extent nad resolution chosen by the user.
The user needs to input a sample shapefile (polygon) and geotiff file that is used to
get the attributes for processing extent and resolution, etc.
"""
# TODO - get large extent tif files for testing the RasterManager warping functions
# TODO - make overlapping of 2 dataset work
# TODO - rasterio either clips based on a sample raster and shapefile, or based on user defined geo info
# #TODO - https://rasterio.readthedocs.io/en/latest/topics/masking-by-shapefile.html
# Todo - test raster manager as a standalone module.
out_root = None
# --- geographic info for destination files ---
crs = None
cols = None
rows = None
xres = None
yres = None
# left geo coord 'e.g. Westernmost Longitude"
left = None
# top geo coord e.g highest Latitude extent
top = None
transform = [xres, 0.0, left, 0.0, yres, top]
geoproperties_file = None
# can contain multiple features of interest
shapefile = None
temp_folder = None
def __init__(self, config_dict, shp=None):
self.log = log_make_logger('RASTER MANAGER LOG')
self.optimize = False
self.config_dict = config_dict
tile = self.config_dict['tile']
self.log.info('tile name is - {}'.format(tile))
if 'tile' in tile:
self.log.info("using scalable tile names {}".format(tile))
#bucket_name = self.config_dict['out_root'].split('/')[0]
#today = date.today()
#print("Current date =", today)
#date_str=today.strftime("%m_%d_%Y")
#self.config_dict['out_root'] = bucket_name + '/out/DelawareRiverBasin/Run' + date_str + '/' + tile
if self.config_dict['optimize']:
self.optimize = True
self.opti=OptiMeister(config_dict,shp)
# self.geoproperties_file = config_dict.geoproperties_file
# self.shapefile = config_dict.shapefile
# self.temp_folder = os.path.join(config_dict.out_root, config_dict.temp_folder)
# self.temp_folder = config_dict['temp_folder']
self.temp_folder = './' + tile
self.log.info('temp folder is'.format(self.temp_folder))
if not os.path.exists(self.temp_folder):
os.makedirs(self.temp_folder)
# if the user does not include a shapefile in VegET, a box based on the tile name will be created.
if shp == None:
self.shapefile = box_create_ugly_proprietary_shapefile_plus_json_from_tile(self.temp_folder, tile)
else:
self.shapefile = shp
self.geoproperties_file = config_dict['geoproperties_file']
if self.geoproperties_file == None or self.shapefile==None:
print('Assuming the user entered values in the config_dict for boundaries of the AOI not implemented at thsi time')
sys.exit(0)
def output_rasters_cloud(self, arr, outname):
"""
This function creates geotiff files from the model output arrays.
"""
if self.config_dict['path_mode'] == 'aws':
# later on deleted by s3_delete_local()
# local_outpath = os.path.join(self.config_dict['temp_folder'], outname)
local_outname = outname.split('/')[-1]
local_outpath = os.path.join(self.temp_folder, local_outname)
self.log.debug('local_outpath {}'.format(local_outpath))
t0 = t_now()
band1 = arr
# write to a temp folder
with rasterio.open(local_outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,
count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:
wrast.write(band1, indexes=1)
# Buckets are not directories but you can treat them like they are
# bucket_name = os.path.split(self.config_dict['out_root'])[0] # dev-et-data
# bucket_prefix = os.path.split(self.config_dict['out_root'])[-1] # tile_modelrun1
bucket_name = self.config_dict['out_root'].split('/')[0]
bucket_prefix_list = self.config_dict['out_root'].split('/')[1:]
print(bucket_prefix_list)
bucket_prefix = '/'.join(bucket_prefix_list)
print("bucket prefix =", bucket_prefix)
bucket_filepath = os.path.join(bucket_prefix, outname) # os.path.join(dev-et-data/tile_modelrun1, outname)
# uploads to aws bucket with filepath
self.s3_delete_local(local_file=local_outpath, bucket=bucket_name, bucket_filepath=bucket_filepath)
t_total = t_now() - t0
self.log.info("OUTPUT - TIME - {} - {}".format(t_total, bucket_filepath))
elif self.config_dict['path_mode'] == 'google':
print('google path mode not yet implemented')
sys.exit(0)
else:
print('PATH MODE in config is not set properly for the cloud implementation of output_Rasters')
sys.exit(0)
# ----------- create output rasters -----------------
def output_rasters(self, arr, outdir, outname):
"""
This function creates geotiff files from the model output arrays.
"""
# make the subdirectories if we need 'em
if not os.path.exists(outdir):
os.makedirs(outdir)
if self.config_dict['path_mode'] == 'local':
outpath = os.path.join(outdir, outname)
print('the outpath for file {} is {}'.format(outname, outpath))
band1 = arr
with rasterio.open(outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,
count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:
wrast.write(band1, indexes=1)
else:
print('PATH MODE in config is not set properly for the local implementation of output_Rasters')
sys.exit(0)
def set_model_std_grid(self, feat=0):
"""Clips and crops a tiff to the extent of a feature in a shapefile
:param feat: feat is the feature id of the shapefile from like a GeoJSON)
# https://rasterio.readthedocs.io/en/latest/topics/virtual-warping.html
"""
print(self.shapefile)
with fiona.open(self.shapefile, 'r') as shapefile:
# todo - set up an error if user has shapefile with more than one feature. GELP n STEFFI
# shape = shapefile[0]['geometry']
shapes = [feature["geometry"] for feature in shapefile]
for feature in shapefile:
# matching the FID of the given shapefile from a typical geoJSON (Not Ordered Dict nonsense)
if feat == feature['id']:
shapes = [feature['geometry']]
print(f'geoproperties file {self.geoproperties_file}')
print('This is the shape var:', shapes)
with rasterio.open(self.geoproperties_file, 'r') as src:
out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True)
out_meta = src.meta
# once the image is cropped, the image metadata dictionary is updated with the cropped transform and bounds.
out_meta.update({"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform})
self.crs = out_meta['crs']
# TODO - Set Blocksize for sample raster and other useful optimization thingys
self.transform = out_meta['transform']
self.left = self.transform[2]
self.top = self .transform[5]
self.cols = out_meta['width']
self.rows = out_meta['height']
self.xres = self.transform[0]
self.yres = self.transform[4]
# return out_meta
def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):
"""
Uses rasterio virtual raster to standardize grids of different crs, resolution, boundaries based on a shapefile geometry feature
:param inputs: a list of (daily) raster input files for the water balance.
:param outloc: output locations 'temp' for the virtual files
:return: list of numpy arrays
"""
outputs = []
npy_outputs = []
if resamplemethod == 'nearest':
rs = Resampling.nearest
else:
print('only nearest neighbor resampling is supported at this time')
sys.exit(0)
for i, warpfile in enumerate(inputs):
# print('warpfile', warpfile)
with rasterio.open(warpfile) as src:
# TODO - make the default configurable.
# if src.crs == None:
# src.crs = CRS.from_epsg(4326)
# create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.
with WarpedVRT(src, resampling=rs,
crs=self.crs,
transform=self.transform,
height=self.rows,
width=self.cols) as vrt:
data = vrt.read()
# print(type(vrt))
# save the file as an enumerated tiff. reopen outside this loop with the outputs list
outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))
rio_shutil.copy(vrt, outwarp, driver='GTiff')
outputs.append(outwarp)
# output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.
# for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays
# from this method for us in the rest of the code.
for ow in outputs:
with rasterio.open(ow, 'r') as src:
arr = src.read(1)
npy_outputs.append(arr)
return npy_outputs
def | (self, warpfile, rs):
t0 = t_now()
cnt=10
while(cnt>0):
try:
with rasterio.open(warpfile) as src:
if src.crs == None:
src.crs = CRS.from_epsg(4326)
# create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.
with WarpedVRT(src, resampling=rs,
crs=self.crs,
transform=self.transform,
height=self.rows,
width=self.cols) as vrt:
data = vrt.read(1)
# print(type(vrt))
print("data shape =", data.shape)
self.log.info("_warp_one Completed {}".format(warpfile))
t_total = t_now() - t0
self.log.info("WARP - TIME - {} - {}".format(t_total, warpfile))
return data
except rasterio.errors.RasterioIOError:
print("Unexpected error:", sys.exc_info()[0])
print('oops',cnt)
cnt = cnt - 1
time.sleep(4)
def _warp_inputs(self, inputs, resamplemethod):
self.log.info("_warp_inputs")
outputs = []
npy_outputs = []
if resamplemethod == 'nearest':
rs = Resampling.nearest
else:
print('only nearest neighbor resampling is supported at this time')
sys.exit(0)
for i, warpfile in enumerate(inputs):
print('warpfile', warpfile)
if (self.optimize):
data = self.opti.o_warp_one(warpfile, rs, self.crs, self.transform, self.rows, self.cols)
else:
data = self._warp_one(warpfile, rs)
npy_outputs.append(data)
return npy_outputs
def scale_rasters(self, numpys, scalefactors):
vals = []
for arr, sc in zip(numpys, scalefactors):
arr *= sc
vals.append(arr)
return vals
def normalize_to_std_grid_fast(self, inputs, resamplemethod='nearest'):
"""
Uses rasterio virtual raster to standardize grids of different crs, resolution, boundaries based on a shapefile geometry feature
:param inputs: a list of (daily) raster input files for the water balance.
:param outloc: output locations 'temp' for the virtual files
:return: list of numpy arrays
"""
npy_outputs = self._warp_inputs(inputs, resamplemethod)
return npy_outputs
def s3_delete_local(self, local_file, bucket, bucket_filepath):
"""
This function will move the model outputs from a local folder to a cloud bucket.
:param local_file: path the the local geo file
:param outpath: path of a directory to be created in the cloud bucket
:param bucket: name of the cloud bucket = 'dev-et-data'
:param bucket_folder: "folder" in cloud bucket = 'v1DRB_outputs'
:return:
"""
s3 = boto3.client('s3')
with open(local_file, "rb") as f:
if 'vsis3' in bucket:
bucket = bucket.split('/')[-1]
print(bucket, bucket_filepath)
s3.upload_fileobj(f, bucket, bucket_filepath)
os.remove(local_file)
| _warp_one | identifier_name |
rastermanager.py | import os
import sys
import numpy as np
import time
import yaml
import calendar
from datetime import datetime, timedelta, date
from s3fs.core import S3FileSystem
import boto3
import fiona
import pandas as pd
import rasterio.mask
from rasterio.crs import CRS
from rasterio.enums import Resampling
from rasterio import shutil as rio_shutil
from rasterio.vrt import WarpedVRT
from timeit import default_timer as t_now
from .pathmanager import PathManager
from .box_poly import box_create_ugly_proprietary_shapefile_plus_json_from_tile
from .log_logger import log_make_logger
from .optimeister import OptiMeister
class RasterManager:
"""
This class addresses all the data wrangling needs to get the data sets needed in the model
to the extent nad resolution chosen by the user.
The user needs to input a sample shapefile (polygon) and geotiff file that is used to
get the attributes for processing extent and resolution, etc.
"""
# TODO - get large extent tif files for testing the RasterManager warping functions
# TODO - make overlapping of 2 dataset work
# TODO - rasterio either clips based on a sample raster and shapefile, or based on user defined geo info
# #TODO - https://rasterio.readthedocs.io/en/latest/topics/masking-by-shapefile.html
# Todo - test raster manager as a standalone module.
out_root = None
# --- geographic info for destination files ---
crs = None
cols = None
rows = None
xres = None
yres = None
# left geo coord 'e.g. Westernmost Longitude"
left = None
# top geo coord e.g highest Latitude extent
top = None
transform = [xres, 0.0, left, 0.0, yres, top]
geoproperties_file = None
# can contain multiple features of interest
shapefile = None
temp_folder = None
def __init__(self, config_dict, shp=None):
self.log = log_make_logger('RASTER MANAGER LOG')
self.optimize = False
self.config_dict = config_dict
tile = self.config_dict['tile']
self.log.info('tile name is - {}'.format(tile))
if 'tile' in tile:
self.log.info("using scalable tile names {}".format(tile))
#bucket_name = self.config_dict['out_root'].split('/')[0]
#today = date.today()
#print("Current date =", today)
#date_str=today.strftime("%m_%d_%Y")
#self.config_dict['out_root'] = bucket_name + '/out/DelawareRiverBasin/Run' + date_str + '/' + tile
if self.config_dict['optimize']:
self.optimize = True
self.opti=OptiMeister(config_dict,shp)
# self.geoproperties_file = config_dict.geoproperties_file
# self.shapefile = config_dict.shapefile
# self.temp_folder = os.path.join(config_dict.out_root, config_dict.temp_folder)
# self.temp_folder = config_dict['temp_folder']
self.temp_folder = './' + tile
self.log.info('temp folder is'.format(self.temp_folder))
if not os.path.exists(self.temp_folder):
os.makedirs(self.temp_folder)
# if the user does not include a shapefile in VegET, a box based on the tile name will be created.
if shp == None:
self.shapefile = box_create_ugly_proprietary_shapefile_plus_json_from_tile(self.temp_folder, tile)
else:
self.shapefile = shp
self.geoproperties_file = config_dict['geoproperties_file']
if self.geoproperties_file == None or self.shapefile==None:
print('Assuming the user entered values in the config_dict for boundaries of the AOI not implemented at thsi time')
sys.exit(0)
def output_rasters_cloud(self, arr, outname):
"""
This function creates geotiff files from the model output arrays.
"""
if self.config_dict['path_mode'] == 'aws':
# later on deleted by s3_delete_local()
# local_outpath = os.path.join(self.config_dict['temp_folder'], outname)
local_outname = outname.split('/')[-1]
local_outpath = os.path.join(self.temp_folder, local_outname)
self.log.debug('local_outpath {}'.format(local_outpath))
t0 = t_now()
band1 = arr
# write to a temp folder
with rasterio.open(local_outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,
count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:
wrast.write(band1, indexes=1)
# Buckets are not directories but you can treat them like they are
# bucket_name = os.path.split(self.config_dict['out_root'])[0] # dev-et-data
# bucket_prefix = os.path.split(self.config_dict['out_root'])[-1] # tile_modelrun1
bucket_name = self.config_dict['out_root'].split('/')[0]
bucket_prefix_list = self.config_dict['out_root'].split('/')[1:]
print(bucket_prefix_list)
bucket_prefix = '/'.join(bucket_prefix_list)
print("bucket prefix =", bucket_prefix)
bucket_filepath = os.path.join(bucket_prefix, outname) # os.path.join(dev-et-data/tile_modelrun1, outname)
# uploads to aws bucket with filepath
self.s3_delete_local(local_file=local_outpath, bucket=bucket_name, bucket_filepath=bucket_filepath)
t_total = t_now() - t0
self.log.info("OUTPUT - TIME - {} - {}".format(t_total, bucket_filepath))
elif self.config_dict['path_mode'] == 'google':
print('google path mode not yet implemented')
sys.exit(0)
else:
print('PATH MODE in config is not set properly for the cloud implementation of output_Rasters')
sys.exit(0)
# ----------- create output rasters -----------------
def output_rasters(self, arr, outdir, outname):
"""
This function creates geotiff files from the model output arrays.
"""
# make the subdirectories if we need 'em
if not os.path.exists(outdir):
os.makedirs(outdir)
if self.config_dict['path_mode'] == 'local':
outpath = os.path.join(outdir, outname)
print('the outpath for file {} is {}'.format(outname, outpath))
band1 = arr
with rasterio.open(outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,
count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:
wrast.write(band1, indexes=1)
else:
print('PATH MODE in config is not set properly for the local implementation of output_Rasters')
sys.exit(0)
def set_model_std_grid(self, feat=0):
"""Clips and crops a tiff to the extent of a feature in a shapefile
:param feat: feat is the feature id of the shapefile from like a GeoJSON)
# https://rasterio.readthedocs.io/en/latest/topics/virtual-warping.html
"""
print(self.shapefile)
with fiona.open(self.shapefile, 'r') as shapefile:
# todo - set up an error if user has shapefile with more than one feature. GELP n STEFFI
# shape = shapefile[0]['geometry']
shapes = [feature["geometry"] for feature in shapefile]
for feature in shapefile:
# matching the FID of the given shapefile from a typical geoJSON (Not Ordered Dict nonsense)
if feat == feature['id']:
shapes = [feature['geometry']]
print(f'geoproperties file {self.geoproperties_file}')
print('This is the shape var:', shapes)
with rasterio.open(self.geoproperties_file, 'r') as src:
out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True)
out_meta = src.meta
# once the image is cropped, the image metadata dictionary is updated with the cropped transform and bounds.
out_meta.update({"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform})
self.crs = out_meta['crs']
# TODO - Set Blocksize for sample raster and other useful optimization thingys
self.transform = out_meta['transform']
self.left = self.transform[2]
self.top = self .transform[5]
self.cols = out_meta['width']
self.rows = out_meta['height']
self.xres = self.transform[0]
self.yres = self.transform[4]
# return out_meta
def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):
"""
Uses rasterio virtual raster to standardize grids of different crs, resolution, boundaries based on a shapefile geometry feature
:param inputs: a list of (daily) raster input files for the water balance.
:param outloc: output locations 'temp' for the virtual files
:return: list of numpy arrays
"""
outputs = []
npy_outputs = []
if resamplemethod == 'nearest':
rs = Resampling.nearest
else:
print('only nearest neighbor resampling is supported at this time')
sys.exit(0)
for i, warpfile in enumerate(inputs):
# print('warpfile', warpfile)
with rasterio.open(warpfile) as src:
# TODO - make the default configurable.
# if src.crs == None:
# src.crs = CRS.from_epsg(4326)
# create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.
with WarpedVRT(src, resampling=rs,
crs=self.crs,
transform=self.transform,
height=self.rows,
width=self.cols) as vrt:
data = vrt.read()
# print(type(vrt))
# save the file as an enumerated tiff. reopen outside this loop with the outputs list
outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))
rio_shutil.copy(vrt, outwarp, driver='GTiff')
outputs.append(outwarp)
# output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.
# for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays
# from this method for us in the rest of the code.
for ow in outputs:
with rasterio.open(ow, 'r') as src:
arr = src.read(1)
npy_outputs.append(arr)
return npy_outputs
def _warp_one(self, warpfile, rs):
t0 = t_now()
cnt=10
while(cnt>0):
try:
with rasterio.open(warpfile) as src:
if src.crs == None:
src.crs = CRS.from_epsg(4326)
# create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.
with WarpedVRT(src, resampling=rs,
crs=self.crs,
transform=self.transform,
height=self.rows,
width=self.cols) as vrt:
data = vrt.read(1)
# print(type(vrt))
print("data shape =", data.shape)
self.log.info("_warp_one Completed {}".format(warpfile))
t_total = t_now() - t0
self.log.info("WARP - TIME - {} - {}".format(t_total, warpfile))
return data
except rasterio.errors.RasterioIOError:
print("Unexpected error:", sys.exc_info()[0])
print('oops',cnt)
cnt = cnt - 1
time.sleep(4)
def _warp_inputs(self, inputs, resamplemethod):
self.log.info("_warp_inputs")
outputs = []
npy_outputs = []
if resamplemethod == 'nearest':
rs = Resampling.nearest
else:
print('only nearest neighbor resampling is supported at this time')
sys.exit(0)
for i, warpfile in enumerate(inputs):
print('warpfile', warpfile)
if (self.optimize):
data = self.opti.o_warp_one(warpfile, rs, self.crs, self.transform, self.rows, self.cols)
else:
data = self._warp_one(warpfile, rs)
npy_outputs.append(data)
return npy_outputs
def scale_rasters(self, numpys, scalefactors):
vals = []
for arr, sc in zip(numpys, scalefactors):
arr *= sc
vals.append(arr)
return vals
def normalize_to_std_grid_fast(self, inputs, resamplemethod='nearest'):
|
def s3_delete_local(self, local_file, bucket, bucket_filepath):
"""
This function will move the model outputs from a local folder to a cloud bucket.
:param local_file: path the the local geo file
:param outpath: path of a directory to be created in the cloud bucket
:param bucket: name of the cloud bucket = 'dev-et-data'
:param bucket_folder: "folder" in cloud bucket = 'v1DRB_outputs'
:return:
"""
s3 = boto3.client('s3')
with open(local_file, "rb") as f:
if 'vsis3' in bucket:
bucket = bucket.split('/')[-1]
print(bucket, bucket_filepath)
s3.upload_fileobj(f, bucket, bucket_filepath)
os.remove(local_file)
| """
Uses rasterio virtual raster to standardize grids of different crs, resolution, boundaries based on a shapefile geometry feature
:param inputs: a list of (daily) raster input files for the water balance.
:param outloc: output locations 'temp' for the virtual files
:return: list of numpy arrays
"""
npy_outputs = self._warp_inputs(inputs, resamplemethod)
return npy_outputs | identifier_body |
tools.rs | //! Download management for external tools and applications. Locate and automatically download
//! applications (if needed) to use them in the build pipeline.
use std::path::{Path, PathBuf};
use anyhow::{anyhow, bail, ensure, Context, Result};
use async_compression::tokio::bufread::GzipDecoder;
use directories_next::ProjectDirs;
use futures::prelude::*;
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncSeekExt, AsyncWriteExt, BufReader, SeekFrom};
use tokio::process::Command;
use tokio_tar::{Archive, Entry};
use crate::common::is_executable;
/// The application to locate and eventually download when calling [`get`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Application {
/// wasm-bindgen for generating the JS bindings.
WasmBindgen,
/// wasm-opt to improve performance and size of the output file further.
WasmOpt,
}
impl Application {
/// Base name of the executable without extension.
pub(crate) fn name(&self) -> &str {
match self {
Self::WasmBindgen => "wasm-bindgen",
Self::WasmOpt => "wasm-opt",
}
}
/// Path of the executable within the downloaded archive.
fn path(&self) -> &str {
if cfg!(windows) {
match self {
Self::WasmBindgen => "wasm-bindgen.exe",
Self::WasmOpt => "bin/wasm-opt.exe",
}
} else {
match self {
Self::WasmBindgen => "wasm-bindgen",
Self::WasmOpt => "bin/wasm-opt",
}
}
}
/// Additonal files included in the archive that are required to run the main binary.
fn extra_paths(&self) -> &[&str] {
if cfg!(target_os = "macos") && *self == Self::WasmOpt {
&["lib/libbinaryen.dylib"]
} else {
&[]
}
}
/// Default version to use if not set by the user.
fn default_version(&self) -> &str {
match self {
Self::WasmBindgen => "0.2.74",
Self::WasmOpt => "version_101",
}
}
/// Target for the current OS as part of the download URL. Can fail as there might be no release
/// for the current platform.
fn target(&self) -> Result<&str> {
Ok(match self {
Self::WasmBindgen => {
if cfg!(target_os = "windows") {
"pc-windows-msvc"
} else if cfg!(target_os = "macos") {
"apple-darwin"
} else if cfg!(target_os = "linux") {
"unknown-linux-musl"
} else {
bail!("unsupported OS")
}
}
Self::WasmOpt => {
if cfg!(target_os = "windows") {
"windows"
} else if cfg!(target_os = "macos") {
"macos"
} else if cfg!(target_os = "linux") {
"linux"
} else {
bail!("unsupported OS")
}
}
})
}
/// Direct URL to the release of an application for download.
fn url(&self, version: &str) -> Result<String> {
Ok(match self {
Self::WasmBindgen => format!(
"https://github.com/rustwasm/wasm-bindgen/releases/download/{version}/wasm-bindgen-{version}-x86_64-{target}.tar.gz",
version = version,
target = self.target()?
),
Self::WasmOpt => format!(
"https://github.com/WebAssembly/binaryen/releases/download/{version}/binaryen-{version}-x86_64-{target}.tar.gz",
version = version,
target = self.target()?,
),
})
}
/// The CLI subcommand, flag or option used to check the application's version.
fn version_test(&self) -> &'static str {
match self {
Application::WasmBindgen => "--version",
Application::WasmOpt => "--version",
}
}
/// Format the output of version checking the app.
fn format_version_output(&self, text: &str) -> Result<String> { | .nth(1)
.with_context(|| format!("missing or malformed version output: {}", text))?
.to_owned(),
Application::WasmOpt => format!(
"version_{}",
text.split(' ')
.nth(2)
.with_context(|| format!("missing or malformed version output: {}", text))?
),
};
Ok(formatted_version)
}
}
/// Locate the given application and download it if missing.
#[tracing::instrument(level = "trace")]
pub async fn get(app: Application, version: Option<&str>) -> Result<PathBuf> {
let version = version.unwrap_or_else(|| app.default_version());
if let Some(path) = find_system(app, version).await {
tracing::info!(app = app.name(), version = version, "using system installed binary");
return Ok(path);
}
let cache_dir = cache_dir().await?;
let app_dir = cache_dir.join(format!("{}-{}", app.name(), version));
let bin_path = app_dir.join(app.path());
if !is_executable(&bin_path).await? {
let path = download(app, version)
.await
.context("failed downloading release archive")?;
let mut file = File::open(&path).await.context("failed opening downloaded file")?;
install(app, &mut file, &app_dir).await?;
tokio::fs::remove_file(path)
.await
.context("failed deleting temporary archive")?;
}
Ok(bin_path)
}
/// Try to find a globally system installed version of the application and ensure it is the needed
/// release version.
#[tracing::instrument(level = "trace")]
async fn find_system(app: Application, version: &str) -> Option<PathBuf> {
let result = || async {
let path = which::which(app.name())?;
let output = Command::new(&path).arg(app.version_test()).output().await?;
ensure!(
output.status.success(),
"running command `{} {}` failed",
path.display(),
app.version_test()
);
let text = String::from_utf8_lossy(&output.stdout);
let system_version = app.format_version_output(&text)?;
Ok((path, system_version))
};
match result().await {
Ok((path, system_version)) => (system_version == version).then(|| path),
Err(e) => {
tracing::debug!("system version not found for {}: {}", app.name(), e);
None
}
}
}
/// Download a file from its remote location in the given version, extract it and make it ready for
/// execution at the given location.
#[tracing::instrument(level = "trace")]
async fn download(app: Application, version: &str) -> Result<PathBuf> {
tracing::info!(version = version, "downloading {}", app.name());
let cache_dir = cache_dir().await.context("failed getting the cache directory")?;
let temp_out = cache_dir.join(format!("{}-{}.tmp", app.name(), version));
let mut file = File::create(&temp_out)
.await
.context("failed creating temporary output file")?;
let resp = reqwest::get(app.url(version)?)
.await
.context("error sending HTTP request")?;
ensure!(
resp.status().is_success(),
"error downloading archive file: {:?}\n{}",
resp.status(),
app.url(version)?
);
let mut res_bytes = resp.bytes_stream();
while let Some(chunk_res) = res_bytes.next().await {
let chunk = chunk_res.context("error reading chunk from download")?;
let _res = file.write(chunk.as_ref()).await;
}
Ok(temp_out)
}
/// Install an application from a downloaded archive locating and copying it to the given target
/// location.
#[tracing::instrument(level = "trace")]
async fn install(app: Application, archive_file: &mut File, target: &Path) -> Result<()> {
tracing::info!("installing {}", app.name());
let mut archive = Archive::new(GzipDecoder::new(BufReader::new(archive_file)));
let mut file = extract_file(&mut archive, target, Path::new(app.path())).await?;
set_executable_flag(&mut file).await?;
for path in app.extra_paths() {
// Archive must be opened for each entry as tar files don't allow jumping forth and back.
let mut archive_file = archive
.into_inner()
.map_err(|_| anyhow!("error seeking app archive"))?
.into_inner();
archive_file
.seek(SeekFrom::Start(0))
.await
.context("error seeking to beginning of archive")?;
archive = Archive::new(GzipDecoder::new(archive_file));
extract_file(&mut archive, target, Path::new(path)).await?;
}
Ok(())
}
/// Extract a single file from the given archive and put it into the target location.
async fn extract_file<R>(archive: &mut Archive<R>, target: &Path, file: &Path) -> Result<File>
where
R: AsyncRead + Unpin + Send + Sync,
{
let mut tar_file = find_tar_entry(archive, file).await?.context("file not found in archive")?;
let out = target.join(file);
if let Some(parent) = out.parent() {
tokio::fs::create_dir_all(parent)
.await
.context("failed creating output directory")?;
}
let mut out = File::create(target.join(file))
.await
.context("failed creating output file")?;
tokio::io::copy(&mut tar_file, &mut out)
.await
.context("failed copying over final output file from archive")?;
Ok(out)
}
/// Locate the cache dir for trunk and make sure it exists.
pub async fn cache_dir() -> Result<PathBuf> {
let path = ProjectDirs::from("dev", "trunkrs", "trunk")
.context("failed finding project directory")?
.cache_dir()
.to_owned();
tokio::fs::create_dir_all(&path)
.await
.context("failed creating cache directory")?;
Ok(path)
}
/// Set the executable flag for a file. Only has an effect on UNIX platforms.
async fn set_executable_flag(file: &mut File) -> Result<()> {
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perms = file.metadata().await.context("failed getting metadata")?.permissions();
perms.set_mode(perms.mode() | 0o100);
file.set_permissions(perms)
.await
.context("failed setting the executable flag")?;
}
Ok(())
}
/// Find an entry in a TAR archive by name and open it for reading. The first part of the path is
/// dropped as that's usually the folder name it was created from.
async fn find_tar_entry<R>(archive: &mut Archive<R>, path: impl AsRef<Path>) -> Result<Option<Entry<Archive<R>>>>
where
R: AsyncRead + Unpin + Send + Sync,
{
let mut entries = archive.entries().context("failed getting archive entries")?;
while let Some(entry) = entries.next().await {
let entry = entry.context("error while getting archive entry")?;
let name = entry.path().context("invalid entry path")?;
let mut name = name.components();
name.next();
if name.as_path() == path.as_ref() {
return Ok(Some(entry));
}
}
Ok(None)
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::{ensure, Context, Result};
#[tokio::test]
async fn download_and_install_binaries() -> Result<()> {
let dir = tempfile::tempdir().context("error creating temporary dir")?;
for &app in &[Application::WasmBindgen, Application::WasmOpt] {
let path = download(app, app.default_version())
.await
.context("error downloading app")?;
let mut file = File::open(&path).await.context("error opening file")?;
install(app, &mut file, dir.path()).await.context("error installing app")?;
std::fs::remove_file(path).context("error during cleanup")?;
}
Ok(())
}
macro_rules! table_test_format_version {
($name:ident, $app:expr, $input:literal, $expect:literal) => {
#[test]
fn $name() -> Result<()> {
let app = $app;
let output = app
.format_version_output($input)
.context("unexpected version formatting error")?;
ensure!(output == $expect, "version check output does not match: {} != {}", $expect, output);
Ok(())
}
};
}
table_test_format_version!(
wasm_opt_from_source,
Application::WasmOpt,
"wasm-opt version 101 (version_101)",
"version_101"
);
table_test_format_version!(wasm_opt_pre_compiled, Application::WasmOpt, "wasm-opt version 101", "version_101");
table_test_format_version!(wasm_bindgen_from_source, Application::WasmBindgen, "wasm-bindgen 0.2.75", "0.2.75");
table_test_format_version!(
wasm_bindgen_pre_compiled,
Application::WasmBindgen,
"wasm-bindgen 0.2.74 (27c7a4d06)",
"0.2.74"
);
} | let text = text.trim();
let formatted_version = match self {
Application::WasmBindgen => text
.split(' ') | random_line_split |
tools.rs | //! Download management for external tools and applications. Locate and automatically download
//! applications (if needed) to use them in the build pipeline.
use std::path::{Path, PathBuf};
use anyhow::{anyhow, bail, ensure, Context, Result};
use async_compression::tokio::bufread::GzipDecoder;
use directories_next::ProjectDirs;
use futures::prelude::*;
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncSeekExt, AsyncWriteExt, BufReader, SeekFrom};
use tokio::process::Command;
use tokio_tar::{Archive, Entry};
use crate::common::is_executable;
/// The application to locate and eventually download when calling [`get`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Application {
/// wasm-bindgen for generating the JS bindings.
WasmBindgen,
/// wasm-opt to improve performance and size of the output file further.
WasmOpt,
}
impl Application {
/// Base name of the executable without extension.
pub(crate) fn name(&self) -> &str {
match self {
Self::WasmBindgen => "wasm-bindgen",
Self::WasmOpt => "wasm-opt",
}
}
/// Path of the executable within the downloaded archive.
fn path(&self) -> &str {
if cfg!(windows) {
match self {
Self::WasmBindgen => "wasm-bindgen.exe",
Self::WasmOpt => "bin/wasm-opt.exe",
}
} else {
match self {
Self::WasmBindgen => "wasm-bindgen",
Self::WasmOpt => "bin/wasm-opt",
}
}
}
/// Additonal files included in the archive that are required to run the main binary.
fn extra_paths(&self) -> &[&str] {
if cfg!(target_os = "macos") && *self == Self::WasmOpt {
&["lib/libbinaryen.dylib"]
} else {
&[]
}
}
/// Default version to use if not set by the user.
fn default_version(&self) -> &str {
match self {
Self::WasmBindgen => "0.2.74",
Self::WasmOpt => "version_101",
}
}
/// Target for the current OS as part of the download URL. Can fail as there might be no release
/// for the current platform.
fn target(&self) -> Result<&str> {
Ok(match self {
Self::WasmBindgen => {
if cfg!(target_os = "windows") {
"pc-windows-msvc"
} else if cfg!(target_os = "macos") {
"apple-darwin"
} else if cfg!(target_os = "linux") {
"unknown-linux-musl"
} else {
bail!("unsupported OS")
}
}
Self::WasmOpt => {
if cfg!(target_os = "windows") {
"windows"
} else if cfg!(target_os = "macos") {
"macos"
} else if cfg!(target_os = "linux") {
"linux"
} else {
bail!("unsupported OS")
}
}
})
}
/// Direct URL to the release of an application for download.
fn | (&self, version: &str) -> Result<String> {
Ok(match self {
Self::WasmBindgen => format!(
"https://github.com/rustwasm/wasm-bindgen/releases/download/{version}/wasm-bindgen-{version}-x86_64-{target}.tar.gz",
version = version,
target = self.target()?
),
Self::WasmOpt => format!(
"https://github.com/WebAssembly/binaryen/releases/download/{version}/binaryen-{version}-x86_64-{target}.tar.gz",
version = version,
target = self.target()?,
),
})
}
/// The CLI subcommand, flag or option used to check the application's version.
fn version_test(&self) -> &'static str {
match self {
Application::WasmBindgen => "--version",
Application::WasmOpt => "--version",
}
}
/// Format the output of version checking the app.
fn format_version_output(&self, text: &str) -> Result<String> {
let text = text.trim();
let formatted_version = match self {
Application::WasmBindgen => text
.split(' ')
.nth(1)
.with_context(|| format!("missing or malformed version output: {}", text))?
.to_owned(),
Application::WasmOpt => format!(
"version_{}",
text.split(' ')
.nth(2)
.with_context(|| format!("missing or malformed version output: {}", text))?
),
};
Ok(formatted_version)
}
}
/// Locate the given application and download it if missing.
#[tracing::instrument(level = "trace")]
pub async fn get(app: Application, version: Option<&str>) -> Result<PathBuf> {
let version = version.unwrap_or_else(|| app.default_version());
if let Some(path) = find_system(app, version).await {
tracing::info!(app = app.name(), version = version, "using system installed binary");
return Ok(path);
}
let cache_dir = cache_dir().await?;
let app_dir = cache_dir.join(format!("{}-{}", app.name(), version));
let bin_path = app_dir.join(app.path());
if !is_executable(&bin_path).await? {
let path = download(app, version)
.await
.context("failed downloading release archive")?;
let mut file = File::open(&path).await.context("failed opening downloaded file")?;
install(app, &mut file, &app_dir).await?;
tokio::fs::remove_file(path)
.await
.context("failed deleting temporary archive")?;
}
Ok(bin_path)
}
/// Try to find a globally system installed version of the application and ensure it is the needed
/// release version.
#[tracing::instrument(level = "trace")]
async fn find_system(app: Application, version: &str) -> Option<PathBuf> {
let result = || async {
let path = which::which(app.name())?;
let output = Command::new(&path).arg(app.version_test()).output().await?;
ensure!(
output.status.success(),
"running command `{} {}` failed",
path.display(),
app.version_test()
);
let text = String::from_utf8_lossy(&output.stdout);
let system_version = app.format_version_output(&text)?;
Ok((path, system_version))
};
match result().await {
Ok((path, system_version)) => (system_version == version).then(|| path),
Err(e) => {
tracing::debug!("system version not found for {}: {}", app.name(), e);
None
}
}
}
/// Download a file from its remote location in the given version, extract it and make it ready for
/// execution at the given location.
#[tracing::instrument(level = "trace")]
async fn download(app: Application, version: &str) -> Result<PathBuf> {
tracing::info!(version = version, "downloading {}", app.name());
let cache_dir = cache_dir().await.context("failed getting the cache directory")?;
let temp_out = cache_dir.join(format!("{}-{}.tmp", app.name(), version));
let mut file = File::create(&temp_out)
.await
.context("failed creating temporary output file")?;
let resp = reqwest::get(app.url(version)?)
.await
.context("error sending HTTP request")?;
ensure!(
resp.status().is_success(),
"error downloading archive file: {:?}\n{}",
resp.status(),
app.url(version)?
);
let mut res_bytes = resp.bytes_stream();
while let Some(chunk_res) = res_bytes.next().await {
let chunk = chunk_res.context("error reading chunk from download")?;
let _res = file.write(chunk.as_ref()).await;
}
Ok(temp_out)
}
/// Install an application from a downloaded archive locating and copying it to the given target
/// location.
#[tracing::instrument(level = "trace")]
async fn install(app: Application, archive_file: &mut File, target: &Path) -> Result<()> {
tracing::info!("installing {}", app.name());
let mut archive = Archive::new(GzipDecoder::new(BufReader::new(archive_file)));
let mut file = extract_file(&mut archive, target, Path::new(app.path())).await?;
set_executable_flag(&mut file).await?;
for path in app.extra_paths() {
// Archive must be opened for each entry as tar files don't allow jumping forth and back.
let mut archive_file = archive
.into_inner()
.map_err(|_| anyhow!("error seeking app archive"))?
.into_inner();
archive_file
.seek(SeekFrom::Start(0))
.await
.context("error seeking to beginning of archive")?;
archive = Archive::new(GzipDecoder::new(archive_file));
extract_file(&mut archive, target, Path::new(path)).await?;
}
Ok(())
}
/// Extract a single file from the given archive and put it into the target location.
async fn extract_file<R>(archive: &mut Archive<R>, target: &Path, file: &Path) -> Result<File>
where
R: AsyncRead + Unpin + Send + Sync,
{
let mut tar_file = find_tar_entry(archive, file).await?.context("file not found in archive")?;
let out = target.join(file);
if let Some(parent) = out.parent() {
tokio::fs::create_dir_all(parent)
.await
.context("failed creating output directory")?;
}
let mut out = File::create(target.join(file))
.await
.context("failed creating output file")?;
tokio::io::copy(&mut tar_file, &mut out)
.await
.context("failed copying over final output file from archive")?;
Ok(out)
}
/// Locate the cache dir for trunk and make sure it exists.
pub async fn cache_dir() -> Result<PathBuf> {
let path = ProjectDirs::from("dev", "trunkrs", "trunk")
.context("failed finding project directory")?
.cache_dir()
.to_owned();
tokio::fs::create_dir_all(&path)
.await
.context("failed creating cache directory")?;
Ok(path)
}
/// Set the executable flag for a file. Only has an effect on UNIX platforms.
async fn set_executable_flag(file: &mut File) -> Result<()> {
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perms = file.metadata().await.context("failed getting metadata")?.permissions();
perms.set_mode(perms.mode() | 0o100);
file.set_permissions(perms)
.await
.context("failed setting the executable flag")?;
}
Ok(())
}
/// Find an entry in a TAR archive by name and open it for reading. The first part of the path is
/// dropped as that's usually the folder name it was created from.
async fn find_tar_entry<R>(archive: &mut Archive<R>, path: impl AsRef<Path>) -> Result<Option<Entry<Archive<R>>>>
where
R: AsyncRead + Unpin + Send + Sync,
{
let mut entries = archive.entries().context("failed getting archive entries")?;
while let Some(entry) = entries.next().await {
let entry = entry.context("error while getting archive entry")?;
let name = entry.path().context("invalid entry path")?;
let mut name = name.components();
name.next();
if name.as_path() == path.as_ref() {
return Ok(Some(entry));
}
}
Ok(None)
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::{ensure, Context, Result};
#[tokio::test]
async fn download_and_install_binaries() -> Result<()> {
let dir = tempfile::tempdir().context("error creating temporary dir")?;
for &app in &[Application::WasmBindgen, Application::WasmOpt] {
let path = download(app, app.default_version())
.await
.context("error downloading app")?;
let mut file = File::open(&path).await.context("error opening file")?;
install(app, &mut file, dir.path()).await.context("error installing app")?;
std::fs::remove_file(path).context("error during cleanup")?;
}
Ok(())
}
macro_rules! table_test_format_version {
($name:ident, $app:expr, $input:literal, $expect:literal) => {
#[test]
fn $name() -> Result<()> {
let app = $app;
let output = app
.format_version_output($input)
.context("unexpected version formatting error")?;
ensure!(output == $expect, "version check output does not match: {} != {}", $expect, output);
Ok(())
}
};
}
table_test_format_version!(
wasm_opt_from_source,
Application::WasmOpt,
"wasm-opt version 101 (version_101)",
"version_101"
);
table_test_format_version!(wasm_opt_pre_compiled, Application::WasmOpt, "wasm-opt version 101", "version_101");
table_test_format_version!(wasm_bindgen_from_source, Application::WasmBindgen, "wasm-bindgen 0.2.75", "0.2.75");
table_test_format_version!(
wasm_bindgen_pre_compiled,
Application::WasmBindgen,
"wasm-bindgen 0.2.74 (27c7a4d06)",
"0.2.74"
);
}
| url | identifier_name |
tools.rs | //! Download management for external tools and applications. Locate and automatically download
//! applications (if needed) to use them in the build pipeline.
use std::path::{Path, PathBuf};
use anyhow::{anyhow, bail, ensure, Context, Result};
use async_compression::tokio::bufread::GzipDecoder;
use directories_next::ProjectDirs;
use futures::prelude::*;
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncSeekExt, AsyncWriteExt, BufReader, SeekFrom};
use tokio::process::Command;
use tokio_tar::{Archive, Entry};
use crate::common::is_executable;
/// The application to locate and eventually download when calling [`get`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Application {
/// wasm-bindgen for generating the JS bindings.
WasmBindgen,
/// wasm-opt to improve performance and size of the output file further.
WasmOpt,
}
impl Application {
/// Base name of the executable without extension.
pub(crate) fn name(&self) -> &str |
/// Path of the executable within the downloaded archive.
fn path(&self) -> &str {
if cfg!(windows) {
match self {
Self::WasmBindgen => "wasm-bindgen.exe",
Self::WasmOpt => "bin/wasm-opt.exe",
}
} else {
match self {
Self::WasmBindgen => "wasm-bindgen",
Self::WasmOpt => "bin/wasm-opt",
}
}
}
/// Additonal files included in the archive that are required to run the main binary.
fn extra_paths(&self) -> &[&str] {
if cfg!(target_os = "macos") && *self == Self::WasmOpt {
&["lib/libbinaryen.dylib"]
} else {
&[]
}
}
/// Default version to use if not set by the user.
fn default_version(&self) -> &str {
match self {
Self::WasmBindgen => "0.2.74",
Self::WasmOpt => "version_101",
}
}
/// Target for the current OS as part of the download URL. Can fail as there might be no release
/// for the current platform.
fn target(&self) -> Result<&str> {
Ok(match self {
Self::WasmBindgen => {
if cfg!(target_os = "windows") {
"pc-windows-msvc"
} else if cfg!(target_os = "macos") {
"apple-darwin"
} else if cfg!(target_os = "linux") {
"unknown-linux-musl"
} else {
bail!("unsupported OS")
}
}
Self::WasmOpt => {
if cfg!(target_os = "windows") {
"windows"
} else if cfg!(target_os = "macos") {
"macos"
} else if cfg!(target_os = "linux") {
"linux"
} else {
bail!("unsupported OS")
}
}
})
}
/// Direct URL to the release of an application for download.
fn url(&self, version: &str) -> Result<String> {
Ok(match self {
Self::WasmBindgen => format!(
"https://github.com/rustwasm/wasm-bindgen/releases/download/{version}/wasm-bindgen-{version}-x86_64-{target}.tar.gz",
version = version,
target = self.target()?
),
Self::WasmOpt => format!(
"https://github.com/WebAssembly/binaryen/releases/download/{version}/binaryen-{version}-x86_64-{target}.tar.gz",
version = version,
target = self.target()?,
),
})
}
/// The CLI subcommand, flag or option used to check the application's version.
fn version_test(&self) -> &'static str {
match self {
Application::WasmBindgen => "--version",
Application::WasmOpt => "--version",
}
}
/// Format the output of version checking the app.
fn format_version_output(&self, text: &str) -> Result<String> {
let text = text.trim();
let formatted_version = match self {
Application::WasmBindgen => text
.split(' ')
.nth(1)
.with_context(|| format!("missing or malformed version output: {}", text))?
.to_owned(),
Application::WasmOpt => format!(
"version_{}",
text.split(' ')
.nth(2)
.with_context(|| format!("missing or malformed version output: {}", text))?
),
};
Ok(formatted_version)
}
}
/// Locate the given application and download it if missing.
#[tracing::instrument(level = "trace")]
pub async fn get(app: Application, version: Option<&str>) -> Result<PathBuf> {
let version = version.unwrap_or_else(|| app.default_version());
if let Some(path) = find_system(app, version).await {
tracing::info!(app = app.name(), version = version, "using system installed binary");
return Ok(path);
}
let cache_dir = cache_dir().await?;
let app_dir = cache_dir.join(format!("{}-{}", app.name(), version));
let bin_path = app_dir.join(app.path());
if !is_executable(&bin_path).await? {
let path = download(app, version)
.await
.context("failed downloading release archive")?;
let mut file = File::open(&path).await.context("failed opening downloaded file")?;
install(app, &mut file, &app_dir).await?;
tokio::fs::remove_file(path)
.await
.context("failed deleting temporary archive")?;
}
Ok(bin_path)
}
/// Try to find a globally system installed version of the application and ensure it is the needed
/// release version.
#[tracing::instrument(level = "trace")]
async fn find_system(app: Application, version: &str) -> Option<PathBuf> {
let result = || async {
let path = which::which(app.name())?;
let output = Command::new(&path).arg(app.version_test()).output().await?;
ensure!(
output.status.success(),
"running command `{} {}` failed",
path.display(),
app.version_test()
);
let text = String::from_utf8_lossy(&output.stdout);
let system_version = app.format_version_output(&text)?;
Ok((path, system_version))
};
match result().await {
Ok((path, system_version)) => (system_version == version).then(|| path),
Err(e) => {
tracing::debug!("system version not found for {}: {}", app.name(), e);
None
}
}
}
/// Download a file from its remote location in the given version, extract it and make it ready for
/// execution at the given location.
#[tracing::instrument(level = "trace")]
async fn download(app: Application, version: &str) -> Result<PathBuf> {
tracing::info!(version = version, "downloading {}", app.name());
let cache_dir = cache_dir().await.context("failed getting the cache directory")?;
let temp_out = cache_dir.join(format!("{}-{}.tmp", app.name(), version));
let mut file = File::create(&temp_out)
.await
.context("failed creating temporary output file")?;
let resp = reqwest::get(app.url(version)?)
.await
.context("error sending HTTP request")?;
ensure!(
resp.status().is_success(),
"error downloading archive file: {:?}\n{}",
resp.status(),
app.url(version)?
);
let mut res_bytes = resp.bytes_stream();
while let Some(chunk_res) = res_bytes.next().await {
let chunk = chunk_res.context("error reading chunk from download")?;
let _res = file.write(chunk.as_ref()).await;
}
Ok(temp_out)
}
/// Install an application from a downloaded archive locating and copying it to the given target
/// location.
#[tracing::instrument(level = "trace")]
async fn install(app: Application, archive_file: &mut File, target: &Path) -> Result<()> {
tracing::info!("installing {}", app.name());
let mut archive = Archive::new(GzipDecoder::new(BufReader::new(archive_file)));
let mut file = extract_file(&mut archive, target, Path::new(app.path())).await?;
set_executable_flag(&mut file).await?;
for path in app.extra_paths() {
// Archive must be opened for each entry as tar files don't allow jumping forth and back.
let mut archive_file = archive
.into_inner()
.map_err(|_| anyhow!("error seeking app archive"))?
.into_inner();
archive_file
.seek(SeekFrom::Start(0))
.await
.context("error seeking to beginning of archive")?;
archive = Archive::new(GzipDecoder::new(archive_file));
extract_file(&mut archive, target, Path::new(path)).await?;
}
Ok(())
}
/// Extract a single file from the given archive and put it into the target location.
async fn extract_file<R>(archive: &mut Archive<R>, target: &Path, file: &Path) -> Result<File>
where
R: AsyncRead + Unpin + Send + Sync,
{
let mut tar_file = find_tar_entry(archive, file).await?.context("file not found in archive")?;
let out = target.join(file);
if let Some(parent) = out.parent() {
tokio::fs::create_dir_all(parent)
.await
.context("failed creating output directory")?;
}
let mut out = File::create(target.join(file))
.await
.context("failed creating output file")?;
tokio::io::copy(&mut tar_file, &mut out)
.await
.context("failed copying over final output file from archive")?;
Ok(out)
}
/// Locate the cache dir for trunk and make sure it exists.
pub async fn cache_dir() -> Result<PathBuf> {
let path = ProjectDirs::from("dev", "trunkrs", "trunk")
.context("failed finding project directory")?
.cache_dir()
.to_owned();
tokio::fs::create_dir_all(&path)
.await
.context("failed creating cache directory")?;
Ok(path)
}
/// Set the executable flag for a file. Only has an effect on UNIX platforms.
async fn set_executable_flag(file: &mut File) -> Result<()> {
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perms = file.metadata().await.context("failed getting metadata")?.permissions();
perms.set_mode(perms.mode() | 0o100);
file.set_permissions(perms)
.await
.context("failed setting the executable flag")?;
}
Ok(())
}
/// Find an entry in a TAR archive by name and open it for reading. The first part of the path is
/// dropped as that's usually the folder name it was created from.
async fn find_tar_entry<R>(archive: &mut Archive<R>, path: impl AsRef<Path>) -> Result<Option<Entry<Archive<R>>>>
where
R: AsyncRead + Unpin + Send + Sync,
{
let mut entries = archive.entries().context("failed getting archive entries")?;
while let Some(entry) = entries.next().await {
let entry = entry.context("error while getting archive entry")?;
let name = entry.path().context("invalid entry path")?;
let mut name = name.components();
name.next();
if name.as_path() == path.as_ref() {
return Ok(Some(entry));
}
}
Ok(None)
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::{ensure, Context, Result};
#[tokio::test]
async fn download_and_install_binaries() -> Result<()> {
let dir = tempfile::tempdir().context("error creating temporary dir")?;
for &app in &[Application::WasmBindgen, Application::WasmOpt] {
let path = download(app, app.default_version())
.await
.context("error downloading app")?;
let mut file = File::open(&path).await.context("error opening file")?;
install(app, &mut file, dir.path()).await.context("error installing app")?;
std::fs::remove_file(path).context("error during cleanup")?;
}
Ok(())
}
macro_rules! table_test_format_version {
($name:ident, $app:expr, $input:literal, $expect:literal) => {
#[test]
fn $name() -> Result<()> {
let app = $app;
let output = app
.format_version_output($input)
.context("unexpected version formatting error")?;
ensure!(output == $expect, "version check output does not match: {} != {}", $expect, output);
Ok(())
}
};
}
table_test_format_version!(
wasm_opt_from_source,
Application::WasmOpt,
"wasm-opt version 101 (version_101)",
"version_101"
);
table_test_format_version!(wasm_opt_pre_compiled, Application::WasmOpt, "wasm-opt version 101", "version_101");
table_test_format_version!(wasm_bindgen_from_source, Application::WasmBindgen, "wasm-bindgen 0.2.75", "0.2.75");
table_test_format_version!(
wasm_bindgen_pre_compiled,
Application::WasmBindgen,
"wasm-bindgen 0.2.74 (27c7a4d06)",
"0.2.74"
);
}
| {
match self {
Self::WasmBindgen => "wasm-bindgen",
Self::WasmOpt => "wasm-opt",
}
} | identifier_body |
mod.rs | //! This module handles connections to Content Manager Server
//! First you connect into the ip using a tcp socket
//! Then reads/writes into it
//!
//! Packets are sent at the following format: packet_len + packet_magic + data
//! packet length: u32
//! packet magic: VT01
//!
//! Apparently, bytes received are in little endian
use std::error::Error;
use async_trait::async_trait;
use bytes::BytesMut;
use futures::{SinkExt, StreamExt};
use steam_crypto::SessionKeys;
use steam_language_gen::generated::enums::EMsg;
use steam_language_gen::SerializableBytes;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tokio_util::codec::{FramedRead, FramedWrite};
use crate::connection::encryption::handle_encryption_negotiation;
use crate::errors::ConnectionError;
use crate::messages::codec::PacketMessageCodec;
use crate::messages::message::ClientMessage;
use crate::{errors::PacketError, messages::packet::PacketMessage};
use atomic::{Atomic, Ordering};
pub(crate) mod encryption;
const PACKET_MAGIC_BYTES: &[u8] = br#"VT01"#;
/// This should be an abstraction over low-level socket handlers and is not to be used directly.
/// [SteamClient] is used for binding and connecting.
#[derive(Debug)]
pub(crate) struct SteamConnection<S> {
/// Stream of data to Steam Content server. May be TCP or Websocket.
stream: S,
/// Address to which the connection is bound.
endpoint: String,
/// Current encryption state
state: Atomic<EncryptionState>,
/// Populated after the initial handshake with Steam
session_keys: Option<SessionKeys>,
}
impl<S> SteamConnection<S> {
pub fn change_encryption_state(&self, new_state: EncryptionState) {
self.state.swap(new_state, Ordering::AcqRel);
}
}
#[async_trait]
trait Connection<S> {
async fn new_connection(ip_addr: &str) -> Result<SteamConnection<S>, Box<dyn Error>>;
async fn read_packets(&mut self) -> Result<PacketMessage, PacketError>;
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>>;
}
pub(crate) type PacketTx = UnboundedSender<PacketMessage>;
pub(crate) type MessageTx<T> = UnboundedSender<ClientMessage<T>>;
pub(crate) type DynBytes = Box<dyn SerializableBytes>;
pub(crate) type BytesTx = UnboundedSender<Box<dyn SerializableBytes + 'static>>;
#[cfg(not(feature = "websockets"))]
impl SteamConnection<TcpStream> {
async fn main_loop(mut self) -> Result<(), ConnectionError> {
let (sender, mut receiver): (UnboundedSender<DynBytes>, UnboundedReceiver<DynBytes>) =
mpsc::unbounded_channel();
let connection_state = &mut self.state;
let (stream_rx, stream_tx) = self.stream.into_split();
let mut framed_read = FramedRead::new(stream_rx, PacketMessageCodec::default());
let mut framed_write = FramedWrite::new(stream_tx, PacketMessageCodec::default());
tokio::spawn(async move {
if let Some(mes) = receiver.recv().await {
let message: Vec<u8> = mes.to_bytes();
framed_write.send(message).await.unwrap();
}
});
while let Some(packet_message) = framed_read.next().await {
let packet_message = packet_message.unwrap();
match packet_message.emsg() {
EMsg::ChannelEncryptRequest | EMsg::ChannelEncryptResponse | EMsg::ChannelEncryptResult => |
_ => {
unimplemented!()
}
};
}
Ok(())
}
}
#[cfg(not(feature = "websockets"))]
#[async_trait]
impl Connection<TcpStream> for SteamConnection<TcpStream> {
/// Opens a tcp stream to specified IP
async fn new_connection(ip_addr: &str) -> Result<SteamConnection<TcpStream>, Box<dyn Error>> {
trace!("Connecting to ip: {}", &ip_addr);
let stream = TcpStream::connect(ip_addr).await?;
Ok(SteamConnection {
stream,
endpoint: ip_addr.to_string(),
state: Atomic::new(EncryptionState::Disconnected),
session_keys: None,
})
}
#[inline]
async fn read_packets(&mut self) -> Result<PacketMessage, PacketError> {
let mut framed_stream = FramedRead::new(&mut self.stream, PacketMessageCodec::default());
Ok(framed_stream.next().await.unwrap().unwrap())
}
#[inline]
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
let mut output_buffer = BytesMut::with_capacity(1024);
trace!("payload size: {} ", data.len());
output_buffer.extend_from_slice(&(data.len() as u32).to_le_bytes());
output_buffer.extend_from_slice(PACKET_MAGIC_BYTES);
output_buffer.extend_from_slice(data);
let output_buffer = output_buffer.freeze();
trace!("Writing {} bytes of data to stream..", output_buffer.len());
trace!("Payload bytes: {:?}", output_buffer);
let write_result = self.stream.write(&output_buffer).await?;
trace!("write result: {}", write_result);
Ok(())
}
}
#[cfg(feature = "websockets")]
mod connection_method {
use tokio_tls::TlsStream;
use tokio_tungstenite::{connect_async, stream::Stream, WebSocketStream};
use super::*;
type Ws = WebSocketStream<Stream<TcpStream, TlsStream<TcpStream>>>;
#[async_trait]
impl Connection<Ws> for SteamConnection<Ws> {
async fn new_connection(ws_url: &str) -> Result<SteamConnection<Ws>, Box<dyn Error>> {
let formatted_ws_url = format!("wss://{}/cmsocket/", ws_url);
debug!("Connecting to addr: {}", formatted_ws_url);
let (stream, _) = connect_async(&formatted_ws_url).await?;
Ok(SteamConnection {
stream,
endpoint: formatted_ws_url,
state: EncryptionState::Disconnected,
})
}
#[inline]
async fn read_packets(&mut self) -> Result<Vec<u8>, Box<dyn Error>> {
let mut data_len: [u8; 4] = [0; 4];
self.stream.get_mut().read_exact(&mut data_len).await?;
let mut packet_magic: [u8; 4] = [0; 4];
self.stream.get_mut().read_exact(&mut packet_magic).await?;
if packet_magic != PACKET_MAGIC_BYTES {
log::error!("Could not find magic packet on read.");
}
let mut incoming_data = BytesMut::with_capacity(1024);
self.stream.get_mut().read_buf(&mut incoming_data).await?;
// sanity check
debug!("data length: {}", u32::from_le_bytes(data_len));
trace!("data: {:?}", incoming_data);
Ok(incoming_data.to_vec())
}
#[inline]
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
unimplemented!()
}
}
}
#[derive(Debug, Copy, Clone)]
/// Represents the current state of encryption of the connection.
/// Steam is always encrypted, with the exception when the connection is starting.
pub(crate) enum EncryptionState {
/// After initial connection is established, Steam requests to encrypt messages
/// through a [EMsg::ChannelEncryptRequest]
Connected,
/// We are challenged after Steam returns a [EMsg::ChannelEncryptResult].
///
/// After checking the result for a positive outcome, we should be `Encrypted`, else we get disconnected,
/// and try again.
Challenged,
/// We are encrypted and there is nothing left to do.
Encrypted,
/// State only after logOff or if encryption fails.
Disconnected,
}
#[cfg(test)]
mod tests {
use env_logger::Builder;
use log::LevelFilter;
use steam_language_gen::generated::enums::EMsg;
use steam_language_gen::SerializableBytes;
// Note this useful idiom: importing names from outer (for mod tests) scope.
use super::*;
use crate::connection::encryption::handle_encrypt_request;
use crate::content_manager::dump_tcp_servers;
fn init() {
let _ = Builder::from_default_env()
.filter_module("steam_api", LevelFilter::Trace)
.is_test(true)
.try_init();
}
#[tokio::test]
#[cfg(not(feature = "websockets"))]
async fn connect_to_web_server() {
init();
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await;
assert!(steam_connection.is_ok());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[cfg(not(feature = "websockets"))]
async fn main_loop() {
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
steam_connection.main_loop().await.unwrap()
}
#[tokio::test]
#[cfg(not(feature = "websockets"))]
async fn test_spawn() {
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let mut steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
let packet_message = steam_connection.read_packets().await.unwrap();
assert_eq!(packet_message.emsg(), EMsg::ChannelEncryptRequest);
let answer = handle_encrypt_request(packet_message).to_bytes();
steam_connection.write_packets(&answer).await.unwrap();
let data = steam_connection.read_packets().await.unwrap();
assert_eq!(data.emsg(), EMsg::ChannelEncryptResult);
// steam_connection.main_loop().await.unwrap()
}
// #[tokio::test()]
// #[cfg(not(feature = "websockets"))]
// async fn answer_encrypt_request() {
// init();
//
// let cm_servers = CmServerSvList::fetch_servers(env!("STEAM_API")).await;
// let dumped_cm_servers = cm_servers.unwrap().dump_tcp_servers();
//
// let mut steam_connection: SteamConnection<TcpStream> =
// SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); let data =
// steam_connection.read_packets().await.unwrap(); let message = EMsg::from_raw_message(&data);
//
// assert_eq!(message.unwrap(), EMsg::ChannelEncryptRequest);
//
//
// let answer = handle_encrypt_request(PacketMessage::from_rawdata(&data));
// steam_connection.write_packets(answer.as_slice()).await.unwrap();
// let data = steam_connection.read_packets().await.unwrap();
// let message = EMsg::from_raw_message(&data).unwrap();
// assert_eq!(message, EMsg::ChannelEncryptResult);
// }
#[tokio::test(threaded_scheduler)]
#[cfg(feature = "websockets")]
async fn connect_to_ws_server() {
init();
let get_results = CmServerSvList::fetch_servers("1").await;
let fetched_servers = get_results.unwrap().dump_ws_servers();
let steam_connection = SteamConnection::new_connection(&fetched_servers[0]).await;
assert!(steam_connection.is_ok())
}
}
| {
handle_encryption_negotiation(sender.clone(), connection_state, packet_message).unwrap();
} | conditional_block |
mod.rs | //! This module handles connections to Content Manager Server
//! First you connect into the ip using a tcp socket
//! Then reads/writes into it
//!
//! Packets are sent at the following format: packet_len + packet_magic + data
//! packet length: u32
//! packet magic: VT01
//!
//! Apparently, bytes received are in little endian
use std::error::Error;
use async_trait::async_trait;
use bytes::BytesMut;
use futures::{SinkExt, StreamExt};
use steam_crypto::SessionKeys;
use steam_language_gen::generated::enums::EMsg;
use steam_language_gen::SerializableBytes;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tokio_util::codec::{FramedRead, FramedWrite};
use crate::connection::encryption::handle_encryption_negotiation;
use crate::errors::ConnectionError;
use crate::messages::codec::PacketMessageCodec;
use crate::messages::message::ClientMessage;
use crate::{errors::PacketError, messages::packet::PacketMessage};
use atomic::{Atomic, Ordering};
pub(crate) mod encryption;
const PACKET_MAGIC_BYTES: &[u8] = br#"VT01"#;
/// This should be an abstraction over low-level socket handlers and is not to be used directly.
/// [SteamClient] is used for binding and connecting.
#[derive(Debug)]
pub(crate) struct SteamConnection<S> {
/// Stream of data to Steam Content server. May be TCP or Websocket.
stream: S,
/// Address to which the connection is bound.
endpoint: String,
/// Current encryption state
state: Atomic<EncryptionState>,
/// Populated after the initial handshake with Steam
session_keys: Option<SessionKeys>,
}
impl<S> SteamConnection<S> {
pub fn change_encryption_state(&self, new_state: EncryptionState) {
self.state.swap(new_state, Ordering::AcqRel);
}
}
#[async_trait]
trait Connection<S> {
async fn new_connection(ip_addr: &str) -> Result<SteamConnection<S>, Box<dyn Error>>;
async fn read_packets(&mut self) -> Result<PacketMessage, PacketError>;
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>>;
}
pub(crate) type PacketTx = UnboundedSender<PacketMessage>;
pub(crate) type MessageTx<T> = UnboundedSender<ClientMessage<T>>;
pub(crate) type DynBytes = Box<dyn SerializableBytes>;
pub(crate) type BytesTx = UnboundedSender<Box<dyn SerializableBytes + 'static>>;
#[cfg(not(feature = "websockets"))]
impl SteamConnection<TcpStream> {
async fn | (mut self) -> Result<(), ConnectionError> {
let (sender, mut receiver): (UnboundedSender<DynBytes>, UnboundedReceiver<DynBytes>) =
mpsc::unbounded_channel();
let connection_state = &mut self.state;
let (stream_rx, stream_tx) = self.stream.into_split();
let mut framed_read = FramedRead::new(stream_rx, PacketMessageCodec::default());
let mut framed_write = FramedWrite::new(stream_tx, PacketMessageCodec::default());
tokio::spawn(async move {
if let Some(mes) = receiver.recv().await {
let message: Vec<u8> = mes.to_bytes();
framed_write.send(message).await.unwrap();
}
});
while let Some(packet_message) = framed_read.next().await {
let packet_message = packet_message.unwrap();
match packet_message.emsg() {
EMsg::ChannelEncryptRequest | EMsg::ChannelEncryptResponse | EMsg::ChannelEncryptResult => {
handle_encryption_negotiation(sender.clone(), connection_state, packet_message).unwrap();
}
_ => {
unimplemented!()
}
};
}
Ok(())
}
}
#[cfg(not(feature = "websockets"))]
#[async_trait]
impl Connection<TcpStream> for SteamConnection<TcpStream> {
/// Opens a tcp stream to specified IP
async fn new_connection(ip_addr: &str) -> Result<SteamConnection<TcpStream>, Box<dyn Error>> {
trace!("Connecting to ip: {}", &ip_addr);
let stream = TcpStream::connect(ip_addr).await?;
Ok(SteamConnection {
stream,
endpoint: ip_addr.to_string(),
state: Atomic::new(EncryptionState::Disconnected),
session_keys: None,
})
}
#[inline]
async fn read_packets(&mut self) -> Result<PacketMessage, PacketError> {
let mut framed_stream = FramedRead::new(&mut self.stream, PacketMessageCodec::default());
Ok(framed_stream.next().await.unwrap().unwrap())
}
#[inline]
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
let mut output_buffer = BytesMut::with_capacity(1024);
trace!("payload size: {} ", data.len());
output_buffer.extend_from_slice(&(data.len() as u32).to_le_bytes());
output_buffer.extend_from_slice(PACKET_MAGIC_BYTES);
output_buffer.extend_from_slice(data);
let output_buffer = output_buffer.freeze();
trace!("Writing {} bytes of data to stream..", output_buffer.len());
trace!("Payload bytes: {:?}", output_buffer);
let write_result = self.stream.write(&output_buffer).await?;
trace!("write result: {}", write_result);
Ok(())
}
}
#[cfg(feature = "websockets")]
mod connection_method {
use tokio_tls::TlsStream;
use tokio_tungstenite::{connect_async, stream::Stream, WebSocketStream};
use super::*;
type Ws = WebSocketStream<Stream<TcpStream, TlsStream<TcpStream>>>;
#[async_trait]
impl Connection<Ws> for SteamConnection<Ws> {
async fn new_connection(ws_url: &str) -> Result<SteamConnection<Ws>, Box<dyn Error>> {
let formatted_ws_url = format!("wss://{}/cmsocket/", ws_url);
debug!("Connecting to addr: {}", formatted_ws_url);
let (stream, _) = connect_async(&formatted_ws_url).await?;
Ok(SteamConnection {
stream,
endpoint: formatted_ws_url,
state: EncryptionState::Disconnected,
})
}
#[inline]
async fn read_packets(&mut self) -> Result<Vec<u8>, Box<dyn Error>> {
let mut data_len: [u8; 4] = [0; 4];
self.stream.get_mut().read_exact(&mut data_len).await?;
let mut packet_magic: [u8; 4] = [0; 4];
self.stream.get_mut().read_exact(&mut packet_magic).await?;
if packet_magic != PACKET_MAGIC_BYTES {
log::error!("Could not find magic packet on read.");
}
let mut incoming_data = BytesMut::with_capacity(1024);
self.stream.get_mut().read_buf(&mut incoming_data).await?;
// sanity check
debug!("data length: {}", u32::from_le_bytes(data_len));
trace!("data: {:?}", incoming_data);
Ok(incoming_data.to_vec())
}
#[inline]
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
unimplemented!()
}
}
}
#[derive(Debug, Copy, Clone)]
/// Represents the current state of encryption of the connection.
/// Steam is always encrypted, with the exception when the connection is starting.
pub(crate) enum EncryptionState {
/// After initial connection is established, Steam requests to encrypt messages
/// through a [EMsg::ChannelEncryptRequest]
Connected,
/// We are challenged after Steam returns a [EMsg::ChannelEncryptResult].
///
/// After checking the result for a positive outcome, we should be `Encrypted`, else we get disconnected,
/// and try again.
Challenged,
/// We are encrypted and there is nothing left to do.
Encrypted,
/// State only after logOff or if encryption fails.
Disconnected,
}
#[cfg(test)]
mod tests {
use env_logger::Builder;
use log::LevelFilter;
use steam_language_gen::generated::enums::EMsg;
use steam_language_gen::SerializableBytes;
// Note this useful idiom: importing names from outer (for mod tests) scope.
use super::*;
use crate::connection::encryption::handle_encrypt_request;
use crate::content_manager::dump_tcp_servers;
fn init() {
let _ = Builder::from_default_env()
.filter_module("steam_api", LevelFilter::Trace)
.is_test(true)
.try_init();
}
#[tokio::test]
#[cfg(not(feature = "websockets"))]
async fn connect_to_web_server() {
init();
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await;
assert!(steam_connection.is_ok());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[cfg(not(feature = "websockets"))]
async fn main_loop() {
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
steam_connection.main_loop().await.unwrap()
}
#[tokio::test]
#[cfg(not(feature = "websockets"))]
async fn test_spawn() {
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let mut steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
let packet_message = steam_connection.read_packets().await.unwrap();
assert_eq!(packet_message.emsg(), EMsg::ChannelEncryptRequest);
let answer = handle_encrypt_request(packet_message).to_bytes();
steam_connection.write_packets(&answer).await.unwrap();
let data = steam_connection.read_packets().await.unwrap();
assert_eq!(data.emsg(), EMsg::ChannelEncryptResult);
// steam_connection.main_loop().await.unwrap()
}
// #[tokio::test()]
// #[cfg(not(feature = "websockets"))]
// async fn answer_encrypt_request() {
// init();
//
// let cm_servers = CmServerSvList::fetch_servers(env!("STEAM_API")).await;
// let dumped_cm_servers = cm_servers.unwrap().dump_tcp_servers();
//
// let mut steam_connection: SteamConnection<TcpStream> =
// SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); let data =
// steam_connection.read_packets().await.unwrap(); let message = EMsg::from_raw_message(&data);
//
// assert_eq!(message.unwrap(), EMsg::ChannelEncryptRequest);
//
//
// let answer = handle_encrypt_request(PacketMessage::from_rawdata(&data));
// steam_connection.write_packets(answer.as_slice()).await.unwrap();
// let data = steam_connection.read_packets().await.unwrap();
// let message = EMsg::from_raw_message(&data).unwrap();
// assert_eq!(message, EMsg::ChannelEncryptResult);
// }
#[tokio::test(threaded_scheduler)]
#[cfg(feature = "websockets")]
async fn connect_to_ws_server() {
init();
let get_results = CmServerSvList::fetch_servers("1").await;
let fetched_servers = get_results.unwrap().dump_ws_servers();
let steam_connection = SteamConnection::new_connection(&fetched_servers[0]).await;
assert!(steam_connection.is_ok())
}
}
| main_loop | identifier_name |
mod.rs | //! This module handles connections to Content Manager Server
//! First you connect into the ip using a tcp socket
//! Then reads/writes into it
//!
//! Packets are sent at the following format: packet_len + packet_magic + data
//! packet length: u32
//! packet magic: VT01
//!
//! Apparently, bytes received are in little endian
use std::error::Error;
use async_trait::async_trait;
use bytes::BytesMut;
use futures::{SinkExt, StreamExt};
use steam_crypto::SessionKeys;
use steam_language_gen::generated::enums::EMsg;
use steam_language_gen::SerializableBytes;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tokio_util::codec::{FramedRead, FramedWrite};
use crate::connection::encryption::handle_encryption_negotiation;
use crate::errors::ConnectionError;
use crate::messages::codec::PacketMessageCodec;
use crate::messages::message::ClientMessage;
use crate::{errors::PacketError, messages::packet::PacketMessage};
use atomic::{Atomic, Ordering};
pub(crate) mod encryption;
const PACKET_MAGIC_BYTES: &[u8] = br#"VT01"#;
/// This should be an abstraction over low-level socket handlers and is not to be used directly.
/// [SteamClient] is used for binding and connecting.
#[derive(Debug)]
pub(crate) struct SteamConnection<S> {
/// Stream of data to Steam Content server. May be TCP or Websocket.
stream: S,
/// Address to which the connection is bound.
endpoint: String,
/// Current encryption state
state: Atomic<EncryptionState>,
/// Populated after the initial handshake with Steam
session_keys: Option<SessionKeys>,
}
impl<S> SteamConnection<S> {
pub fn change_encryption_state(&self, new_state: EncryptionState) {
self.state.swap(new_state, Ordering::AcqRel);
}
}
#[async_trait]
trait Connection<S> {
async fn new_connection(ip_addr: &str) -> Result<SteamConnection<S>, Box<dyn Error>>;
async fn read_packets(&mut self) -> Result<PacketMessage, PacketError>;
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>>;
}
pub(crate) type PacketTx = UnboundedSender<PacketMessage>;
pub(crate) type MessageTx<T> = UnboundedSender<ClientMessage<T>>;
pub(crate) type DynBytes = Box<dyn SerializableBytes>;
pub(crate) type BytesTx = UnboundedSender<Box<dyn SerializableBytes + 'static>>;
#[cfg(not(feature = "websockets"))]
impl SteamConnection<TcpStream> {
async fn main_loop(mut self) -> Result<(), ConnectionError> {
let (sender, mut receiver): (UnboundedSender<DynBytes>, UnboundedReceiver<DynBytes>) =
mpsc::unbounded_channel();
let connection_state = &mut self.state;
let (stream_rx, stream_tx) = self.stream.into_split();
let mut framed_read = FramedRead::new(stream_rx, PacketMessageCodec::default());
let mut framed_write = FramedWrite::new(stream_tx, PacketMessageCodec::default());
tokio::spawn(async move {
if let Some(mes) = receiver.recv().await {
let message: Vec<u8> = mes.to_bytes();
framed_write.send(message).await.unwrap();
}
});
while let Some(packet_message) = framed_read.next().await {
let packet_message = packet_message.unwrap();
match packet_message.emsg() {
EMsg::ChannelEncryptRequest | EMsg::ChannelEncryptResponse | EMsg::ChannelEncryptResult => {
handle_encryption_negotiation(sender.clone(), connection_state, packet_message).unwrap();
}
_ => {
unimplemented!()
}
};
}
Ok(())
}
}
#[cfg(not(feature = "websockets"))]
#[async_trait]
impl Connection<TcpStream> for SteamConnection<TcpStream> {
/// Opens a tcp stream to specified IP
async fn new_connection(ip_addr: &str) -> Result<SteamConnection<TcpStream>, Box<dyn Error>> {
trace!("Connecting to ip: {}", &ip_addr);
let stream = TcpStream::connect(ip_addr).await?;
Ok(SteamConnection {
stream,
endpoint: ip_addr.to_string(),
state: Atomic::new(EncryptionState::Disconnected),
session_keys: None,
})
}
#[inline]
async fn read_packets(&mut self) -> Result<PacketMessage, PacketError> {
let mut framed_stream = FramedRead::new(&mut self.stream, PacketMessageCodec::default());
Ok(framed_stream.next().await.unwrap().unwrap()) | #[inline]
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
let mut output_buffer = BytesMut::with_capacity(1024);
trace!("payload size: {} ", data.len());
output_buffer.extend_from_slice(&(data.len() as u32).to_le_bytes());
output_buffer.extend_from_slice(PACKET_MAGIC_BYTES);
output_buffer.extend_from_slice(data);
let output_buffer = output_buffer.freeze();
trace!("Writing {} bytes of data to stream..", output_buffer.len());
trace!("Payload bytes: {:?}", output_buffer);
let write_result = self.stream.write(&output_buffer).await?;
trace!("write result: {}", write_result);
Ok(())
}
}
#[cfg(feature = "websockets")]
mod connection_method {
use tokio_tls::TlsStream;
use tokio_tungstenite::{connect_async, stream::Stream, WebSocketStream};
use super::*;
type Ws = WebSocketStream<Stream<TcpStream, TlsStream<TcpStream>>>;
#[async_trait]
impl Connection<Ws> for SteamConnection<Ws> {
async fn new_connection(ws_url: &str) -> Result<SteamConnection<Ws>, Box<dyn Error>> {
let formatted_ws_url = format!("wss://{}/cmsocket/", ws_url);
debug!("Connecting to addr: {}", formatted_ws_url);
let (stream, _) = connect_async(&formatted_ws_url).await?;
Ok(SteamConnection {
stream,
endpoint: formatted_ws_url,
state: EncryptionState::Disconnected,
})
}
#[inline]
async fn read_packets(&mut self) -> Result<Vec<u8>, Box<dyn Error>> {
let mut data_len: [u8; 4] = [0; 4];
self.stream.get_mut().read_exact(&mut data_len).await?;
let mut packet_magic: [u8; 4] = [0; 4];
self.stream.get_mut().read_exact(&mut packet_magic).await?;
if packet_magic != PACKET_MAGIC_BYTES {
log::error!("Could not find magic packet on read.");
}
let mut incoming_data = BytesMut::with_capacity(1024);
self.stream.get_mut().read_buf(&mut incoming_data).await?;
// sanity check
debug!("data length: {}", u32::from_le_bytes(data_len));
trace!("data: {:?}", incoming_data);
Ok(incoming_data.to_vec())
}
#[inline]
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
unimplemented!()
}
}
}
#[derive(Debug, Copy, Clone)]
/// Represents the current state of encryption of the connection.
/// Steam is always encrypted, with the exception when the connection is starting.
pub(crate) enum EncryptionState {
/// After initial connection is established, Steam requests to encrypt messages
/// through a [EMsg::ChannelEncryptRequest]
Connected,
/// We are challenged after Steam returns a [EMsg::ChannelEncryptResult].
///
/// After checking the result for a positive outcome, we should be `Encrypted`, else we get disconnected,
/// and try again.
Challenged,
/// We are encrypted and there is nothing left to do.
Encrypted,
/// State only after logOff or if encryption fails.
Disconnected,
}
#[cfg(test)]
mod tests {
use env_logger::Builder;
use log::LevelFilter;
use steam_language_gen::generated::enums::EMsg;
use steam_language_gen::SerializableBytes;
// Note this useful idiom: importing names from outer (for mod tests) scope.
use super::*;
use crate::connection::encryption::handle_encrypt_request;
use crate::content_manager::dump_tcp_servers;
fn init() {
let _ = Builder::from_default_env()
.filter_module("steam_api", LevelFilter::Trace)
.is_test(true)
.try_init();
}
#[tokio::test]
#[cfg(not(feature = "websockets"))]
async fn connect_to_web_server() {
init();
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await;
assert!(steam_connection.is_ok());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[cfg(not(feature = "websockets"))]
async fn main_loop() {
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
steam_connection.main_loop().await.unwrap()
}
#[tokio::test]
#[cfg(not(feature = "websockets"))]
async fn test_spawn() {
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let mut steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
let packet_message = steam_connection.read_packets().await.unwrap();
assert_eq!(packet_message.emsg(), EMsg::ChannelEncryptRequest);
let answer = handle_encrypt_request(packet_message).to_bytes();
steam_connection.write_packets(&answer).await.unwrap();
let data = steam_connection.read_packets().await.unwrap();
assert_eq!(data.emsg(), EMsg::ChannelEncryptResult);
// steam_connection.main_loop().await.unwrap()
}
// #[tokio::test()]
// #[cfg(not(feature = "websockets"))]
// async fn answer_encrypt_request() {
// init();
//
// let cm_servers = CmServerSvList::fetch_servers(env!("STEAM_API")).await;
// let dumped_cm_servers = cm_servers.unwrap().dump_tcp_servers();
//
// let mut steam_connection: SteamConnection<TcpStream> =
// SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); let data =
// steam_connection.read_packets().await.unwrap(); let message = EMsg::from_raw_message(&data);
//
// assert_eq!(message.unwrap(), EMsg::ChannelEncryptRequest);
//
//
// let answer = handle_encrypt_request(PacketMessage::from_rawdata(&data));
// steam_connection.write_packets(answer.as_slice()).await.unwrap();
// let data = steam_connection.read_packets().await.unwrap();
// let message = EMsg::from_raw_message(&data).unwrap();
// assert_eq!(message, EMsg::ChannelEncryptResult);
// }
#[tokio::test(threaded_scheduler)]
#[cfg(feature = "websockets")]
async fn connect_to_ws_server() {
init();
let get_results = CmServerSvList::fetch_servers("1").await;
let fetched_servers = get_results.unwrap().dump_ws_servers();
let steam_connection = SteamConnection::new_connection(&fetched_servers[0]).await;
assert!(steam_connection.is_ok())
}
} | }
| random_line_split |
mod.rs | //! This module handles connections to Content Manager Server
//! First you connect into the ip using a tcp socket
//! Then reads/writes into it
//!
//! Packets are sent at the following format: packet_len + packet_magic + data
//! packet length: u32
//! packet magic: VT01
//!
//! Apparently, bytes received are in little endian
use std::error::Error;
use async_trait::async_trait;
use bytes::BytesMut;
use futures::{SinkExt, StreamExt};
use steam_crypto::SessionKeys;
use steam_language_gen::generated::enums::EMsg;
use steam_language_gen::SerializableBytes;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tokio_util::codec::{FramedRead, FramedWrite};
use crate::connection::encryption::handle_encryption_negotiation;
use crate::errors::ConnectionError;
use crate::messages::codec::PacketMessageCodec;
use crate::messages::message::ClientMessage;
use crate::{errors::PacketError, messages::packet::PacketMessage};
use atomic::{Atomic, Ordering};
pub(crate) mod encryption;
const PACKET_MAGIC_BYTES: &[u8] = br#"VT01"#;
/// This should be an abstraction over low-level socket handlers and is not to be used directly.
/// [SteamClient] is used for binding and connecting.
#[derive(Debug)]
pub(crate) struct SteamConnection<S> {
/// Stream of data to Steam Content server. May be TCP or Websocket.
stream: S,
/// Address to which the connection is bound.
endpoint: String,
/// Current encryption state
state: Atomic<EncryptionState>,
/// Populated after the initial handshake with Steam
session_keys: Option<SessionKeys>,
}
impl<S> SteamConnection<S> {
pub fn change_encryption_state(&self, new_state: EncryptionState) {
self.state.swap(new_state, Ordering::AcqRel);
}
}
#[async_trait]
trait Connection<S> {
async fn new_connection(ip_addr: &str) -> Result<SteamConnection<S>, Box<dyn Error>>;
async fn read_packets(&mut self) -> Result<PacketMessage, PacketError>;
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>>;
}
pub(crate) type PacketTx = UnboundedSender<PacketMessage>;
pub(crate) type MessageTx<T> = UnboundedSender<ClientMessage<T>>;
pub(crate) type DynBytes = Box<dyn SerializableBytes>;
pub(crate) type BytesTx = UnboundedSender<Box<dyn SerializableBytes + 'static>>;
#[cfg(not(feature = "websockets"))]
impl SteamConnection<TcpStream> {
async fn main_loop(mut self) -> Result<(), ConnectionError> |
}
#[cfg(not(feature = "websockets"))]
#[async_trait]
impl Connection<TcpStream> for SteamConnection<TcpStream> {
/// Opens a tcp stream to specified IP
async fn new_connection(ip_addr: &str) -> Result<SteamConnection<TcpStream>, Box<dyn Error>> {
trace!("Connecting to ip: {}", &ip_addr);
let stream = TcpStream::connect(ip_addr).await?;
Ok(SteamConnection {
stream,
endpoint: ip_addr.to_string(),
state: Atomic::new(EncryptionState::Disconnected),
session_keys: None,
})
}
#[inline]
async fn read_packets(&mut self) -> Result<PacketMessage, PacketError> {
let mut framed_stream = FramedRead::new(&mut self.stream, PacketMessageCodec::default());
Ok(framed_stream.next().await.unwrap().unwrap())
}
#[inline]
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
let mut output_buffer = BytesMut::with_capacity(1024);
trace!("payload size: {} ", data.len());
output_buffer.extend_from_slice(&(data.len() as u32).to_le_bytes());
output_buffer.extend_from_slice(PACKET_MAGIC_BYTES);
output_buffer.extend_from_slice(data);
let output_buffer = output_buffer.freeze();
trace!("Writing {} bytes of data to stream..", output_buffer.len());
trace!("Payload bytes: {:?}", output_buffer);
let write_result = self.stream.write(&output_buffer).await?;
trace!("write result: {}", write_result);
Ok(())
}
}
#[cfg(feature = "websockets")]
mod connection_method {
use tokio_tls::TlsStream;
use tokio_tungstenite::{connect_async, stream::Stream, WebSocketStream};
use super::*;
type Ws = WebSocketStream<Stream<TcpStream, TlsStream<TcpStream>>>;
#[async_trait]
impl Connection<Ws> for SteamConnection<Ws> {
async fn new_connection(ws_url: &str) -> Result<SteamConnection<Ws>, Box<dyn Error>> {
let formatted_ws_url = format!("wss://{}/cmsocket/", ws_url);
debug!("Connecting to addr: {}", formatted_ws_url);
let (stream, _) = connect_async(&formatted_ws_url).await?;
Ok(SteamConnection {
stream,
endpoint: formatted_ws_url,
state: EncryptionState::Disconnected,
})
}
#[inline]
async fn read_packets(&mut self) -> Result<Vec<u8>, Box<dyn Error>> {
let mut data_len: [u8; 4] = [0; 4];
self.stream.get_mut().read_exact(&mut data_len).await?;
let mut packet_magic: [u8; 4] = [0; 4];
self.stream.get_mut().read_exact(&mut packet_magic).await?;
if packet_magic != PACKET_MAGIC_BYTES {
log::error!("Could not find magic packet on read.");
}
let mut incoming_data = BytesMut::with_capacity(1024);
self.stream.get_mut().read_buf(&mut incoming_data).await?;
// sanity check
debug!("data length: {}", u32::from_le_bytes(data_len));
trace!("data: {:?}", incoming_data);
Ok(incoming_data.to_vec())
}
#[inline]
async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
unimplemented!()
}
}
}
#[derive(Debug, Copy, Clone)]
/// Represents the current state of encryption of the connection.
/// Steam is always encrypted, with the exception when the connection is starting.
pub(crate) enum EncryptionState {
/// After initial connection is established, Steam requests to encrypt messages
/// through a [EMsg::ChannelEncryptRequest]
Connected,
/// We are challenged after Steam returns a [EMsg::ChannelEncryptResult].
///
/// After checking the result for a positive outcome, we should be `Encrypted`, else we get disconnected,
/// and try again.
Challenged,
/// We are encrypted and there is nothing left to do.
Encrypted,
/// State only after logOff or if encryption fails.
Disconnected,
}
#[cfg(test)]
mod tests {
use env_logger::Builder;
use log::LevelFilter;
use steam_language_gen::generated::enums::EMsg;
use steam_language_gen::SerializableBytes;
// Note this useful idiom: importing names from outer (for mod tests) scope.
use super::*;
use crate::connection::encryption::handle_encrypt_request;
use crate::content_manager::dump_tcp_servers;
fn init() {
let _ = Builder::from_default_env()
.filter_module("steam_api", LevelFilter::Trace)
.is_test(true)
.try_init();
}
#[tokio::test]
#[cfg(not(feature = "websockets"))]
async fn connect_to_web_server() {
init();
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await;
assert!(steam_connection.is_ok());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[cfg(not(feature = "websockets"))]
async fn main_loop() {
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
steam_connection.main_loop().await.unwrap()
}
#[tokio::test]
#[cfg(not(feature = "websockets"))]
async fn test_spawn() {
let dumped_cm_servers = dump_tcp_servers().await.unwrap();
let mut steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
let packet_message = steam_connection.read_packets().await.unwrap();
assert_eq!(packet_message.emsg(), EMsg::ChannelEncryptRequest);
let answer = handle_encrypt_request(packet_message).to_bytes();
steam_connection.write_packets(&answer).await.unwrap();
let data = steam_connection.read_packets().await.unwrap();
assert_eq!(data.emsg(), EMsg::ChannelEncryptResult);
// steam_connection.main_loop().await.unwrap()
}
// #[tokio::test()]
// #[cfg(not(feature = "websockets"))]
// async fn answer_encrypt_request() {
// init();
//
// let cm_servers = CmServerSvList::fetch_servers(env!("STEAM_API")).await;
// let dumped_cm_servers = cm_servers.unwrap().dump_tcp_servers();
//
// let mut steam_connection: SteamConnection<TcpStream> =
// SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); let data =
// steam_connection.read_packets().await.unwrap(); let message = EMsg::from_raw_message(&data);
//
// assert_eq!(message.unwrap(), EMsg::ChannelEncryptRequest);
//
//
// let answer = handle_encrypt_request(PacketMessage::from_rawdata(&data));
// steam_connection.write_packets(answer.as_slice()).await.unwrap();
// let data = steam_connection.read_packets().await.unwrap();
// let message = EMsg::from_raw_message(&data).unwrap();
// assert_eq!(message, EMsg::ChannelEncryptResult);
// }
#[tokio::test(threaded_scheduler)]
#[cfg(feature = "websockets")]
async fn connect_to_ws_server() {
init();
let get_results = CmServerSvList::fetch_servers("1").await;
let fetched_servers = get_results.unwrap().dump_ws_servers();
let steam_connection = SteamConnection::new_connection(&fetched_servers[0]).await;
assert!(steam_connection.is_ok())
}
}
| {
let (sender, mut receiver): (UnboundedSender<DynBytes>, UnboundedReceiver<DynBytes>) =
mpsc::unbounded_channel();
let connection_state = &mut self.state;
let (stream_rx, stream_tx) = self.stream.into_split();
let mut framed_read = FramedRead::new(stream_rx, PacketMessageCodec::default());
let mut framed_write = FramedWrite::new(stream_tx, PacketMessageCodec::default());
tokio::spawn(async move {
if let Some(mes) = receiver.recv().await {
let message: Vec<u8> = mes.to_bytes();
framed_write.send(message).await.unwrap();
}
});
while let Some(packet_message) = framed_read.next().await {
let packet_message = packet_message.unwrap();
match packet_message.emsg() {
EMsg::ChannelEncryptRequest | EMsg::ChannelEncryptResponse | EMsg::ChannelEncryptResult => {
handle_encryption_negotiation(sender.clone(), connection_state, packet_message).unwrap();
}
_ => {
unimplemented!()
}
};
}
Ok(())
} | identifier_body |
zipfian_generator.go | package generator
import (
"math"
)
const (
ZipfianConstant = float64(0.99)
)
// Compute the zeta constant needed for the distribution.
// Do this incrementally for a distribution that has n items now
// but used to have st items. Use the zipfian constant theta.
// Remember the new value of n so that if we change the itemCount,
// we'll know to recompute zeta.
func zeta(st, n int64, theta, initialSum float64) (int64, float64) {
countForzata := n
return countForzata, zetaStatic(st, n, theta, initialSum)
}
// Compute the zeta constant needed for the distribution. Do this incrementally
// for a distribution that has h items now but used to have st items.
// Use the zipfian constant theta. Remember the new value of n so that
// if we change itemCount, we'll know to recompute zeta.
func zetaStatic(st, n int64, theta, initialSum float64) float64 {
sum := initialSum
for i := st; i < n; i++ {
sum += 1 / math.Pow(float64(i+1), theta)
}
return sum
}
// A generator of a zipfian distribution. It produces a sequence of items,
// such that some items are more popular than others, according to
// a zipfian distribution. When you construct an instance of this class,
// you specify the number of items in the set to draw from, either by
// specifying an itemcount (so that the sequence is of items from 0 to
// itemcount-1) or by specifying a min and a max (so that the sequence
// is of items from min to max inclusive). After you construct the instance,
// you can change the number of items by calling NextInt() or nextLong().
//
// Note that the popular items will be clustered together, e.g. item 0
// is the most popular, item 1 the second most popular, and so on (or min
// is the most popular, min+1 the next most popular, etc.)
// If you don't want this clustering, and instead want the popular items
// scattered throughout the item space, then use ScrambledZipfianGenerator
// instead.
//
// Be aware: initializing this generator may take a long time if there are
// lots of items to choose from (e.g. over a minute for 100 million objects).
// This is because certain mathematical values need to be computed to properly
// generate a zipfian skew, and one of those values (zeta) is a sum sequence
// from 1 to n, where n is the itemCount. Note that if you increase the number
// of items in the set, we can compute a new zeta incrementally, so it should
// be fast unless you have added millions of items. However, if you decrease
// the number of items, we recompute zeta from scratch, so this can take
// a long time.
//
// The algorithm used here is from
// "Quickly Generating Billion-Record Synthetic Databases",
// Jim Gray et al, SIGMOD 1994.
//
type ZipfianGenerator struct {
*IntegerGeneratorBase
// Number of items.
items int64
// Min item to generate. | zipfianConstant float64
// Computed parameters for generating the distribution.
alpha, zetan, eta, theta, zeta2theta float64
// The number of items used to compute zetan the last time.
countForzata int64
// Flag to prevent problems. If you increase the number of items which
// the zipfian generator is allowed to choose from, this code will
// incrementally compute a new zeta value for the larger itemcount.
// However, if you decrease the number of items, the code computes
// zeta from scratch; this is expensive for large itemsets.
// Usually this is not intentional; e.g. one goroutine thinks
// the number of items is 1001 and calls "NextLong()" with that item count;
// then another goroutine who thinks the number of items is 1000 calls
// NextLong() with itemCount=1000 triggering the expensive recomputation.
// (It is expensive for 100 million items, not really for 1000 items.)
// Why did the second goroutine think there were only 1000 items?
// maybe it read the item count before the first goroutine incremented it.
// So this flag allows you to say if you really do want that recomputation.
// If true, then the code will recompute zeta if the itemcount goes down.
// If false, the code will assume itemcount only goes up, and never
// recompute.
allowItemCountDecrease bool
}
// Create a zipfian generator for items between min and max(inclusive).
func NewZipfianGeneratorByInterval(min, max int64) *ZipfianGenerator {
zeta := zetaStatic(min, max-min+1, ZipfianConstant, 0)
return NewZipfianGenerator(min, max, ZipfianConstant, zeta)
}
// Create a zipfian generator for items between min and max(inclusive) for
// the specified zipfian constant, using the precomputed value of zeta.
func NewZipfianGenerator(
min, max int64, zipfianConstant, zetan float64) *ZipfianGenerator {
items := max - min + 1
base := min
theta := zipfianConstant
countForzata, zeta2theta := zeta(0, 2, theta, 0)
alpha := 1.0 / (1.0 - theta)
countForzata = items
eta := (1 - math.Pow(2.0/float64(items), 1-theta)) / (1 - zeta2theta/zetan)
object := &ZipfianGenerator{
IntegerGeneratorBase: NewIntegerGeneratorBase(0),
items: items,
base: base,
zipfianConstant: zipfianConstant,
alpha: alpha,
zetan: zetan,
eta: eta,
theta: theta,
zeta2theta: zeta2theta,
countForzata: countForzata,
allowItemCountDecrease: false,
}
object.NextInt()
return object
}
// Return the next value, skewed by the zipfian distribution. The 0th item will
// be the most popular, followed by the 1st, followed by the 2nd, etc.
// (or, if min != 0, the min-th item is the most popular, the min+1th item
// the next most popular, etc.) If you want the popular items
// scattered throughout the item space, use ScrambledZipfianGenerator instead.
func (self *ZipfianGenerator) NextInt() int64 {
return self.Next(self.items)
}
// Return the next value, skewed by the zipfian distribution. The 0th item will
// be the most popular, followed by the 1st, followed by the 2nd, etc.
// (same as NextInt())
func (self *ZipfianGenerator) NextLong() int64 {
return self.Next(self.items)
}
// Generate the next item. this distribution will be skewed toward
// lower itegers; e.g. 0 will be the most popular, 1 the next most popular, etc.
func (self *ZipfianGenerator) Next(itemCount int64) int64 {
var ret int64
defer func(r *int64) {
self.IntegerGeneratorBase.SetLastInt(*r)
}(&ret)
if itemCount != self.countForzata {
if itemCount > self.countForzata {
self.countForzata, self.zetan = zeta(self.countForzata, itemCount, self.theta, self.zetan)
self.eta = (1 - math.Pow(float64(2.0/self.items), 1-self.theta)) / (1 - self.zeta2theta/self.zetan)
} else if (itemCount < self.countForzata) && (self.allowItemCountDecrease) {
self.countForzata, self.zetan = zeta(0, itemCount, self.theta, 0)
self.eta = (1 - math.Pow(float64(2.0/self.items), 1-self.theta)) / (1 - self.zeta2theta/self.zetan)
}
}
u := NextFloat64()
uz := u * self.zetan
if uz < 1.0 {
ret = self.base
return ret
}
if uz < 1.0+math.Pow(0.5, self.theta) {
ret = self.base + 1
return ret
}
ret = self.base + int64(float64(itemCount)*math.Pow(self.eta*u-self.eta+1.0, self.alpha))
return ret
}
func (self *ZipfianGenerator) NextString() string {
return self.IntegerGeneratorBase.NextString(self)
}
func (self *ZipfianGenerator) Mean() float64 {
panic("unsupported operation")
}
var (
Zetan = float64(26.46902820178302)
UsedZipfianConstant = float64(0.99)
ItemCount = float64(10000000000)
)
// A generator of a zipfian distribution. It produces a sequence of items,
// such that some items are more popular than others, according to a zipfian
// distribution. When you construct an instance of this class, you specify
// the number of items in the set to draw from, either by specifying
// an itemCount(so that the sequence is of items from 0 to itemCount-1) or
// by specifying a min and a max (so that the sequence is of items from min
// to max inclusive). After you construct the instance, you can change
// the number of items by calling NextInt(itemCount) or Next(itemCount).
// Unlike ZipfianGenerator, this class scatters the "popular" items across
// the item space. Use this, instead of ZipfianGenerator, if you don't want
// the head of the distribution(the popular items) clustered together.
type ScrambledZipfianGenerator struct {
*IntegerGeneratorBase
gen *ZipfianGenerator
min int64
max int64
itemCount int64
}
// Create a zipfian generator for the specified number of items.
func NewScrambledZipfianGeneratorByItems(items int64) *ScrambledZipfianGenerator {
return NewScrambledZipfianGenerator(0, items-1)
}
// Create a zipfian generator for items between min and max (inclusive) for
// the specified zipfian constant. If you use a zipfian constant other than
// 0.99, this will take a long time complete because we need to recompute
// zeta.
func NewScrambledZipfianGeneratorConstant(min, max int64, constant float64) *ScrambledZipfianGenerator {
var gen *ZipfianGenerator
itemCount := max - min + 1
if constant == UsedZipfianConstant {
gen = NewZipfianGenerator(0, itemCount, constant, Zetan)
} else {
zeta := zetaStatic(0, itemCount, constant, 0)
gen = NewZipfianGenerator(0, itemCount, constant, zeta)
}
return &ScrambledZipfianGenerator{
IntegerGeneratorBase: NewIntegerGeneratorBase(min),
gen: gen,
min: min,
max: max,
itemCount: max - min + 1,
}
}
// Create a zipfian generator for items between min and max(inclusive).
func NewScrambledZipfianGenerator(min, max int64) *ScrambledZipfianGenerator {
return NewScrambledZipfianGeneratorConstant(min, max, ZipfianConstant)
}
// Return the next int in the sequence.
func (self *ScrambledZipfianGenerator) NextInt() int64 {
return self.Next()
}
// return the next item in the sequence.
func (self *ScrambledZipfianGenerator) Next() int64 {
ret := self.gen.NextLong()
ret = self.min + int64(FNVHash64(uint64(ret))%uint64(self.itemCount))
self.SetLastInt(ret)
return ret
}
func (self *ScrambledZipfianGenerator) NextString() string {
return self.IntegerGeneratorBase.NextString(self)
}
// Since the values are scrambed (hopefully uniformly), the mean is simply
// the middle of the range.
func (self *ScrambledZipfianGenerator) Mean() float64 {
return float64(self.min+self.max) / 2.0
}
// Hash a integer value.
func Hash(value int64) uint64 {
return FNVHash64(uint64(value))
}
const (
FNVOffsetBasis32 = uint32(0x811c9dc5)
FNVPrime32 = uint32(16777619)
)
// 32 bit FNV hash.
// Refer to http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
func FNVHash32(value uint32) uint32 {
hash := FNVOffsetBasis32
for i := 0; i < 4; i++ {
octet := value & 0x00FF
value >>= 8
hash ^= octet
hash *= FNVPrime32
}
return hash
}
const (
FNVOffsetBasis64 = uint64(0xCBF29CE484222325)
FNVPrime64 = uint64(1099511628211)
)
// 64 bit FNV hash.
// Refer to http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
func FNVHash64(value uint64) uint64 {
hash := FNVOffsetBasis64
for i := 0; i < 8; i++ {
octet := value & 0x00FF
value >>= 8
hash ^= octet
hash *= FNVPrime64
}
return hash
} | base int64
// The zipfian constant to use. | random_line_split |
zipfian_generator.go | package generator
import (
"math"
)
const (
ZipfianConstant = float64(0.99)
)
// Compute the zeta constant needed for the distribution.
// Do this incrementally for a distribution that has n items now
// but used to have st items. Use the zipfian constant theta.
// Remember the new value of n so that if we change the itemCount,
// we'll know to recompute zeta.
func zeta(st, n int64, theta, initialSum float64) (int64, float64) {
countForzata := n
return countForzata, zetaStatic(st, n, theta, initialSum)
}
// Compute the zeta constant needed for the distribution. Do this incrementally
// for a distribution that has h items now but used to have st items.
// Use the zipfian constant theta. Remember the new value of n so that
// if we change itemCount, we'll know to recompute zeta.
func zetaStatic(st, n int64, theta, initialSum float64) float64 {
sum := initialSum
for i := st; i < n; i++ {
sum += 1 / math.Pow(float64(i+1), theta)
}
return sum
}
// A generator of a zipfian distribution. It produces a sequence of items,
// such that some items are more popular than others, according to
// a zipfian distribution. When you construct an instance of this class,
// you specify the number of items in the set to draw from, either by
// specifying an itemcount (so that the sequence is of items from 0 to
// itemcount-1) or by specifying a min and a max (so that the sequence
// is of items from min to max inclusive). After you construct the instance,
// you can change the number of items by calling NextInt() or nextLong().
//
// Note that the popular items will be clustered together, e.g. item 0
// is the most popular, item 1 the second most popular, and so on (or min
// is the most popular, min+1 the next most popular, etc.)
// If you don't want this clustering, and instead want the popular items
// scattered throughout the item space, then use ScrambledZipfianGenerator
// instead.
//
// Be aware: initializing this generator may take a long time if there are
// lots of items to choose from (e.g. over a minute for 100 million objects).
// This is because certain mathematical values need to be computed to properly
// generate a zipfian skew, and one of those values (zeta) is a sum sequence
// from 1 to n, where n is the itemCount. Note that if you increase the number
// of items in the set, we can compute a new zeta incrementally, so it should
// be fast unless you have added millions of items. However, if you decrease
// the number of items, we recompute zeta from scratch, so this can take
// a long time.
//
// The algorithm used here is from
// "Quickly Generating Billion-Record Synthetic Databases",
// Jim Gray et al, SIGMOD 1994.
//
type ZipfianGenerator struct {
*IntegerGeneratorBase
// Number of items.
items int64
// Min item to generate.
base int64
// The zipfian constant to use.
zipfianConstant float64
// Computed parameters for generating the distribution.
alpha, zetan, eta, theta, zeta2theta float64
// The number of items used to compute zetan the last time.
countForzata int64
// Flag to prevent problems. If you increase the number of items which
// the zipfian generator is allowed to choose from, this code will
// incrementally compute a new zeta value for the larger itemcount.
// However, if you decrease the number of items, the code computes
// zeta from scratch; this is expensive for large itemsets.
// Usually this is not intentional; e.g. one goroutine thinks
// the number of items is 1001 and calls "NextLong()" with that item count;
// then another goroutine who thinks the number of items is 1000 calls
// NextLong() with itemCount=1000 triggering the expensive recomputation.
// (It is expensive for 100 million items, not really for 1000 items.)
// Why did the second goroutine think there were only 1000 items?
// maybe it read the item count before the first goroutine incremented it.
// So this flag allows you to say if you really do want that recomputation.
// If true, then the code will recompute zeta if the itemcount goes down.
// If false, the code will assume itemcount only goes up, and never
// recompute.
allowItemCountDecrease bool
}
// NewZipfianGeneratorByInterval creates a zipfian generator for items between
// min and max (inclusive), using the default zipfian constant.
func NewZipfianGeneratorByInterval(min, max int64) *ZipfianGenerator {
	// BUG FIX: zetaStatic must sum over all max-min+1 items starting at rank 0.
	// Passing min as the start rank skipped the first min terms whenever
	// min > 0, producing a too-small zeta (upstream YCSB uses zetastatic(0, n)).
	zeta := zetaStatic(0, max-min+1, ZipfianConstant, 0)
	return NewZipfianGenerator(min, max, ZipfianConstant, zeta)
}
// Create a zipfian generator for items between min and max(inclusive) for
// the specified zipfian constant, using the precomputed value of zeta.
func NewZipfianGenerator(
min, max int64, zipfianConstant, zetan float64) *ZipfianGenerator {
items := max - min + 1
base := min
theta := zipfianConstant
countForzata, zeta2theta := zeta(0, 2, theta, 0)
alpha := 1.0 / (1.0 - theta)
countForzata = items
eta := (1 - math.Pow(2.0/float64(items), 1-theta)) / (1 - zeta2theta/zetan)
object := &ZipfianGenerator{
IntegerGeneratorBase: NewIntegerGeneratorBase(0),
items: items,
base: base,
zipfianConstant: zipfianConstant,
alpha: alpha,
zetan: zetan,
eta: eta,
theta: theta,
zeta2theta: zeta2theta,
countForzata: countForzata,
allowItemCountDecrease: false,
}
object.NextInt()
return object
}
// Return the next value, skewed by the zipfian distribution. The 0th item will
// be the most popular, followed by the 1st, followed by the 2nd, etc.
// (or, if min != 0, the min-th item is the most popular, the min+1th item
// the next most popular, etc.) If you want the popular items
// scattered throughout the item space, use ScrambledZipfianGenerator instead.
func (self *ZipfianGenerator) NextInt() int64 {
return self.Next(self.items)
}
// Return the next value, skewed by the zipfian distribution. The 0th item will
// be the most popular, followed by the 1st, followed by the 2nd, etc.
// (same as NextInt())
func (self *ZipfianGenerator) NextLong() int64 {
return self.Next(self.items)
}
// Generate the next item. this distribution will be skewed toward
// lower itegers; e.g. 0 will be the most popular, 1 the next most popular, etc.
func (self *ZipfianGenerator) Next(itemCount int64) int64 {
var ret int64
defer func(r *int64) {
self.IntegerGeneratorBase.SetLastInt(*r)
}(&ret)
if itemCount != self.countForzata |
u := NextFloat64()
uz := u * self.zetan
if uz < 1.0 {
ret = self.base
return ret
}
if uz < 1.0+math.Pow(0.5, self.theta) {
ret = self.base + 1
return ret
}
ret = self.base + int64(float64(itemCount)*math.Pow(self.eta*u-self.eta+1.0, self.alpha))
return ret
}
func (self *ZipfianGenerator) NextString() string {
return self.IntegerGeneratorBase.NextString(self)
}
func (self *ZipfianGenerator) Mean() float64 {
panic("unsupported operation")
}
var (
Zetan = float64(26.46902820178302)
UsedZipfianConstant = float64(0.99)
ItemCount = float64(10000000000)
)
// A generator of a zipfian distribution. It produces a sequence of items,
// such that some items are more popular than others, according to a zipfian
// distribution. When you construct an instance of this class, you specify
// the number of items in the set to draw from, either by specifying
// an itemCount(so that the sequence is of items from 0 to itemCount-1) or
// by specifying a min and a max (so that the sequence is of items from min
// to max inclusive). After you construct the instance, you can change
// the number of items by calling NextInt(itemCount) or Next(itemCount).
// Unlike ZipfianGenerator, this class scatters the "popular" items across
// the item space. Use this, instead of ZipfianGenerator, if you don't want
// the head of the distribution(the popular items) clustered together.
type ScrambledZipfianGenerator struct {
*IntegerGeneratorBase
gen *ZipfianGenerator
min int64
max int64
itemCount int64
}
// Create a zipfian generator for the specified number of items.
func NewScrambledZipfianGeneratorByItems(items int64) *ScrambledZipfianGenerator {
return NewScrambledZipfianGenerator(0, items-1)
}
// NewScrambledZipfianGeneratorConstant creates a zipfian generator for items
// between min and max (inclusive) for the specified zipfian constant. If you
// use a zipfian constant other than 0.99, this will take a long time to
// complete because we need to recompute zeta.
func NewScrambledZipfianGeneratorConstant(min, max int64, constant float64) *ScrambledZipfianGenerator {
	var gen *ZipfianGenerator
	itemCount := max - min + 1
	if constant == UsedZipfianConstant {
		// BUG FIX: Zetan is precomputed for ItemCount items, so the underlying
		// generator must span that same item space (as upstream YCSB does);
		// previously itemCount was passed, mismatching the precomputed Zetan.
		gen = NewZipfianGenerator(0, int64(ItemCount), constant, Zetan)
	} else {
		// NewZipfianGenerator's max is inclusive, so pass itemCount-1 to get
		// exactly itemCount items (matching the zeta computed just above).
		zeta := zetaStatic(0, itemCount, constant, 0)
		gen = NewZipfianGenerator(0, itemCount-1, constant, zeta)
	}
	return &ScrambledZipfianGenerator{
		IntegerGeneratorBase: NewIntegerGeneratorBase(min),
		gen:                  gen,
		min:                  min,
		max:                  max,
		itemCount:            itemCount,
	}
}
// Create a zipfian generator for items between min and max(inclusive).
func NewScrambledZipfianGenerator(min, max int64) *ScrambledZipfianGenerator {
return NewScrambledZipfianGeneratorConstant(min, max, ZipfianConstant)
}
// Return the next int in the sequence.
func (self *ScrambledZipfianGenerator) NextInt() int64 {
return self.Next()
}
// return the next item in the sequence.
func (self *ScrambledZipfianGenerator) Next() int64 {
ret := self.gen.NextLong()
ret = self.min + int64(FNVHash64(uint64(ret))%uint64(self.itemCount))
self.SetLastInt(ret)
return ret
}
func (self *ScrambledZipfianGenerator) NextString() string {
return self.IntegerGeneratorBase.NextString(self)
}
// Since the values are scrambed (hopefully uniformly), the mean is simply
// the middle of the range.
func (self *ScrambledZipfianGenerator) Mean() float64 {
return float64(self.min+self.max) / 2.0
}
// Hash a integer value.
func Hash(value int64) uint64 {
return FNVHash64(uint64(value))
}
const (
FNVOffsetBasis32 = uint32(0x811c9dc5)
FNVPrime32 = uint32(16777619)
)
// 32 bit FNV hash.
// Refer to http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
func FNVHash32(value uint32) uint32 {
hash := FNVOffsetBasis32
for i := 0; i < 4; i++ {
octet := value & 0x00FF
value >>= 8
hash ^= octet
hash *= FNVPrime32
}
return hash
}
const (
FNVOffsetBasis64 = uint64(0xCBF29CE484222325)
FNVPrime64 = uint64(1099511628211)
)
// 64 bit FNV hash.
// Refer to http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
func FNVHash64(value uint64) uint64 {
hash := FNVOffsetBasis64
for i := 0; i < 8; i++ {
octet := value & 0x00FF
value >>= 8
hash ^= octet
hash *= FNVPrime64
}
return hash
}
| {
if itemCount > self.countForzata {
self.countForzata, self.zetan = zeta(self.countForzata, itemCount, self.theta, self.zetan)
self.eta = (1 - math.Pow(float64(2.0/self.items), 1-self.theta)) / (1 - self.zeta2theta/self.zetan)
} else if (itemCount < self.countForzata) && (self.allowItemCountDecrease) {
self.countForzata, self.zetan = zeta(0, itemCount, self.theta, 0)
self.eta = (1 - math.Pow(float64(2.0/self.items), 1-self.theta)) / (1 - self.zeta2theta/self.zetan)
}
} | conditional_block |
zipfian_generator.go | package generator
import (
"math"
)
const (
ZipfianConstant = float64(0.99)
)
// Compute the zeta constant needed for the distribution.
// Do this incrementally for a distribution that has n items now
// but used to have st items. Use the zipfian constant theta.
// Remember the new value of n so that if we change the itemCount,
// we'll know to recompute zeta.
func zeta(st, n int64, theta, initialSum float64) (int64, float64) |
// Compute the zeta constant needed for the distribution. Do this incrementally
// for a distribution that has h items now but used to have st items.
// Use the zipfian constant theta. Remember the new value of n so that
// if we change itemCount, we'll know to recompute zeta.
func zetaStatic(st, n int64, theta, initialSum float64) float64 {
sum := initialSum
for i := st; i < n; i++ {
sum += 1 / math.Pow(float64(i+1), theta)
}
return sum
}
// A generator of a zipfian distribution. It produces a sequence of items,
// such that some items are more popular than others, according to
// a zipfian distribution. When you construct an instance of this class,
// you specify the number of items in the set to draw from, either by
// specifying an itemcount (so that the sequence is of items from 0 to
// itemcount-1) or by specifying a min and a max (so that the sequence
// is of items from min to max inclusive). After you construct the instance,
// you can change the number of items by calling NextInt() or nextLong().
//
// Note that the popular items will be clustered together, e.g. item 0
// is the most popular, item 1 the second most popular, and so on (or min
// is the most popular, min+1 the next most popular, etc.)
// If you don't want this clustering, and instead want the popular items
// scattered throughout the item space, then use ScrambledZipfianGenerator
// instead.
//
// Be aware: initializing this generator may take a long time if there are
// lots of items to choose from (e.g. over a minute for 100 million objects).
// This is because certain mathematical values need to be computed to properly
// generate a zipfian skew, and one of those values (zeta) is a sum sequence
// from 1 to n, where n is the itemCount. Note that if you increase the number
// of items in the set, we can compute a new zeta incrementally, so it should
// be fast unless you have added millions of items. However, if you decrease
// the number of items, we recompute zeta from scratch, so this can take
// a long time.
//
// The algorithm used here is from
// "Quickly Generating Billion-Record Synthetic Databases",
// Jim Gray et al, SIGMOD 1994.
//
type ZipfianGenerator struct {
*IntegerGeneratorBase
// Number of items.
items int64
// Min item to generate.
base int64
// The zipfian constant to use.
zipfianConstant float64
// Computed parameters for generating the distribution.
alpha, zetan, eta, theta, zeta2theta float64
// The number of items used to compute zetan the last time.
countForzata int64
// Flag to prevent problems. If you increase the number of items which
// the zipfian generator is allowed to choose from, this code will
// incrementally compute a new zeta value for the larger itemcount.
// However, if you decrease the number of items, the code computes
// zeta from scratch; this is expensive for large itemsets.
// Usually this is not intentional; e.g. one goroutine thinks
// the number of items is 1001 and calls "NextLong()" with that item count;
// then another goroutine who thinks the number of items is 1000 calls
// NextLong() with itemCount=1000 triggering the expensive recomputation.
// (It is expensive for 100 million items, not really for 1000 items.)
// Why did the second goroutine think there were only 1000 items?
// maybe it read the item count before the first goroutine incremented it.
// So this flag allows you to say if you really do want that recomputation.
// If true, then the code will recompute zeta if the itemcount goes down.
// If false, the code will assume itemcount only goes up, and never
// recompute.
allowItemCountDecrease bool
}
// NewZipfianGeneratorByInterval creates a zipfian generator for items between
// min and max (inclusive), using the default zipfian constant.
func NewZipfianGeneratorByInterval(min, max int64) *ZipfianGenerator {
	// BUG FIX: zetaStatic must sum over all max-min+1 items starting at rank 0.
	// Passing min as the start rank skipped the first min terms whenever
	// min > 0, producing a too-small zeta (upstream YCSB uses zetastatic(0, n)).
	zeta := zetaStatic(0, max-min+1, ZipfianConstant, 0)
	return NewZipfianGenerator(min, max, ZipfianConstant, zeta)
}
// Create a zipfian generator for items between min and max(inclusive) for
// the specified zipfian constant, using the precomputed value of zeta.
func NewZipfianGenerator(
min, max int64, zipfianConstant, zetan float64) *ZipfianGenerator {
items := max - min + 1
base := min
theta := zipfianConstant
countForzata, zeta2theta := zeta(0, 2, theta, 0)
alpha := 1.0 / (1.0 - theta)
countForzata = items
eta := (1 - math.Pow(2.0/float64(items), 1-theta)) / (1 - zeta2theta/zetan)
object := &ZipfianGenerator{
IntegerGeneratorBase: NewIntegerGeneratorBase(0),
items: items,
base: base,
zipfianConstant: zipfianConstant,
alpha: alpha,
zetan: zetan,
eta: eta,
theta: theta,
zeta2theta: zeta2theta,
countForzata: countForzata,
allowItemCountDecrease: false,
}
object.NextInt()
return object
}
// Return the next value, skewed by the zipfian distribution. The 0th item will
// be the most popular, followed by the 1st, followed by the 2nd, etc.
// (or, if min != 0, the min-th item is the most popular, the min+1th item
// the next most popular, etc.) If you want the popular items
// scattered throughout the item space, use ScrambledZipfianGenerator instead.
func (self *ZipfianGenerator) NextInt() int64 {
return self.Next(self.items)
}
// Return the next value, skewed by the zipfian distribution. The 0th item will
// be the most popular, followed by the 1st, followed by the 2nd, etc.
// (same as NextInt())
func (self *ZipfianGenerator) NextLong() int64 {
return self.Next(self.items)
}
// Next generates the next item, skewed toward lower integers: 0 (plus base)
// is the most popular, 1 the next most popular, etc. If itemCount differs
// from the count zetan was last computed for, zeta and eta are recomputed —
// incrementally when the count grew, from scratch when it shrank and
// allowItemCountDecrease is set. The drawn value is recorded via SetLastInt.
func (self *ZipfianGenerator) Next(itemCount int64) int64 {
	var ret int64
	defer func(r *int64) {
		self.IntegerGeneratorBase.SetLastInt(*r)
	}(&ret)
	if itemCount != self.countForzata {
		if itemCount > self.countForzata {
			self.countForzata, self.zetan = zeta(self.countForzata, itemCount, self.theta, self.zetan)
			// BUG FIX: float64(2.0/self.items) performed int64 division (2/items,
			// i.e. 0 for items > 2), zeroing the math.Pow term. Divide in
			// float64, exactly as the constructor does.
			self.eta = (1 - math.Pow(2.0/float64(self.items), 1-self.theta)) / (1 - self.zeta2theta/self.zetan)
		} else if (itemCount < self.countForzata) && (self.allowItemCountDecrease) {
			self.countForzata, self.zetan = zeta(0, itemCount, self.theta, 0)
			self.eta = (1 - math.Pow(2.0/float64(self.items), 1-self.theta)) / (1 - self.zeta2theta/self.zetan)
		}
	}
	u := NextFloat64()
	uz := u * self.zetan
	if uz < 1.0 {
		ret = self.base
		return ret
	}
	if uz < 1.0+math.Pow(0.5, self.theta) {
		ret = self.base + 1
		return ret
	}
	ret = self.base + int64(float64(itemCount)*math.Pow(self.eta*u-self.eta+1.0, self.alpha))
	return ret
}
func (self *ZipfianGenerator) NextString() string {
return self.IntegerGeneratorBase.NextString(self)
}
func (self *ZipfianGenerator) Mean() float64 {
panic("unsupported operation")
}
var (
Zetan = float64(26.46902820178302)
UsedZipfianConstant = float64(0.99)
ItemCount = float64(10000000000)
)
// A generator of a zipfian distribution. It produces a sequence of items,
// such that some items are more popular than others, according to a zipfian
// distribution. When you construct an instance of this class, you specify
// the number of items in the set to draw from, either by specifying
// an itemCount(so that the sequence is of items from 0 to itemCount-1) or
// by specifying a min and a max (so that the sequence is of items from min
// to max inclusive). After you construct the instance, you can change
// the number of items by calling NextInt(itemCount) or Next(itemCount).
// Unlike ZipfianGenerator, this class scatters the "popular" items across
// the item space. Use this, instead of ZipfianGenerator, if you don't want
// the head of the distribution(the popular items) clustered together.
type ScrambledZipfianGenerator struct {
*IntegerGeneratorBase
gen *ZipfianGenerator
min int64
max int64
itemCount int64
}
// Create a zipfian generator for the specified number of items.
func NewScrambledZipfianGeneratorByItems(items int64) *ScrambledZipfianGenerator {
return NewScrambledZipfianGenerator(0, items-1)
}
// NewScrambledZipfianGeneratorConstant creates a zipfian generator for items
// between min and max (inclusive) for the specified zipfian constant. If you
// use a zipfian constant other than 0.99, this will take a long time to
// complete because we need to recompute zeta.
func NewScrambledZipfianGeneratorConstant(min, max int64, constant float64) *ScrambledZipfianGenerator {
	var gen *ZipfianGenerator
	itemCount := max - min + 1
	if constant == UsedZipfianConstant {
		// BUG FIX: Zetan is precomputed for ItemCount items, so the underlying
		// generator must span that same item space (as upstream YCSB does);
		// previously itemCount was passed, mismatching the precomputed Zetan.
		gen = NewZipfianGenerator(0, int64(ItemCount), constant, Zetan)
	} else {
		// NewZipfianGenerator's max is inclusive, so pass itemCount-1 to get
		// exactly itemCount items (matching the zeta computed just above).
		zeta := zetaStatic(0, itemCount, constant, 0)
		gen = NewZipfianGenerator(0, itemCount-1, constant, zeta)
	}
	return &ScrambledZipfianGenerator{
		IntegerGeneratorBase: NewIntegerGeneratorBase(min),
		gen:                  gen,
		min:                  min,
		max:                  max,
		itemCount:            itemCount,
	}
}
// Create a zipfian generator for items between min and max(inclusive).
func NewScrambledZipfianGenerator(min, max int64) *ScrambledZipfianGenerator {
return NewScrambledZipfianGeneratorConstant(min, max, ZipfianConstant)
}
// Return the next int in the sequence.
func (self *ScrambledZipfianGenerator) NextInt() int64 {
return self.Next()
}
// return the next item in the sequence.
func (self *ScrambledZipfianGenerator) Next() int64 {
ret := self.gen.NextLong()
ret = self.min + int64(FNVHash64(uint64(ret))%uint64(self.itemCount))
self.SetLastInt(ret)
return ret
}
func (self *ScrambledZipfianGenerator) NextString() string {
return self.IntegerGeneratorBase.NextString(self)
}
// Since the values are scrambed (hopefully uniformly), the mean is simply
// the middle of the range.
func (self *ScrambledZipfianGenerator) Mean() float64 {
return float64(self.min+self.max) / 2.0
}
// Hash a integer value.
func Hash(value int64) uint64 {
return FNVHash64(uint64(value))
}
const (
FNVOffsetBasis32 = uint32(0x811c9dc5)
FNVPrime32 = uint32(16777619)
)
// 32 bit FNV hash.
// Refer to http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
func FNVHash32(value uint32) uint32 {
hash := FNVOffsetBasis32
for i := 0; i < 4; i++ {
octet := value & 0x00FF
value >>= 8
hash ^= octet
hash *= FNVPrime32
}
return hash
}
const (
FNVOffsetBasis64 = uint64(0xCBF29CE484222325)
FNVPrime64 = uint64(1099511628211)
)
// 64 bit FNV hash.
// Refer to http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
func FNVHash64(value uint64) uint64 {
hash := FNVOffsetBasis64
for i := 0; i < 8; i++ {
octet := value & 0x00FF
value >>= 8
hash ^= octet
hash *= FNVPrime64
}
return hash
}
| {
countForzata := n
return countForzata, zetaStatic(st, n, theta, initialSum)
} | identifier_body |
zipfian_generator.go | package generator
import (
"math"
)
const (
ZipfianConstant = float64(0.99)
)
// zeta computes the zeta constant for a distribution that now has n items but
// previously had st (whose partial sum is initialSum), using the zipfian
// constant theta. It returns n alongside the new sum so that callers can
// remember the item count the value was computed for.
func zeta(st, n int64, theta, initialSum float64) (int64, float64) {
	return n, zetaStatic(st, n, theta, initialSum)
}
// Compute the zeta constant needed for the distribution. Do this incrementally
// for a distribution that has h items now but used to have st items.
// Use the zipfian constant theta. Remember the new value of n so that
// if we change itemCount, we'll know to recompute zeta.
func zetaStatic(st, n int64, theta, initialSum float64) float64 {
sum := initialSum
for i := st; i < n; i++ {
sum += 1 / math.Pow(float64(i+1), theta)
}
return sum
}
// A generator of a zipfian distribution. It produces a sequence of items,
// such that some items are more popular than others, according to
// a zipfian distribution. When you construct an instance of this class,
// you specify the number of items in the set to draw from, either by
// specifying an itemcount (so that the sequence is of items from 0 to
// itemcount-1) or by specifying a min and a max (so that the sequence
// is of items from min to max inclusive). After you construct the instance,
// you can change the number of items by calling NextInt() or nextLong().
//
// Note that the popular items will be clustered together, e.g. item 0
// is the most popular, item 1 the second most popular, and so on (or min
// is the most popular, min+1 the next most popular, etc.)
// If you don't want this clustering, and instead want the popular items
// scattered throughout the item space, then use ScrambledZipfianGenerator
// instead.
//
// Be aware: initializing this generator may take a long time if there are
// lots of items to choose from (e.g. over a minute for 100 million objects).
// This is because certain mathematical values need to be computed to properly
// generate a zipfian skew, and one of those values (zeta) is a sum sequence
// from 1 to n, where n is the itemCount. Note that if you increase the number
// of items in the set, we can compute a new zeta incrementally, so it should
// be fast unless you have added millions of items. However, if you decrease
// the number of items, we recompute zeta from scratch, so this can take
// a long time.
//
// The algorithm used here is from
// "Quickly Generating Billion-Record Synthetic Databases",
// Jim Gray et al, SIGMOD 1994.
//
type ZipfianGenerator struct {
	*IntegerGeneratorBase
	// items is the number of items in the distribution.
	items int64
	// base is the smallest item to generate (values span base..base+items-1).
	base int64
	// zipfianConstant is the skew parameter theta.
	zipfianConstant float64
	// Computed parameters for generating the distribution.
	alpha, zetan, eta, theta, zeta2theta float64
	// countForzata is the number of items used to compute zetan the last time.
	countForzata int64
	// allowItemCountDecrease guards an expensive recomputation. Increasing the
	// item count updates zeta incrementally (cheap); decreasing it forces a
	// from-scratch recomputation, which is slow for large item sets (on the
	// order of 100 million items, not for thousands). A shrinking count is
	// usually unintentional — e.g. one goroutine calls NextLong with a stale,
	// smaller item count read before another goroutine incremented it. Set
	// this to true only if a decreasing item count is deliberate and the
	// recomputation cost is acceptable; when false, decreases are ignored and
	// the item count is assumed to only grow.
	allowItemCountDecrease bool
}
// NewZipfianGeneratorByInterval creates a zipfian generator for items between
// min and max (inclusive), using the default zipfian constant.
func NewZipfianGeneratorByInterval(min, max int64) *ZipfianGenerator {
	// BUG FIX: zetaStatic must sum over all max-min+1 items starting at rank 0.
	// Passing min as the start rank skipped the first min terms whenever
	// min > 0, producing a too-small zeta (upstream YCSB uses zetastatic(0, n)).
	zeta := zetaStatic(0, max-min+1, ZipfianConstant, 0)
	return NewZipfianGenerator(min, max, ZipfianConstant, zeta)
}
// NewZipfianGenerator creates a zipfian generator for items between min and
// max (inclusive) for the specified zipfian constant, using the precomputed
// zeta value zetan.
func NewZipfianGenerator(
	min, max int64, zipfianConstant, zetan float64) *ZipfianGenerator {
	itemTotal := max - min + 1
	theta := zipfianConstant
	// Only zeta2theta is needed here; the count returned by zeta (2) is
	// discarded because countForzata is recorded as the full item total below.
	_, zeta2theta := zeta(0, 2, theta, 0)
	gen := &ZipfianGenerator{
		IntegerGeneratorBase:   NewIntegerGeneratorBase(0),
		items:                  itemTotal,
		base:                   min,
		zipfianConstant:        zipfianConstant,
		alpha:                  1.0 / (1.0 - theta),
		zetan:                  zetan,
		eta:                    (1 - math.Pow(2.0/float64(itemTotal), 1-theta)) / (1 - zeta2theta/zetan),
		theta:                  theta,
		zeta2theta:             zeta2theta,
		countForzata:           itemTotal,
		allowItemCountDecrease: false,
	}
	// Draw once so the generator's "last value" state is primed.
	gen.NextInt()
	return gen
}
// NextInt returns the next value, skewed by the zipfian distribution, drawn
// over the full configured item set. The 0th item is the most popular,
// followed by the 1st, the 2nd, etc. (or, if min != 0, the min-th item is the
// most popular, the min+1th the next most popular, and so on). If you want
// the popular items scattered throughout the item space, use
// ScrambledZipfianGenerator instead.
func (self *ZipfianGenerator) NextInt() int64 {
	return self.Next(self.items)
}
// NextLong returns the next value, skewed by the zipfian distribution.
// It is identical to NextInt: both draw over the full configured item set.
func (self *ZipfianGenerator) NextLong() int64 {
	return self.Next(self.items)
}
// Next generates the next item, skewed toward lower integers: 0 (plus base)
// is the most popular, 1 the next most popular, etc. If itemCount differs
// from the count zetan was last computed for, zeta and eta are recomputed —
// incrementally when the count grew, from scratch when it shrank and
// allowItemCountDecrease is set. The drawn value is recorded via SetLastInt.
func (self *ZipfianGenerator) Next(itemCount int64) int64 {
	var ret int64
	defer func(r *int64) {
		self.IntegerGeneratorBase.SetLastInt(*r)
	}(&ret)
	if itemCount != self.countForzata {
		if itemCount > self.countForzata {
			self.countForzata, self.zetan = zeta(self.countForzata, itemCount, self.theta, self.zetan)
			// BUG FIX: float64(2.0/self.items) performed int64 division (2/items,
			// i.e. 0 for items > 2), zeroing the math.Pow term. Divide in
			// float64, exactly as the constructor does.
			self.eta = (1 - math.Pow(2.0/float64(self.items), 1-self.theta)) / (1 - self.zeta2theta/self.zetan)
		} else if (itemCount < self.countForzata) && (self.allowItemCountDecrease) {
			self.countForzata, self.zetan = zeta(0, itemCount, self.theta, 0)
			self.eta = (1 - math.Pow(2.0/float64(self.items), 1-self.theta)) / (1 - self.zeta2theta/self.zetan)
		}
	}
	u := NextFloat64()
	uz := u * self.zetan
	if uz < 1.0 {
		ret = self.base
		return ret
	}
	if uz < 1.0+math.Pow(0.5, self.theta) {
		ret = self.base + 1
		return ret
	}
	ret = self.base + int64(float64(itemCount)*math.Pow(self.eta*u-self.eta+1.0, self.alpha))
	return ret
}
// NextString renders the next zipfian-skewed value as a string via the
// embedded IntegerGeneratorBase.
func (self *ZipfianGenerator) NextString() string {
	return self.IntegerGeneratorBase.NextString(self)
}
// Mean is not defined for this generator and always panics.
func (self *ZipfianGenerator) Mean() float64 {
	panic("unsupported operation")
}
var (
	// Zetan is presumably the precomputed zeta sum for ItemCount items at
	// theta = 0.99 — TODO confirm against upstream YCSB's ZETAN constant.
	Zetan = float64(26.46902820178302)
	// UsedZipfianConstant is the constant Zetan was precomputed for.
	UsedZipfianConstant = float64(0.99)
	// ItemCount is the item-space size associated with Zetan (kept as
	// float64 here; convert where an int64 is needed).
	ItemCount = float64(10000000000)
)
// A generator of a zipfian distribution. It produces a sequence of items,
// such that some items are more popular than others, according to a zipfian
// distribution. When you construct an instance of this class, you specify
// the number of items in the set to draw from, either by specifying
// an itemCount(so that the sequence is of items from 0 to itemCount-1) or
// by specifying a min and a max (so that the sequence is of items from min
// to max inclusive). After you construct the instance, you can change
// the number of items by calling NextInt(itemCount) or Next(itemCount).
// Unlike ZipfianGenerator, this class scatters the "popular" items across
// the item space. Use this, instead of ZipfianGenerator, if you don't want
// the head of the distribution(the popular items) clustered together.
type ScrambledZipfianGenerator struct {
	*IntegerGeneratorBase
	// gen produces the underlying (clustered) zipfian sequence.
	gen *ZipfianGenerator
	// min and max bound the emitted values (both inclusive).
	min int64
	max int64
	// itemCount is max-min+1: the modulus used to fold hashes into range.
	itemCount int64
}
// Create a zipfian generator for the specified number of items.
func | (items int64) *ScrambledZipfianGenerator {
return NewScrambledZipfianGenerator(0, items-1)
}
// NewScrambledZipfianGeneratorConstant creates a zipfian generator for items
// between min and max (inclusive) for the specified zipfian constant. If you
// use a zipfian constant other than 0.99, this will take a long time to
// complete because we need to recompute zeta.
func NewScrambledZipfianGeneratorConstant(min, max int64, constant float64) *ScrambledZipfianGenerator {
	var gen *ZipfianGenerator
	itemCount := max - min + 1
	if constant == UsedZipfianConstant {
		// BUG FIX: Zetan is precomputed for ItemCount items, so the underlying
		// generator must span that same item space (as upstream YCSB does);
		// previously itemCount was passed, mismatching the precomputed Zetan.
		gen = NewZipfianGenerator(0, int64(ItemCount), constant, Zetan)
	} else {
		// NewZipfianGenerator's max is inclusive, so pass itemCount-1 to get
		// exactly itemCount items (matching the zeta computed just above).
		zeta := zetaStatic(0, itemCount, constant, 0)
		gen = NewZipfianGenerator(0, itemCount-1, constant, zeta)
	}
	return &ScrambledZipfianGenerator{
		IntegerGeneratorBase: NewIntegerGeneratorBase(min),
		gen:                  gen,
		min:                  min,
		max:                  max,
		itemCount:            itemCount,
	}
}
// NewScrambledZipfianGenerator creates a scrambled zipfian generator for
// items between min and max (inclusive), using the default zipfian constant.
func NewScrambledZipfianGenerator(min, max int64) *ScrambledZipfianGenerator {
	return NewScrambledZipfianGeneratorConstant(min, max, ZipfianConstant)
}
// NextInt returns the next int in the sequence (alias for Next).
func (self *ScrambledZipfianGenerator) NextInt() int64 {
	return self.Next()
}
// Next returns the next item in the sequence: a value is drawn from the
// underlying zipfian generator, then scattered over [min, max] by hashing it
// with FNV-64 and folding the hash into range. The result is recorded via
// SetLastInt before being returned.
func (self *ScrambledZipfianGenerator) Next() int64 {
	raw := self.gen.NextLong()
	scattered := self.min + int64(FNVHash64(uint64(raw))%uint64(self.itemCount))
	self.SetLastInt(scattered)
	return scattered
}
// NextString renders the next scattered value as a string via the embedded
// IntegerGeneratorBase.
func (self *ScrambledZipfianGenerator) NextString() string {
	return self.IntegerGeneratorBase.NextString(self)
}
// Mean returns the middle of the [min, max] range: since the values are
// scrambled (hopefully uniformly) across it, that is the expected value.
func (self *ScrambledZipfianGenerator) Mean() float64 {
	// Convert before adding so min+max cannot overflow int64 for extreme
	// ranges; the result is identical whenever the int64 sum fits.
	return (float64(self.min) + float64(self.max)) / 2.0
}
// Hash an integer value by reinterpreting it as uint64 and applying
// the 64-bit FNV hash over its low-order-first octets.
func Hash(value int64) uint64 {
	return FNVHash64(uint64(value))
}
const (
FNVOffsetBasis32 = uint32(0x811c9dc5)
FNVPrime32 = uint32(16777619)
)
// FNVHash32 applies the 32-bit FNV hash (xor-then-multiply variant) to the
// four octets of value, low-order octet first.
// Refer to http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
func FNVHash32(value uint32) uint32 {
	hash := FNVOffsetBasis32
	for octetIndex := 0; octetIndex < 4; octetIndex++ {
		hash = (hash ^ (value & 0x00FF)) * FNVPrime32
		value >>= 8
	}
	return hash
}
const (
FNVOffsetBasis64 = uint64(0xCBF29CE484222325)
FNVPrime64 = uint64(1099511628211)
)
// FNVHash64 applies the 64-bit FNV hash (xor-then-multiply variant) to the
// eight octets of value, low-order octet first.
// Refer to http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
func FNVHash64(value uint64) uint64 {
	hash := FNVOffsetBasis64
	for octetIndex := 0; octetIndex < 8; octetIndex++ {
		hash = (hash ^ (value & 0x00FF)) * FNVPrime64
		value >>= 8
	}
	return hash
}
| NewScrambledZipfianGeneratorByItems | identifier_name |
mapnificent.js | /*
Mapnificent - transit shed (travel time) visualisations
Copyright (C) 2015 Stefan Wehrmeyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* globals $, Quadtree, console, L, dcodeIO */
(function(){
'use strict';
function getProgressBar(percent) {
return $('<div class="progress">' +
'<div class="progress-bar progress-bar-mapnificent" role="progressbar" aria-valuenow="' + percent + '" aria-valuemin="0" aria-valuemax="100" style="width: ' + percent + '%">' +
'<span class="sr-only">' + percent + '% Complete</span>' +
'</div></div>');
}
function updateProgressBar(progressBar, percent) |
function MapnificentPosition(mapnificent, latlng, time) {
this.mapnificent = mapnificent;
this.latlng = latlng;
this.stationMap = null;
this.progress = 0;
this.time = time === undefined ? 15 * 60 : time;
this.init();
}
MapnificentPosition.prototype.init = function(){
var self = this;
// this.marker = new L.Marker(this.latlng, {
// draggable: true,
// opacity: 0.5
// });
// this.popup = new L.Popup({
// minWidth: 200
// });
// this.marker
// .bindPopup(this.popup)
// .addTo(this.mapnificent.map);
// this.marker.on('dragend', function(){
// self.updatePosition(self.marker.getLatLng());
// });
this.startCalculation();
};
// MapnificentPosition.prototype.updatePosition = function(latlng, time){
// var needsRedraw = false, needsRecalc = false;
// if (time !== undefined) {
// if (time !== this.time) {
// needsRedraw = true;
// }
// this.time = time;
// }
// if (this.latlng.lat !== latlng.lat || this.latlng.lng !== latlng.lng) {
// needsRecalc = true;
// needsRedraw = true;
// }
// this.latlng = latlng;
// if (needsRecalc) {
// this.marker.setLatLng(this.latlng);
// this.stationMap = null;
// this.progress = 0;
// this.startCalculation();
// this.marker.openPopup();
// }
// if (needsRedraw) {
// this.mapnificent.redraw();
// }
// if (needsRedraw || needsRecalc) {
// this.mapnificent.triggerHashUpdate();
// }
// };
MapnificentPosition.prototype.updateProgress = function(percent){
var addClass = '';
if (percent === undefined) {
var max = this.mapnificent.settings.options.estimatedMaxCalculateCalls || 100000;
percent = this.progress / max * 100;
if (percent > 99){
percent = 99;
addClass = 'progress-striped active';
}
}
this.callback(percent);
// this.marker.setOpacity(Math.max(0.5, percent / 100));
// $(this.popup.getContent()).find('.progress').addClass(addClass);
// updateProgressBar($(this.popup.getContent()), percent);
// this.popup.update();
};
// MapnificentPosition.prototype.renderProgress = function() {
// var div = $('<div class="position-control">'), self = this;
// var percent = 0;
// var progressBar = getProgressBar(percent);
// div.append(progressBar);
// var removeSpan = $('<span class="position-remove glyphicon glyphicon-trash pull-right">').on('click', function(){
// self.mapnificent.removePosition(self);
// });
// div.append(removeSpan);
// this.popup.setContent(div[0]);
// };
MapnificentPosition.prototype.setTime = function(time) {
if (time !== this.time) {
this.time = time;
this.mapnificent.redraw();
this.mapnificent.triggerHashUpdate();
}
};
// MapnificentPosition.prototype.updateControls = function(){
// var self = this;
// var div = $('<div class="position-control">');
// var minutesTime = Math.round(this.time / 60);
// var input = $('<input type="range">').attr({
// max: Math.round(this.mapnificent.settings.options.maxWalkTravelTime / 60),
// min: 0,
// value: minutesTime
// }).on('change', function(){
// self.setTime(parseInt($(this).val()) * 60);
// }).on('mousemove keyup', function(){
// $(self.popup.getContent()).find('.time-display').text($(this).val() + ' min');
// if (self.mapnificent.settings.redrawOnTimeDrag) {
// self.setTime(parseInt($(this).val()) * 60);
// }
// });
// div.append(input);
// var timeSpan = $('<div class="pull-left">' +
// '<span class="glyphicon glyphicon-time"></span> ' +
// '<span class="time-display">' + minutesTime + ' min</span></div>');
// div.append(timeSpan);
// var removeSpan = $('<span class="position-remove glyphicon glyphicon-trash pull-right">').on('click', function(){
// self.mapnificent.removePosition(self);
// });
// div.append(removeSpan);
// this.popup.setContent(div[0]);
// };
MapnificentPosition.prototype.createWorker = function(){
if (this.webworker) {
return this.webworker;
}
// this.webworker = new window.Worker(this.mapnificent.settings.baseurl + 'static/js/mapnificentworker.js');
/* new start */
this.webworker = new window.Worker(this.mapnificent.settings.baseurl + 'mapnificentworker.js');
/* new end */
this.webworker.onmessage = this.workerMessage();
this.webworker.onerror = this.workerError;
};
MapnificentPosition.prototype.workerMessage = function() {
var self = this;
return function(event){
if (event.data.status === 'working') {
self.progress = event.data.at;
// self.updateProgress();
}
else if (event.data.status === 'done') {
console.log('Count loops', event.data.count);
// self.updateProgress(100);
// self.updateControls();
self.stationMap = event.data.stationMap;
self.debugMap = event.data.debugMap;
self.mapnificent.redraw();
}
};
};
MapnificentPosition.prototype.workerError = function(){
return function(event){
console.log('error', event);
};
};
MapnificentPosition.prototype.startCalculation = function(){
// this.renderProgress();
// this.marker.openPopup();
this.createWorker();
this.webworker.postMessage({
lat: this.latlng.lat,
lng: this.latlng.lng,
// fromStations: nextStations.map(function(m){ return m[0].id; }),
stations: this.mapnificent.stationList,
lines: this.mapnificent.lines,
// distances: nextStations.map(function(m){ return m[1] / 1000; }),
reportInterval: 5000,
intervalKey: this.mapnificent.settings.intervalKey,
maxWalkTime: this.mapnificent.settings.maxWalkTime,
secondsPerM: this.mapnificent.settings.secondsPerKm / 1000,
searchRadius: this.mapnificent.settings.initialStationSearchRadius,
bounds: this.mapnificent.settings.bounds,
debug: this.mapnificent.settings.debug,
});
};
MapnificentPosition.prototype.getReachableStations = function(stationsAround, start, tileSize) {
var self = this;
var getLngRadius = function(lat, mradius){
var equatorLength = 40075017,
hLength = equatorLength * Math.cos(L.LatLng.DEG_TO_RAD * lat);
return (mradius / hLength) * 360;
};
var maxWalkTime = this.mapnificent.settings.maxWalkTime;
var secondsPerKm = this.mapnificent.settings.secondsPerKm;
var convert = function(station, reachableIn) {
var secs = Math.min((self.time - reachableIn), maxWalkTime);
var mradius = secs * (1 / secondsPerKm) * 1000;
var point = new L.LatLng(station.lat, station.lng);
var lngRadius = getLngRadius(station.lat, mradius);
var latlng2 = new L.LatLng(station.lat, station.lng - lngRadius, true);
var point2 = self.mapnificent.map.latLngToLayerPoint(latlng2);
var lpoint = self.mapnificent.map.latLngToLayerPoint(point);
var radius = Math.max(Math.round(lpoint.x - point2.x), 1);
var p = self.mapnificent.map.project(point);
var x = Math.round(p.x - start.x);
var y = Math.round(p.y - start.y);
if (x + radius < 0 || x - radius > tileSize ||
y + radius < 0 || y - radius > tileSize) {
return null;
}
return {x: x, y: y, r: radius};
};
var stations = [];
if (this.stationMap === null) {
return stations;
}
// You start walking from your position
var station = convert(this.latlng, 0);
if (station !== null) {
stations.push(station);
}
for (var i = 0; i < stationsAround.length; i += 1) {
var stationTime = this.stationMap[stationsAround[i].id];
if (stationTime === undefined || stationTime >= this.time) {
continue;
}
station = convert(stationsAround[i], stationTime);
if (station !== null) {
stations.push(station);
}
}
return stations;
};
MapnificentPosition.prototype.destroy = function(){
// this.mapnificent.map.closePopup(this.popup);
// this.mapnificent.map.removeLayer(this.popup);
// this.mapnificent.map.removeLayer(this.marker);
this.webworker.terminate();
this.webworker = null;
this.stationMap = null;
// this.marker = null;
// this.popup = null;
this.redrawTime = 0;
};
function Mapnificent(map, city, pageConfig){
this.map = map;
this.positions = [];
this.time = 60 * 10;
// FIXME: this is messy
this.city = city;
this.settings = $.extend({
intervalKey: '1-6',
baseurl: '/',
dataPath: city.dataPath || './',
maxWalkTime: 15 * 60,
secondsPerKm: 13 * 60,
initialStationSearchRadius: 1000,
redrawOnTimeDrag: false,
debug: window.location.search.indexOf("debug") !== -1,
}, city);
this.settings.options = $.extend({
maxWalkTravelTime: 1.5 * 60 * 60,
}, this.settings.options)
this.settings = $.extend(this.settings, pageConfig);
}
Mapnificent.prototype.init = function(){
var self = this, t0;
self.tilesLoading = false;
return this.loadData().done(function(data){
self.prepareData(data);
self.canvasTileLayer = L.tileLayer.canvas();
self.canvasTileLayer.on('loading', function(){
self.tilesLoading = true;
t0 = new Date().getTime();
});
self.canvasTileLayer.on('load', function(){
self.tilesLoading = false;
if (self.needsRedraw) {
self.redraw();
}
self.redrawTime = (new Date().getTime()) - t0;
console.log('reloading tile layer took', self.redrawTime, 'ms');
});
self.canvasTileLayer.drawTile = self.drawTile();
self.map.addLayer(self.canvasTileLayer);
// self.map.on('click', function(e) {
// self.addPosition(e.latlng);
// });
// self.map.on('contextmenu', function(e) {
// if (self.settings.debug) {
// self.logDebugMessage(e.latlng);
// }
// });
// self.augmentLeafletHash();
if (self.settings.coordinates) {
// self.hash.update();
if (self.positions.length === 0) {
self.addPosition(L.latLng(
self.settings.coordinates[1],
self.settings.coordinates[0]
));
}
}
});
};
Mapnificent.prototype.logDebugMessage = function(latlng) {
var self = this;
var stationsAround = this.quadtree.searchInRadius(latlng.lat, latlng.lng, 300);
this.positions.forEach(function(pos, i){
console.log('Position ', i);
if (pos.debugMap === undefined) {
console.log('No debug map present');
}
stationsAround.forEach(function(station, j){
var lastTransport;
console.log('Found station', station.Name);
if (pos.debugMap[station.id] === undefined) {
console.log('Not reached');
return;
}
var totalTime = 0
pos.debugMap[station.id].forEach(function(stop, k){
var fromName = '$walking'
var distance
var toStop = self.stationList[stop.to]
if (stop.from !== -1) {
var fromStop = self.stationList[stop.from]
fromName = fromStop.Name
distance = self.quadtree.distanceBetweenCoordinates(
fromStop.Latitude, fromStop.Longitude,
toStop.Latitude, toStop.Longitude
)
}
if (lastTransport != stop.line) {
console.log(k, 'Switching transport to', self.lineNames[stop.line],
'waiting: ', stop.waittime);
}
lastTransport = stop.line;
var currentTime = stop.time - totalTime;
totalTime = stop.time;
console.log(k, fromName, '->',
toStop.Name,
'via', self.lineNames[stop.line],
'in', currentTime,
' (' +
'stay: ' + stop.stay +
', total time: ' + stop.time +
', total walk time: ' + stop.walkTime +
', distance: ' + distance +' meters)');
});
});
});
};
Mapnificent.prototype.loadData = function(){
var dataUrl = this.settings.dataPath + this.settings.cityid;
if (this.settings.debug) {
dataUrl += '__debug';
}
dataUrl += '.bin';
const MAPNIFICENT_PROTO = {"nested":{"mapnificent":{"nested":{"MapnificentNetwork":{"fields":{"Cityid":{"type":"string","id":1},"Stops":{"rule":"repeated","type":"Stop","id":2},"Lines":{"rule":"repeated","type":"Line","id":3}},"nested":{"Stop":{"fields":{"Latitude":{"type":"double","id":1},"Longitude":{"type":"double","id":2},"TravelOptions":{"rule":"repeated","type":"TravelOption","id":3},"Name":{"type":"string","id":4}},"nested":{"TravelOption":{"fields":{"Stop":{"type":"uint32","id":1},"TravelTime":{"type":"uint32","id":2},"StayTime":{"type":"uint32","id":3},"Line":{"type":"string","id":4},"WalkDistance":{"type":"uint32","id":5}}}}},"Line":{"fields":{"LineId":{"type":"string","id":1},"LineTimes":{"rule":"repeated","type":"LineTime","id":2},"Name":{"type":"string","id":3}},"nested":{"LineTime":{"fields":{"Interval":{"type":"uint32","id":1},"Start":{"type":"uint32","id":2},"Stop":{"type":"uint32","id":3},"Weekday":{"type":"uint32","id":4}}}}}}}}}}};
var protoRoot = protobuf.Root.fromJSON(MAPNIFICENT_PROTO);
var d = $.Deferred();
// var loadProgress = $('#load-progress');
// var progressBar = getProgressBar(0.0);
// loadProgress.find('.modal-body').html(progressBar);
// loadProgress.modal('show');
var oReq = new XMLHttpRequest();
oReq.open("GET", dataUrl, true);
oReq.responseType = "arraybuffer";
oReq.onload = function(oEvent) {
var MapnificentNetwork = protoRoot.lookupType('mapnificent.MapnificentNetwork');
console.log('received binary', new Date().getTime());
var message = MapnificentNetwork.decode(new Uint8Array(oEvent.target.response));
console.log('decoded message', new Date().getTime());
// loadProgress.modal('hide');
d.resolve(message);
};
// oReq.addEventListener("progress", function updateProgress (oEvent) {
// if (oEvent.lengthComputable) {
// var percentComplete = oEvent.loaded / oEvent.total * 100;
// updateProgressBar(loadProgress, percentComplete);
// } else {
// updateProgressBar(loadProgress, 100);
// loadProgress.find('.progress').addClass('active progress-striped');
// }
// });
oReq.send();
return d;
};
Mapnificent.prototype.getLineTimesByInterval = function(lineTimes) {
var result = {};
for (var i = 0; i < lineTimes.length; i += 1) {
result[lineTimes[i].Weekday + '-' + lineTimes[i].Start] = lineTimes[i].Interval;
}
return result;
}
Mapnificent.prototype.prepareData = function(data) {
this.stationList = data.Stops;
this.lines = {};
this.lineNames = {};
var selat = Infinity, nwlat = -Infinity, nwlng = Infinity, selng = -Infinity;
for (var i = 0; i < this.stationList.length; i += 1){
this.stationList[i].id = i;
this.stationList[i].lat = data.Stops[i].Latitude;
this.stationList[i].lng = data.Stops[i].Longitude;
selat = Math.min(selat, this.stationList[i].lat);
nwlat = Math.max(nwlat, this.stationList[i].lat);
selng = Math.max(selng, this.stationList[i].lng);
nwlng = Math.min(nwlng, this.stationList[i].lng);
}
for (i = 0; i < data.Lines.length; i += 1) {
if (!data.Lines[i].LineTimes[0]) { continue; }
this.lines[data.Lines[i].LineId] = this.getLineTimesByInterval(data.Lines[i].LineTimes);
if (this.settings.debug) {
this.lineNames[data.Lines[i].LineId] = data.Lines[i].Name;
}
}
var b = 0.01;
this.settings.bounds = [selat - b, nwlat + b, nwlng - b, selng + b];
this.quadtree = Quadtree.create(
this.settings.bounds[0], this.settings.bounds[1],
this.settings.bounds[2], this.settings.bounds[3]
);
this.quadtree.insertAll(this.stationList);
};
Mapnificent.prototype.redraw = function(){
var self = this;
this.needsRedraw = true;
if (this.canvasTileLayer) {
if (this.tilesLoading) {
return;
}
L.Util.requestAnimFrame(function(){
self.needsRedraw = false;
self.canvasTileLayer.redraw();
});
}
};
Mapnificent.prototype.addPosition = function(latlng, time){
this.positions.push(new MapnificentPosition(this, latlng, time));
// this.triggerHashUpdate();
};
Mapnificent.prototype.removePosition = function(pos) {
this.positions = this.positions.filter(function(p){
return p !== pos;
});
pos.destroy();
this.redraw();
// this.triggerHashUpdate();
};
// Mapnificent.prototype.triggerHashUpdate = function() {
// this.hash.onMapMove();
// }
Mapnificent.prototype.drawTile = function() {
var self = this;
var maxWalkTime = this.settings.maxWalkTime;
var secondsPerKm = this.settings.secondsPerKm;
return function(canvas, tilePoint) {
if (!self.stationList || !self.positions.length) {
return;
}
var ctx = canvas.getContext('2d');
ctx.clearRect(0, 0, canvas.width, canvas.height);
/* Figure out how many stations we have to look at around
this tile.
*/
var tileSize = this.options.tileSize;
var start = tilePoint.multiplyBy(tileSize);
var end = start.add([tileSize, 0]);
var startLatLng = this._map.unproject(start);
var endLatLng = this._map.unproject(end);
var spanInMeters = startLatLng.distanceTo(endLatLng);
var maxWalkDistance = maxWalkTime * (1 / secondsPerKm) * 1000;
var middle = start.add([tileSize / 2, tileSize / 2]);
var latlng = this._map.unproject(middle);
var searchRadius = Math.sqrt(spanInMeters * spanInMeters + spanInMeters * spanInMeters);
searchRadius += maxWalkDistance;
var stationsAround = self.quadtree.searchInRadius(latlng.lat, latlng.lng, searchRadius);
ctx.globalCompositeOperation = 'source-over';
ctx.fillStyle = 'rgba(50,50,50,0.4)';
ctx.fillRect(0, 0, canvas.width, canvas.height);
ctx.globalCompositeOperation = 'destination-out';
ctx.fillStyle = 'rgba(0,0,0,1)';
for (var i = 0; i < self.positions.length; i += 1) {
var drawStations = self.positions[i].getReachableStations(stationsAround, start, tileSize);
for (var j = 0; j < drawStations.length; j += 1) {
ctx.beginPath();
ctx.arc(drawStations[j].x, drawStations[j].y,
drawStations[j].r, 0, 2 * Math.PI, false);
ctx.fill();
}
}
};
};
Mapnificent.prototype.augmentLeafletHash = function() {
var mapnificent = this;
var formatHash = function(map) {
var center = map.getCenter(),
zoom = map.getZoom(),
precision = Math.max(0, Math.ceil(Math.log(zoom) / Math.LN2));
var params = [
zoom,
center.lat.toFixed(precision),
center.lng.toFixed(precision)
];
mapnificent.positions.forEach(function(pos) {
params.push(pos.time);
params.push(pos.latlng.lat.toFixed(precision));
params.push(pos.latlng.lng.toFixed(precision));
});
return "#" + params.join("/");
}
var parseHash = function(hash) {
if(hash.indexOf('#') === 0) {
hash = hash.substr(1);
}
var args = hash.split("/");
var parsed;
if (args.length < 3) {
return false;
}
var zoom = parseInt(args[0], 10),
lat = parseFloat(args[1]),
lon = parseFloat(args[2]);
if (isNaN(zoom) || isNaN(lat) || isNaN(lon)) {
parsed = false;
} else {
parsed = {
center: new L.LatLng(lat, lon),
zoom: zoom
};
}
var posIndex = 0;
for (var i = 3; i < args.length; i += 3) {
var time = parseInt(args[i], 10);
lat = parseFloat(args[i + 1]);
lon = parseFloat(args[i + 2]);
if (isNaN(time) || isNaN(lat) || isNaN(lon)) {
continue
}
if (mapnificent.positions[posIndex] === undefined) {
mapnificent.addPosition(new L.LatLng(lat, lon), time);
} else {
mapnificent.positions[posIndex].updatePosition(new L.LatLng(lat, lon), time);
}
posIndex += 1;
}
for (i = posIndex; i < mapnificent.positions.length; i += 1) {
mapnificent.removePosition(mapnificent.positions[i]);
}
return parsed;
};
L.Hash.prototype.formatHash = formatHash;
L.Hash.prototype.parseHash = parseHash;
this.hash = new L.Hash(this.map);
};
//
// onMapMove: function() {
// // bail if we're moving the map (updating from a hash),
// // or if the map is not yet loaded
//
// if (this.movingMap || !this.map._loaded) {
// return false;
// }
//
// var hash = this.formatHash(this.map);
// if (this.lastHash != hash) {
// location.replace(hash);
// this.lastHash = hash;
// }
// },
/* new start */
MapnificentPosition.prototype.setProgressCallback = function(callback) {
this.callback = callback;
}
const initialAddPosition = Mapnificent.prototype.addPosition;
Mapnificent.prototype.addPosition = function(latlng, time){
initialAddPosition.bind(this)(latlng, time);
return this.positions[this.positions.length - 1];
}
/* new end */
window.Mapnificent = Mapnificent;
}()); | {
progressBar.find('.progress-bar').attr({
'aria-valuenow': percent,
style: 'width: ' + percent + '%'
});
progressBar.find('.sr-only').text(percent + '% Complete');
} | identifier_body |
mapnificent.js | /*
Mapnificent - transit shed (travel time) visualisations
Copyright (C) 2015 Stefan Wehrmeyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* globals $, Quadtree, console, L, dcodeIO */
(function(){
'use strict';
function getProgressBar(percent) {
return $('<div class="progress">' +
'<div class="progress-bar progress-bar-mapnificent" role="progressbar" aria-valuenow="' + percent + '" aria-valuemin="0" aria-valuemax="100" style="width: ' + percent + '%">' +
'<span class="sr-only">' + percent + '% Complete</span>' +
'</div></div>');
}
function updateProgressBar(progressBar, percent) {
progressBar.find('.progress-bar').attr({
'aria-valuenow': percent,
style: 'width: ' + percent + '%'
});
progressBar.find('.sr-only').text(percent + '% Complete');
}
function | (mapnificent, latlng, time) {
this.mapnificent = mapnificent;
this.latlng = latlng;
this.stationMap = null;
this.progress = 0;
this.time = time === undefined ? 15 * 60 : time;
this.init();
}
MapnificentPosition.prototype.init = function(){
var self = this;
// this.marker = new L.Marker(this.latlng, {
// draggable: true,
// opacity: 0.5
// });
// this.popup = new L.Popup({
// minWidth: 200
// });
// this.marker
// .bindPopup(this.popup)
// .addTo(this.mapnificent.map);
// this.marker.on('dragend', function(){
// self.updatePosition(self.marker.getLatLng());
// });
this.startCalculation();
};
// MapnificentPosition.prototype.updatePosition = function(latlng, time){
// var needsRedraw = false, needsRecalc = false;
// if (time !== undefined) {
// if (time !== this.time) {
// needsRedraw = true;
// }
// this.time = time;
// }
// if (this.latlng.lat !== latlng.lat || this.latlng.lng !== latlng.lng) {
// needsRecalc = true;
// needsRedraw = true;
// }
// this.latlng = latlng;
// if (needsRecalc) {
// this.marker.setLatLng(this.latlng);
// this.stationMap = null;
// this.progress = 0;
// this.startCalculation();
// this.marker.openPopup();
// }
// if (needsRedraw) {
// this.mapnificent.redraw();
// }
// if (needsRedraw || needsRecalc) {
// this.mapnificent.triggerHashUpdate();
// }
// };
MapnificentPosition.prototype.updateProgress = function(percent){
var addClass = '';
if (percent === undefined) {
var max = this.mapnificent.settings.options.estimatedMaxCalculateCalls || 100000;
percent = this.progress / max * 100;
if (percent > 99){
percent = 99;
addClass = 'progress-striped active';
}
}
this.callback(percent);
// this.marker.setOpacity(Math.max(0.5, percent / 100));
// $(this.popup.getContent()).find('.progress').addClass(addClass);
// updateProgressBar($(this.popup.getContent()), percent);
// this.popup.update();
};
// MapnificentPosition.prototype.renderProgress = function() {
// var div = $('<div class="position-control">'), self = this;
// var percent = 0;
// var progressBar = getProgressBar(percent);
// div.append(progressBar);
// var removeSpan = $('<span class="position-remove glyphicon glyphicon-trash pull-right">').on('click', function(){
// self.mapnificent.removePosition(self);
// });
// div.append(removeSpan);
// this.popup.setContent(div[0]);
// };
MapnificentPosition.prototype.setTime = function(time) {
if (time !== this.time) {
this.time = time;
this.mapnificent.redraw();
this.mapnificent.triggerHashUpdate();
}
};
// MapnificentPosition.prototype.updateControls = function(){
// var self = this;
// var div = $('<div class="position-control">');
// var minutesTime = Math.round(this.time / 60);
// var input = $('<input type="range">').attr({
// max: Math.round(this.mapnificent.settings.options.maxWalkTravelTime / 60),
// min: 0,
// value: minutesTime
// }).on('change', function(){
// self.setTime(parseInt($(this).val()) * 60);
// }).on('mousemove keyup', function(){
// $(self.popup.getContent()).find('.time-display').text($(this).val() + ' min');
// if (self.mapnificent.settings.redrawOnTimeDrag) {
// self.setTime(parseInt($(this).val()) * 60);
// }
// });
// div.append(input);
// var timeSpan = $('<div class="pull-left">' +
// '<span class="glyphicon glyphicon-time"></span> ' +
// '<span class="time-display">' + minutesTime + ' min</span></div>');
// div.append(timeSpan);
// var removeSpan = $('<span class="position-remove glyphicon glyphicon-trash pull-right">').on('click', function(){
// self.mapnificent.removePosition(self);
// });
// div.append(removeSpan);
// this.popup.setContent(div[0]);
// };
MapnificentPosition.prototype.createWorker = function(){
if (this.webworker) {
return this.webworker;
}
// this.webworker = new window.Worker(this.mapnificent.settings.baseurl + 'static/js/mapnificentworker.js');
/* new start */
this.webworker = new window.Worker(this.mapnificent.settings.baseurl + 'mapnificentworker.js');
/* new end */
this.webworker.onmessage = this.workerMessage();
this.webworker.onerror = this.workerError;
};
MapnificentPosition.prototype.workerMessage = function() {
var self = this;
return function(event){
if (event.data.status === 'working') {
self.progress = event.data.at;
// self.updateProgress();
}
else if (event.data.status === 'done') {
console.log('Count loops', event.data.count);
// self.updateProgress(100);
// self.updateControls();
self.stationMap = event.data.stationMap;
self.debugMap = event.data.debugMap;
self.mapnificent.redraw();
}
};
};
MapnificentPosition.prototype.workerError = function(){
return function(event){
console.log('error', event);
};
};
MapnificentPosition.prototype.startCalculation = function(){
// this.renderProgress();
// this.marker.openPopup();
this.createWorker();
this.webworker.postMessage({
lat: this.latlng.lat,
lng: this.latlng.lng,
// fromStations: nextStations.map(function(m){ return m[0].id; }),
stations: this.mapnificent.stationList,
lines: this.mapnificent.lines,
// distances: nextStations.map(function(m){ return m[1] / 1000; }),
reportInterval: 5000,
intervalKey: this.mapnificent.settings.intervalKey,
maxWalkTime: this.mapnificent.settings.maxWalkTime,
secondsPerM: this.mapnificent.settings.secondsPerKm / 1000,
searchRadius: this.mapnificent.settings.initialStationSearchRadius,
bounds: this.mapnificent.settings.bounds,
debug: this.mapnificent.settings.debug,
});
};
MapnificentPosition.prototype.getReachableStations = function(stationsAround, start, tileSize) {
var self = this;
var getLngRadius = function(lat, mradius){
var equatorLength = 40075017,
hLength = equatorLength * Math.cos(L.LatLng.DEG_TO_RAD * lat);
return (mradius / hLength) * 360;
};
var maxWalkTime = this.mapnificent.settings.maxWalkTime;
var secondsPerKm = this.mapnificent.settings.secondsPerKm;
var convert = function(station, reachableIn) {
var secs = Math.min((self.time - reachableIn), maxWalkTime);
var mradius = secs * (1 / secondsPerKm) * 1000;
var point = new L.LatLng(station.lat, station.lng);
var lngRadius = getLngRadius(station.lat, mradius);
var latlng2 = new L.LatLng(station.lat, station.lng - lngRadius, true);
var point2 = self.mapnificent.map.latLngToLayerPoint(latlng2);
var lpoint = self.mapnificent.map.latLngToLayerPoint(point);
var radius = Math.max(Math.round(lpoint.x - point2.x), 1);
var p = self.mapnificent.map.project(point);
var x = Math.round(p.x - start.x);
var y = Math.round(p.y - start.y);
if (x + radius < 0 || x - radius > tileSize ||
y + radius < 0 || y - radius > tileSize) {
return null;
}
return {x: x, y: y, r: radius};
};
var stations = [];
if (this.stationMap === null) {
return stations;
}
// You start walking from your position
var station = convert(this.latlng, 0);
if (station !== null) {
stations.push(station);
}
for (var i = 0; i < stationsAround.length; i += 1) {
var stationTime = this.stationMap[stationsAround[i].id];
if (stationTime === undefined || stationTime >= this.time) {
continue;
}
station = convert(stationsAround[i], stationTime);
if (station !== null) {
stations.push(station);
}
}
return stations;
};
MapnificentPosition.prototype.destroy = function(){
// this.mapnificent.map.closePopup(this.popup);
// this.mapnificent.map.removeLayer(this.popup);
// this.mapnificent.map.removeLayer(this.marker);
this.webworker.terminate();
this.webworker = null;
this.stationMap = null;
// this.marker = null;
// this.popup = null;
this.redrawTime = 0;
};
function Mapnificent(map, city, pageConfig){
this.map = map;
this.positions = [];
this.time = 60 * 10;
// FIXME: this is messy
this.city = city;
this.settings = $.extend({
intervalKey: '1-6',
baseurl: '/',
dataPath: city.dataPath || './',
maxWalkTime: 15 * 60,
secondsPerKm: 13 * 60,
initialStationSearchRadius: 1000,
redrawOnTimeDrag: false,
debug: window.location.search.indexOf("debug") !== -1,
}, city);
this.settings.options = $.extend({
maxWalkTravelTime: 1.5 * 60 * 60,
}, this.settings.options)
this.settings = $.extend(this.settings, pageConfig);
}
Mapnificent.prototype.init = function(){
var self = this, t0;
self.tilesLoading = false;
return this.loadData().done(function(data){
self.prepareData(data);
self.canvasTileLayer = L.tileLayer.canvas();
self.canvasTileLayer.on('loading', function(){
self.tilesLoading = true;
t0 = new Date().getTime();
});
self.canvasTileLayer.on('load', function(){
self.tilesLoading = false;
if (self.needsRedraw) {
self.redraw();
}
self.redrawTime = (new Date().getTime()) - t0;
console.log('reloading tile layer took', self.redrawTime, 'ms');
});
self.canvasTileLayer.drawTile = self.drawTile();
self.map.addLayer(self.canvasTileLayer);
// self.map.on('click', function(e) {
// self.addPosition(e.latlng);
// });
// self.map.on('contextmenu', function(e) {
// if (self.settings.debug) {
// self.logDebugMessage(e.latlng);
// }
// });
// self.augmentLeafletHash();
if (self.settings.coordinates) {
// self.hash.update();
if (self.positions.length === 0) {
self.addPosition(L.latLng(
self.settings.coordinates[1],
self.settings.coordinates[0]
));
}
}
});
};
Mapnificent.prototype.logDebugMessage = function(latlng) {
var self = this;
var stationsAround = this.quadtree.searchInRadius(latlng.lat, latlng.lng, 300);
this.positions.forEach(function(pos, i){
console.log('Position ', i);
if (pos.debugMap === undefined) {
console.log('No debug map present');
}
stationsAround.forEach(function(station, j){
var lastTransport;
console.log('Found station', station.Name);
if (pos.debugMap[station.id] === undefined) {
console.log('Not reached');
return;
}
var totalTime = 0
pos.debugMap[station.id].forEach(function(stop, k){
var fromName = '$walking'
var distance
var toStop = self.stationList[stop.to]
if (stop.from !== -1) {
var fromStop = self.stationList[stop.from]
fromName = fromStop.Name
distance = self.quadtree.distanceBetweenCoordinates(
fromStop.Latitude, fromStop.Longitude,
toStop.Latitude, toStop.Longitude
)
}
if (lastTransport != stop.line) {
console.log(k, 'Switching transport to', self.lineNames[stop.line],
'waiting: ', stop.waittime);
}
lastTransport = stop.line;
var currentTime = stop.time - totalTime;
totalTime = stop.time;
console.log(k, fromName, '->',
toStop.Name,
'via', self.lineNames[stop.line],
'in', currentTime,
' (' +
'stay: ' + stop.stay +
', total time: ' + stop.time +
', total walk time: ' + stop.walkTime +
', distance: ' + distance +' meters)');
});
});
});
};
Mapnificent.prototype.loadData = function(){
var dataUrl = this.settings.dataPath + this.settings.cityid;
if (this.settings.debug) {
dataUrl += '__debug';
}
dataUrl += '.bin';
const MAPNIFICENT_PROTO = {"nested":{"mapnificent":{"nested":{"MapnificentNetwork":{"fields":{"Cityid":{"type":"string","id":1},"Stops":{"rule":"repeated","type":"Stop","id":2},"Lines":{"rule":"repeated","type":"Line","id":3}},"nested":{"Stop":{"fields":{"Latitude":{"type":"double","id":1},"Longitude":{"type":"double","id":2},"TravelOptions":{"rule":"repeated","type":"TravelOption","id":3},"Name":{"type":"string","id":4}},"nested":{"TravelOption":{"fields":{"Stop":{"type":"uint32","id":1},"TravelTime":{"type":"uint32","id":2},"StayTime":{"type":"uint32","id":3},"Line":{"type":"string","id":4},"WalkDistance":{"type":"uint32","id":5}}}}},"Line":{"fields":{"LineId":{"type":"string","id":1},"LineTimes":{"rule":"repeated","type":"LineTime","id":2},"Name":{"type":"string","id":3}},"nested":{"LineTime":{"fields":{"Interval":{"type":"uint32","id":1},"Start":{"type":"uint32","id":2},"Stop":{"type":"uint32","id":3},"Weekday":{"type":"uint32","id":4}}}}}}}}}}};
var protoRoot = protobuf.Root.fromJSON(MAPNIFICENT_PROTO);
var d = $.Deferred();
// var loadProgress = $('#load-progress');
// var progressBar = getProgressBar(0.0);
// loadProgress.find('.modal-body').html(progressBar);
// loadProgress.modal('show');
var oReq = new XMLHttpRequest();
oReq.open("GET", dataUrl, true);
oReq.responseType = "arraybuffer";
oReq.onload = function(oEvent) {
var MapnificentNetwork = protoRoot.lookupType('mapnificent.MapnificentNetwork');
console.log('received binary', new Date().getTime());
var message = MapnificentNetwork.decode(new Uint8Array(oEvent.target.response));
console.log('decoded message', new Date().getTime());
// loadProgress.modal('hide');
d.resolve(message);
};
// oReq.addEventListener("progress", function updateProgress (oEvent) {
// if (oEvent.lengthComputable) {
// var percentComplete = oEvent.loaded / oEvent.total * 100;
// updateProgressBar(loadProgress, percentComplete);
// } else {
// updateProgressBar(loadProgress, 100);
// loadProgress.find('.progress').addClass('active progress-striped');
// }
// });
oReq.send();
return d;
};
Mapnificent.prototype.getLineTimesByInterval = function(lineTimes) {
var result = {};
for (var i = 0; i < lineTimes.length; i += 1) {
result[lineTimes[i].Weekday + '-' + lineTimes[i].Start] = lineTimes[i].Interval;
}
return result;
}
Mapnificent.prototype.prepareData = function(data) {
this.stationList = data.Stops;
this.lines = {};
this.lineNames = {};
var selat = Infinity, nwlat = -Infinity, nwlng = Infinity, selng = -Infinity;
for (var i = 0; i < this.stationList.length; i += 1){
this.stationList[i].id = i;
this.stationList[i].lat = data.Stops[i].Latitude;
this.stationList[i].lng = data.Stops[i].Longitude;
selat = Math.min(selat, this.stationList[i].lat);
nwlat = Math.max(nwlat, this.stationList[i].lat);
selng = Math.max(selng, this.stationList[i].lng);
nwlng = Math.min(nwlng, this.stationList[i].lng);
}
for (i = 0; i < data.Lines.length; i += 1) {
if (!data.Lines[i].LineTimes[0]) { continue; }
this.lines[data.Lines[i].LineId] = this.getLineTimesByInterval(data.Lines[i].LineTimes);
if (this.settings.debug) {
this.lineNames[data.Lines[i].LineId] = data.Lines[i].Name;
}
}
var b = 0.01;
this.settings.bounds = [selat - b, nwlat + b, nwlng - b, selng + b];
this.quadtree = Quadtree.create(
this.settings.bounds[0], this.settings.bounds[1],
this.settings.bounds[2], this.settings.bounds[3]
);
this.quadtree.insertAll(this.stationList);
};
Mapnificent.prototype.redraw = function(){
var self = this;
this.needsRedraw = true;
if (this.canvasTileLayer) {
if (this.tilesLoading) {
return;
}
L.Util.requestAnimFrame(function(){
self.needsRedraw = false;
self.canvasTileLayer.redraw();
});
}
};
Mapnificent.prototype.addPosition = function(latlng, time){
this.positions.push(new MapnificentPosition(this, latlng, time));
// this.triggerHashUpdate();
};
Mapnificent.prototype.removePosition = function(pos) {
this.positions = this.positions.filter(function(p){
return p !== pos;
});
pos.destroy();
this.redraw();
// this.triggerHashUpdate();
};
// Mapnificent.prototype.triggerHashUpdate = function() {
// this.hash.onMapMove();
// }
Mapnificent.prototype.drawTile = function() {
var self = this;
var maxWalkTime = this.settings.maxWalkTime;
var secondsPerKm = this.settings.secondsPerKm;
return function(canvas, tilePoint) {
if (!self.stationList || !self.positions.length) {
return;
}
var ctx = canvas.getContext('2d');
ctx.clearRect(0, 0, canvas.width, canvas.height);
/* Figure out how many stations we have to look at around
this tile.
*/
var tileSize = this.options.tileSize;
var start = tilePoint.multiplyBy(tileSize);
var end = start.add([tileSize, 0]);
var startLatLng = this._map.unproject(start);
var endLatLng = this._map.unproject(end);
var spanInMeters = startLatLng.distanceTo(endLatLng);
var maxWalkDistance = maxWalkTime * (1 / secondsPerKm) * 1000;
var middle = start.add([tileSize / 2, tileSize / 2]);
var latlng = this._map.unproject(middle);
var searchRadius = Math.sqrt(spanInMeters * spanInMeters + spanInMeters * spanInMeters);
searchRadius += maxWalkDistance;
var stationsAround = self.quadtree.searchInRadius(latlng.lat, latlng.lng, searchRadius);
ctx.globalCompositeOperation = 'source-over';
ctx.fillStyle = 'rgba(50,50,50,0.4)';
ctx.fillRect(0, 0, canvas.width, canvas.height);
ctx.globalCompositeOperation = 'destination-out';
ctx.fillStyle = 'rgba(0,0,0,1)';
for (var i = 0; i < self.positions.length; i += 1) {
var drawStations = self.positions[i].getReachableStations(stationsAround, start, tileSize);
for (var j = 0; j < drawStations.length; j += 1) {
ctx.beginPath();
ctx.arc(drawStations[j].x, drawStations[j].y,
drawStations[j].r, 0, 2 * Math.PI, false);
ctx.fill();
}
}
};
};
Mapnificent.prototype.augmentLeafletHash = function() {
var mapnificent = this;
var formatHash = function(map) {
var center = map.getCenter(),
zoom = map.getZoom(),
precision = Math.max(0, Math.ceil(Math.log(zoom) / Math.LN2));
var params = [
zoom,
center.lat.toFixed(precision),
center.lng.toFixed(precision)
];
mapnificent.positions.forEach(function(pos) {
params.push(pos.time);
params.push(pos.latlng.lat.toFixed(precision));
params.push(pos.latlng.lng.toFixed(precision));
});
return "#" + params.join("/");
}
var parseHash = function(hash) {
if(hash.indexOf('#') === 0) {
hash = hash.substr(1);
}
var args = hash.split("/");
var parsed;
if (args.length < 3) {
return false;
}
var zoom = parseInt(args[0], 10),
lat = parseFloat(args[1]),
lon = parseFloat(args[2]);
if (isNaN(zoom) || isNaN(lat) || isNaN(lon)) {
parsed = false;
} else {
parsed = {
center: new L.LatLng(lat, lon),
zoom: zoom
};
}
var posIndex = 0;
for (var i = 3; i < args.length; i += 3) {
var time = parseInt(args[i], 10);
lat = parseFloat(args[i + 1]);
lon = parseFloat(args[i + 2]);
if (isNaN(time) || isNaN(lat) || isNaN(lon)) {
continue
}
if (mapnificent.positions[posIndex] === undefined) {
mapnificent.addPosition(new L.LatLng(lat, lon), time);
} else {
mapnificent.positions[posIndex].updatePosition(new L.LatLng(lat, lon), time);
}
posIndex += 1;
}
for (i = posIndex; i < mapnificent.positions.length; i += 1) {
mapnificent.removePosition(mapnificent.positions[i]);
}
return parsed;
};
L.Hash.prototype.formatHash = formatHash;
L.Hash.prototype.parseHash = parseHash;
this.hash = new L.Hash(this.map);
};
//
// onMapMove: function() {
// // bail if we're moving the map (updating from a hash),
// // or if the map is not yet loaded
//
// if (this.movingMap || !this.map._loaded) {
// return false;
// }
//
// var hash = this.formatHash(this.map);
// if (this.lastHash != hash) {
// location.replace(hash);
// this.lastHash = hash;
// }
// },
/* new start */
MapnificentPosition.prototype.setProgressCallback = function(callback) {
this.callback = callback;
}
const initialAddPosition = Mapnificent.prototype.addPosition;
Mapnificent.prototype.addPosition = function(latlng, time){
initialAddPosition.bind(this)(latlng, time);
return this.positions[this.positions.length - 1];
}
/* new end */
window.Mapnificent = Mapnificent;
}()); | MapnificentPosition | identifier_name |
mapnificent.js | /*
Mapnificent - transit shed (travel time) visualisations
Copyright (C) 2015 Stefan Wehrmeyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* globals $, Quadtree, console, L, dcodeIO */
(function(){
'use strict';
function getProgressBar(percent) {
return $('<div class="progress">' +
'<div class="progress-bar progress-bar-mapnificent" role="progressbar" aria-valuenow="' + percent + '" aria-valuemin="0" aria-valuemax="100" style="width: ' + percent + '%">' +
'<span class="sr-only">' + percent + '% Complete</span>' +
'</div></div>');
}
function updateProgressBar(progressBar, percent) {
progressBar.find('.progress-bar').attr({
'aria-valuenow': percent,
style: 'width: ' + percent + '%'
});
progressBar.find('.sr-only').text(percent + '% Complete');
}
function MapnificentPosition(mapnificent, latlng, time) {
this.mapnificent = mapnificent;
this.latlng = latlng;
this.stationMap = null;
this.progress = 0;
this.time = time === undefined ? 15 * 60 : time;
this.init();
}
MapnificentPosition.prototype.init = function(){
var self = this;
// this.marker = new L.Marker(this.latlng, {
// draggable: true,
// opacity: 0.5
// });
// this.popup = new L.Popup({
// minWidth: 200
// });
// this.marker
// .bindPopup(this.popup)
// .addTo(this.mapnificent.map);
// this.marker.on('dragend', function(){
// self.updatePosition(self.marker.getLatLng());
// });
this.startCalculation();
};
// MapnificentPosition.prototype.updatePosition = function(latlng, time){
// var needsRedraw = false, needsRecalc = false;
// if (time !== undefined) {
// if (time !== this.time) {
// needsRedraw = true;
// }
// this.time = time;
// }
// if (this.latlng.lat !== latlng.lat || this.latlng.lng !== latlng.lng) {
// needsRecalc = true;
// needsRedraw = true;
// }
// this.latlng = latlng;
// if (needsRecalc) {
// this.marker.setLatLng(this.latlng);
// this.stationMap = null;
// this.progress = 0;
// this.startCalculation();
// this.marker.openPopup();
// }
// if (needsRedraw) {
// this.mapnificent.redraw();
// }
// if (needsRedraw || needsRecalc) {
// this.mapnificent.triggerHashUpdate();
// }
// };
MapnificentPosition.prototype.updateProgress = function(percent){
var addClass = '';
if (percent === undefined) {
var max = this.mapnificent.settings.options.estimatedMaxCalculateCalls || 100000;
percent = this.progress / max * 100;
if (percent > 99){
percent = 99;
addClass = 'progress-striped active';
}
}
this.callback(percent);
// this.marker.setOpacity(Math.max(0.5, percent / 100));
// $(this.popup.getContent()).find('.progress').addClass(addClass);
// updateProgressBar($(this.popup.getContent()), percent);
// this.popup.update();
};
// MapnificentPosition.prototype.renderProgress = function() {
// var div = $('<div class="position-control">'), self = this;
// var percent = 0;
// var progressBar = getProgressBar(percent);
// div.append(progressBar);
// var removeSpan = $('<span class="position-remove glyphicon glyphicon-trash pull-right">').on('click', function(){
// self.mapnificent.removePosition(self);
// });
// div.append(removeSpan);
// this.popup.setContent(div[0]);
// };
MapnificentPosition.prototype.setTime = function(time) {
if (time !== this.time) {
this.time = time;
this.mapnificent.redraw();
this.mapnificent.triggerHashUpdate();
}
};
// MapnificentPosition.prototype.updateControls = function(){
// var self = this;
// var div = $('<div class="position-control">');
// var minutesTime = Math.round(this.time / 60);
// var input = $('<input type="range">').attr({
// max: Math.round(this.mapnificent.settings.options.maxWalkTravelTime / 60),
// min: 0,
// value: minutesTime
// }).on('change', function(){
// self.setTime(parseInt($(this).val()) * 60);
// }).on('mousemove keyup', function(){
// $(self.popup.getContent()).find('.time-display').text($(this).val() + ' min');
// if (self.mapnificent.settings.redrawOnTimeDrag) {
// self.setTime(parseInt($(this).val()) * 60);
// }
// });
// div.append(input);
// var timeSpan = $('<div class="pull-left">' +
// '<span class="glyphicon glyphicon-time"></span> ' +
// '<span class="time-display">' + minutesTime + ' min</span></div>');
// div.append(timeSpan);
// var removeSpan = $('<span class="position-remove glyphicon glyphicon-trash pull-right">').on('click', function(){
// self.mapnificent.removePosition(self);
// });
// div.append(removeSpan);
// this.popup.setContent(div[0]);
// };
MapnificentPosition.prototype.createWorker = function(){
if (this.webworker) {
return this.webworker;
}
// this.webworker = new window.Worker(this.mapnificent.settings.baseurl + 'static/js/mapnificentworker.js');
/* new start */
this.webworker = new window.Worker(this.mapnificent.settings.baseurl + 'mapnificentworker.js');
/* new end */
this.webworker.onmessage = this.workerMessage();
this.webworker.onerror = this.workerError;
};
MapnificentPosition.prototype.workerMessage = function() {
var self = this;
return function(event){
if (event.data.status === 'working') {
self.progress = event.data.at;
// self.updateProgress();
}
else if (event.data.status === 'done') {
console.log('Count loops', event.data.count);
// self.updateProgress(100);
// self.updateControls();
self.stationMap = event.data.stationMap;
self.debugMap = event.data.debugMap;
self.mapnificent.redraw();
}
};
};
MapnificentPosition.prototype.workerError = function(){
return function(event){
console.log('error', event);
};
};
MapnificentPosition.prototype.startCalculation = function(){
// this.renderProgress();
// this.marker.openPopup();
this.createWorker();
this.webworker.postMessage({
lat: this.latlng.lat,
lng: this.latlng.lng,
// fromStations: nextStations.map(function(m){ return m[0].id; }),
stations: this.mapnificent.stationList,
lines: this.mapnificent.lines,
// distances: nextStations.map(function(m){ return m[1] / 1000; }),
reportInterval: 5000,
intervalKey: this.mapnificent.settings.intervalKey,
maxWalkTime: this.mapnificent.settings.maxWalkTime,
secondsPerM: this.mapnificent.settings.secondsPerKm / 1000,
searchRadius: this.mapnificent.settings.initialStationSearchRadius,
bounds: this.mapnificent.settings.bounds,
debug: this.mapnificent.settings.debug,
});
};
MapnificentPosition.prototype.getReachableStations = function(stationsAround, start, tileSize) {
var self = this;
var getLngRadius = function(lat, mradius){
var equatorLength = 40075017,
hLength = equatorLength * Math.cos(L.LatLng.DEG_TO_RAD * lat);
return (mradius / hLength) * 360;
};
var maxWalkTime = this.mapnificent.settings.maxWalkTime;
var secondsPerKm = this.mapnificent.settings.secondsPerKm;
var convert = function(station, reachableIn) {
var secs = Math.min((self.time - reachableIn), maxWalkTime);
var mradius = secs * (1 / secondsPerKm) * 1000;
var point = new L.LatLng(station.lat, station.lng);
var lngRadius = getLngRadius(station.lat, mradius);
var latlng2 = new L.LatLng(station.lat, station.lng - lngRadius, true);
var point2 = self.mapnificent.map.latLngToLayerPoint(latlng2);
var lpoint = self.mapnificent.map.latLngToLayerPoint(point); | if (x + radius < 0 || x - radius > tileSize ||
y + radius < 0 || y - radius > tileSize) {
return null;
}
return {x: x, y: y, r: radius};
};
var stations = [];
if (this.stationMap === null) {
return stations;
}
// You start walking from your position
var station = convert(this.latlng, 0);
if (station !== null) {
stations.push(station);
}
for (var i = 0; i < stationsAround.length; i += 1) {
var stationTime = this.stationMap[stationsAround[i].id];
if (stationTime === undefined || stationTime >= this.time) {
continue;
}
station = convert(stationsAround[i], stationTime);
if (station !== null) {
stations.push(station);
}
}
return stations;
};
MapnificentPosition.prototype.destroy = function(){
// this.mapnificent.map.closePopup(this.popup);
// this.mapnificent.map.removeLayer(this.popup);
// this.mapnificent.map.removeLayer(this.marker);
this.webworker.terminate();
this.webworker = null;
this.stationMap = null;
// this.marker = null;
// this.popup = null;
this.redrawTime = 0;
};
function Mapnificent(map, city, pageConfig){
this.map = map;
this.positions = [];
this.time = 60 * 10;
// FIXME: this is messy
this.city = city;
this.settings = $.extend({
intervalKey: '1-6',
baseurl: '/',
dataPath: city.dataPath || './',
maxWalkTime: 15 * 60,
secondsPerKm: 13 * 60,
initialStationSearchRadius: 1000,
redrawOnTimeDrag: false,
debug: window.location.search.indexOf("debug") !== -1,
}, city);
this.settings.options = $.extend({
maxWalkTravelTime: 1.5 * 60 * 60,
}, this.settings.options)
this.settings = $.extend(this.settings, pageConfig);
}
Mapnificent.prototype.init = function(){
var self = this, t0;
self.tilesLoading = false;
return this.loadData().done(function(data){
self.prepareData(data);
self.canvasTileLayer = L.tileLayer.canvas();
self.canvasTileLayer.on('loading', function(){
self.tilesLoading = true;
t0 = new Date().getTime();
});
self.canvasTileLayer.on('load', function(){
self.tilesLoading = false;
if (self.needsRedraw) {
self.redraw();
}
self.redrawTime = (new Date().getTime()) - t0;
console.log('reloading tile layer took', self.redrawTime, 'ms');
});
self.canvasTileLayer.drawTile = self.drawTile();
self.map.addLayer(self.canvasTileLayer);
// self.map.on('click', function(e) {
// self.addPosition(e.latlng);
// });
// self.map.on('contextmenu', function(e) {
// if (self.settings.debug) {
// self.logDebugMessage(e.latlng);
// }
// });
// self.augmentLeafletHash();
if (self.settings.coordinates) {
// self.hash.update();
if (self.positions.length === 0) {
self.addPosition(L.latLng(
self.settings.coordinates[1],
self.settings.coordinates[0]
));
}
}
});
};
Mapnificent.prototype.logDebugMessage = function(latlng) {
var self = this;
var stationsAround = this.quadtree.searchInRadius(latlng.lat, latlng.lng, 300);
this.positions.forEach(function(pos, i){
console.log('Position ', i);
if (pos.debugMap === undefined) {
console.log('No debug map present');
}
stationsAround.forEach(function(station, j){
var lastTransport;
console.log('Found station', station.Name);
if (pos.debugMap[station.id] === undefined) {
console.log('Not reached');
return;
}
var totalTime = 0
pos.debugMap[station.id].forEach(function(stop, k){
var fromName = '$walking'
var distance
var toStop = self.stationList[stop.to]
if (stop.from !== -1) {
var fromStop = self.stationList[stop.from]
fromName = fromStop.Name
distance = self.quadtree.distanceBetweenCoordinates(
fromStop.Latitude, fromStop.Longitude,
toStop.Latitude, toStop.Longitude
)
}
if (lastTransport != stop.line) {
console.log(k, 'Switching transport to', self.lineNames[stop.line],
'waiting: ', stop.waittime);
}
lastTransport = stop.line;
var currentTime = stop.time - totalTime;
totalTime = stop.time;
console.log(k, fromName, '->',
toStop.Name,
'via', self.lineNames[stop.line],
'in', currentTime,
' (' +
'stay: ' + stop.stay +
', total time: ' + stop.time +
', total walk time: ' + stop.walkTime +
', distance: ' + distance +' meters)');
});
});
});
};
Mapnificent.prototype.loadData = function(){
var dataUrl = this.settings.dataPath + this.settings.cityid;
if (this.settings.debug) {
dataUrl += '__debug';
}
dataUrl += '.bin';
const MAPNIFICENT_PROTO = {"nested":{"mapnificent":{"nested":{"MapnificentNetwork":{"fields":{"Cityid":{"type":"string","id":1},"Stops":{"rule":"repeated","type":"Stop","id":2},"Lines":{"rule":"repeated","type":"Line","id":3}},"nested":{"Stop":{"fields":{"Latitude":{"type":"double","id":1},"Longitude":{"type":"double","id":2},"TravelOptions":{"rule":"repeated","type":"TravelOption","id":3},"Name":{"type":"string","id":4}},"nested":{"TravelOption":{"fields":{"Stop":{"type":"uint32","id":1},"TravelTime":{"type":"uint32","id":2},"StayTime":{"type":"uint32","id":3},"Line":{"type":"string","id":4},"WalkDistance":{"type":"uint32","id":5}}}}},"Line":{"fields":{"LineId":{"type":"string","id":1},"LineTimes":{"rule":"repeated","type":"LineTime","id":2},"Name":{"type":"string","id":3}},"nested":{"LineTime":{"fields":{"Interval":{"type":"uint32","id":1},"Start":{"type":"uint32","id":2},"Stop":{"type":"uint32","id":3},"Weekday":{"type":"uint32","id":4}}}}}}}}}}};
var protoRoot = protobuf.Root.fromJSON(MAPNIFICENT_PROTO);
var d = $.Deferred();
// var loadProgress = $('#load-progress');
// var progressBar = getProgressBar(0.0);
// loadProgress.find('.modal-body').html(progressBar);
// loadProgress.modal('show');
var oReq = new XMLHttpRequest();
oReq.open("GET", dataUrl, true);
oReq.responseType = "arraybuffer";
oReq.onload = function(oEvent) {
var MapnificentNetwork = protoRoot.lookupType('mapnificent.MapnificentNetwork');
console.log('received binary', new Date().getTime());
var message = MapnificentNetwork.decode(new Uint8Array(oEvent.target.response));
console.log('decoded message', new Date().getTime());
// loadProgress.modal('hide');
d.resolve(message);
};
// oReq.addEventListener("progress", function updateProgress (oEvent) {
// if (oEvent.lengthComputable) {
// var percentComplete = oEvent.loaded / oEvent.total * 100;
// updateProgressBar(loadProgress, percentComplete);
// } else {
// updateProgressBar(loadProgress, 100);
// loadProgress.find('.progress').addClass('active progress-striped');
// }
// });
oReq.send();
return d;
};
Mapnificent.prototype.getLineTimesByInterval = function(lineTimes) {
var result = {};
for (var i = 0; i < lineTimes.length; i += 1) {
result[lineTimes[i].Weekday + '-' + lineTimes[i].Start] = lineTimes[i].Interval;
}
return result;
}
Mapnificent.prototype.prepareData = function(data) {
this.stationList = data.Stops;
this.lines = {};
this.lineNames = {};
var selat = Infinity, nwlat = -Infinity, nwlng = Infinity, selng = -Infinity;
for (var i = 0; i < this.stationList.length; i += 1){
this.stationList[i].id = i;
this.stationList[i].lat = data.Stops[i].Latitude;
this.stationList[i].lng = data.Stops[i].Longitude;
selat = Math.min(selat, this.stationList[i].lat);
nwlat = Math.max(nwlat, this.stationList[i].lat);
selng = Math.max(selng, this.stationList[i].lng);
nwlng = Math.min(nwlng, this.stationList[i].lng);
}
for (i = 0; i < data.Lines.length; i += 1) {
if (!data.Lines[i].LineTimes[0]) { continue; }
this.lines[data.Lines[i].LineId] = this.getLineTimesByInterval(data.Lines[i].LineTimes);
if (this.settings.debug) {
this.lineNames[data.Lines[i].LineId] = data.Lines[i].Name;
}
}
var b = 0.01;
this.settings.bounds = [selat - b, nwlat + b, nwlng - b, selng + b];
this.quadtree = Quadtree.create(
this.settings.bounds[0], this.settings.bounds[1],
this.settings.bounds[2], this.settings.bounds[3]
);
this.quadtree.insertAll(this.stationList);
};
Mapnificent.prototype.redraw = function(){
var self = this;
this.needsRedraw = true;
if (this.canvasTileLayer) {
if (this.tilesLoading) {
return;
}
L.Util.requestAnimFrame(function(){
self.needsRedraw = false;
self.canvasTileLayer.redraw();
});
}
};
Mapnificent.prototype.addPosition = function(latlng, time){
this.positions.push(new MapnificentPosition(this, latlng, time));
// this.triggerHashUpdate();
};
Mapnificent.prototype.removePosition = function(pos) {
this.positions = this.positions.filter(function(p){
return p !== pos;
});
pos.destroy();
this.redraw();
// this.triggerHashUpdate();
};
// Mapnificent.prototype.triggerHashUpdate = function() {
// this.hash.onMapMove();
// }
Mapnificent.prototype.drawTile = function() {
var self = this;
var maxWalkTime = this.settings.maxWalkTime;
var secondsPerKm = this.settings.secondsPerKm;
return function(canvas, tilePoint) {
if (!self.stationList || !self.positions.length) {
return;
}
var ctx = canvas.getContext('2d');
ctx.clearRect(0, 0, canvas.width, canvas.height);
/* Figure out how many stations we have to look at around
this tile.
*/
var tileSize = this.options.tileSize;
var start = tilePoint.multiplyBy(tileSize);
var end = start.add([tileSize, 0]);
var startLatLng = this._map.unproject(start);
var endLatLng = this._map.unproject(end);
var spanInMeters = startLatLng.distanceTo(endLatLng);
var maxWalkDistance = maxWalkTime * (1 / secondsPerKm) * 1000;
var middle = start.add([tileSize / 2, tileSize / 2]);
var latlng = this._map.unproject(middle);
var searchRadius = Math.sqrt(spanInMeters * spanInMeters + spanInMeters * spanInMeters);
searchRadius += maxWalkDistance;
var stationsAround = self.quadtree.searchInRadius(latlng.lat, latlng.lng, searchRadius);
ctx.globalCompositeOperation = 'source-over';
ctx.fillStyle = 'rgba(50,50,50,0.4)';
ctx.fillRect(0, 0, canvas.width, canvas.height);
ctx.globalCompositeOperation = 'destination-out';
ctx.fillStyle = 'rgba(0,0,0,1)';
for (var i = 0; i < self.positions.length; i += 1) {
var drawStations = self.positions[i].getReachableStations(stationsAround, start, tileSize);
for (var j = 0; j < drawStations.length; j += 1) {
ctx.beginPath();
ctx.arc(drawStations[j].x, drawStations[j].y,
drawStations[j].r, 0, 2 * Math.PI, false);
ctx.fill();
}
}
};
};
Mapnificent.prototype.augmentLeafletHash = function() {
var mapnificent = this;
var formatHash = function(map) {
var center = map.getCenter(),
zoom = map.getZoom(),
precision = Math.max(0, Math.ceil(Math.log(zoom) / Math.LN2));
var params = [
zoom,
center.lat.toFixed(precision),
center.lng.toFixed(precision)
];
mapnificent.positions.forEach(function(pos) {
params.push(pos.time);
params.push(pos.latlng.lat.toFixed(precision));
params.push(pos.latlng.lng.toFixed(precision));
});
return "#" + params.join("/");
}
var parseHash = function(hash) {
if(hash.indexOf('#') === 0) {
hash = hash.substr(1);
}
var args = hash.split("/");
var parsed;
if (args.length < 3) {
return false;
}
var zoom = parseInt(args[0], 10),
lat = parseFloat(args[1]),
lon = parseFloat(args[2]);
if (isNaN(zoom) || isNaN(lat) || isNaN(lon)) {
parsed = false;
} else {
parsed = {
center: new L.LatLng(lat, lon),
zoom: zoom
};
}
var posIndex = 0;
for (var i = 3; i < args.length; i += 3) {
var time = parseInt(args[i], 10);
lat = parseFloat(args[i + 1]);
lon = parseFloat(args[i + 2]);
if (isNaN(time) || isNaN(lat) || isNaN(lon)) {
continue
}
if (mapnificent.positions[posIndex] === undefined) {
mapnificent.addPosition(new L.LatLng(lat, lon), time);
} else {
mapnificent.positions[posIndex].updatePosition(new L.LatLng(lat, lon), time);
}
posIndex += 1;
}
for (i = posIndex; i < mapnificent.positions.length; i += 1) {
mapnificent.removePosition(mapnificent.positions[i]);
}
return parsed;
};
L.Hash.prototype.formatHash = formatHash;
L.Hash.prototype.parseHash = parseHash;
this.hash = new L.Hash(this.map);
};
//
// onMapMove: function() {
// // bail if we're moving the map (updating from a hash),
// // or if the map is not yet loaded
//
// if (this.movingMap || !this.map._loaded) {
// return false;
// }
//
// var hash = this.formatHash(this.map);
// if (this.lastHash != hash) {
// location.replace(hash);
// this.lastHash = hash;
// }
// },
/* new start */
MapnificentPosition.prototype.setProgressCallback = function(callback) {
this.callback = callback;
}
const initialAddPosition = Mapnificent.prototype.addPosition;
Mapnificent.prototype.addPosition = function(latlng, time){
initialAddPosition.bind(this)(latlng, time);
return this.positions[this.positions.length - 1];
}
/* new end */
window.Mapnificent = Mapnificent;
}()); | var radius = Math.max(Math.round(lpoint.x - point2.x), 1);
var p = self.mapnificent.map.project(point);
var x = Math.round(p.x - start.x);
var y = Math.round(p.y - start.y); | random_line_split |
mapnificent.js | /*
Mapnificent - transit shed (travel time) visualisations
Copyright (C) 2015 Stefan Wehrmeyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* globals $, Quadtree, console, L, dcodeIO */
(function(){
'use strict';
function getProgressBar(percent) {
return $('<div class="progress">' +
'<div class="progress-bar progress-bar-mapnificent" role="progressbar" aria-valuenow="' + percent + '" aria-valuemin="0" aria-valuemax="100" style="width: ' + percent + '%">' +
'<span class="sr-only">' + percent + '% Complete</span>' +
'</div></div>');
}
function updateProgressBar(progressBar, percent) {
progressBar.find('.progress-bar').attr({
'aria-valuenow': percent,
style: 'width: ' + percent + '%'
});
progressBar.find('.sr-only').text(percent + '% Complete');
}
function MapnificentPosition(mapnificent, latlng, time) {
this.mapnificent = mapnificent;
this.latlng = latlng;
this.stationMap = null;
this.progress = 0;
this.time = time === undefined ? 15 * 60 : time;
this.init();
}
MapnificentPosition.prototype.init = function(){
var self = this;
// this.marker = new L.Marker(this.latlng, {
// draggable: true,
// opacity: 0.5
// });
// this.popup = new L.Popup({
// minWidth: 200
// });
// this.marker
// .bindPopup(this.popup)
// .addTo(this.mapnificent.map);
// this.marker.on('dragend', function(){
// self.updatePosition(self.marker.getLatLng());
// });
this.startCalculation();
};
// MapnificentPosition.prototype.updatePosition = function(latlng, time){
// var needsRedraw = false, needsRecalc = false;
// if (time !== undefined) {
// if (time !== this.time) {
// needsRedraw = true;
// }
// this.time = time;
// }
// if (this.latlng.lat !== latlng.lat || this.latlng.lng !== latlng.lng) {
// needsRecalc = true;
// needsRedraw = true;
// }
// this.latlng = latlng;
// if (needsRecalc) {
// this.marker.setLatLng(this.latlng);
// this.stationMap = null;
// this.progress = 0;
// this.startCalculation();
// this.marker.openPopup();
// }
// if (needsRedraw) {
// this.mapnificent.redraw();
// }
// if (needsRedraw || needsRecalc) {
// this.mapnificent.triggerHashUpdate();
// }
// };
MapnificentPosition.prototype.updateProgress = function(percent){
var addClass = '';
if (percent === undefined) {
var max = this.mapnificent.settings.options.estimatedMaxCalculateCalls || 100000;
percent = this.progress / max * 100;
if (percent > 99){
percent = 99;
addClass = 'progress-striped active';
}
}
this.callback(percent);
// this.marker.setOpacity(Math.max(0.5, percent / 100));
// $(this.popup.getContent()).find('.progress').addClass(addClass);
// updateProgressBar($(this.popup.getContent()), percent);
// this.popup.update();
};
// MapnificentPosition.prototype.renderProgress = function() {
// var div = $('<div class="position-control">'), self = this;
// var percent = 0;
// var progressBar = getProgressBar(percent);
// div.append(progressBar);
// var removeSpan = $('<span class="position-remove glyphicon glyphicon-trash pull-right">').on('click', function(){
// self.mapnificent.removePosition(self);
// });
// div.append(removeSpan);
// this.popup.setContent(div[0]);
// };
MapnificentPosition.prototype.setTime = function(time) {
if (time !== this.time) {
this.time = time;
this.mapnificent.redraw();
this.mapnificent.triggerHashUpdate();
}
};
// MapnificentPosition.prototype.updateControls = function(){
// var self = this;
// var div = $('<div class="position-control">');
// var minutesTime = Math.round(this.time / 60);
// var input = $('<input type="range">').attr({
// max: Math.round(this.mapnificent.settings.options.maxWalkTravelTime / 60),
// min: 0,
// value: minutesTime
// }).on('change', function(){
// self.setTime(parseInt($(this).val()) * 60);
// }).on('mousemove keyup', function(){
// $(self.popup.getContent()).find('.time-display').text($(this).val() + ' min');
// if (self.mapnificent.settings.redrawOnTimeDrag) {
// self.setTime(parseInt($(this).val()) * 60);
// }
// });
// div.append(input);
// var timeSpan = $('<div class="pull-left">' +
// '<span class="glyphicon glyphicon-time"></span> ' +
// '<span class="time-display">' + minutesTime + ' min</span></div>');
// div.append(timeSpan);
// var removeSpan = $('<span class="position-remove glyphicon glyphicon-trash pull-right">').on('click', function(){
// self.mapnificent.removePosition(self);
// });
// div.append(removeSpan);
// this.popup.setContent(div[0]);
// };
MapnificentPosition.prototype.createWorker = function(){
if (this.webworker) {
return this.webworker;
}
// this.webworker = new window.Worker(this.mapnificent.settings.baseurl + 'static/js/mapnificentworker.js');
/* new start */
this.webworker = new window.Worker(this.mapnificent.settings.baseurl + 'mapnificentworker.js');
/* new end */
this.webworker.onmessage = this.workerMessage();
this.webworker.onerror = this.workerError;
};
MapnificentPosition.prototype.workerMessage = function() {
var self = this;
return function(event){
if (event.data.status === 'working') {
self.progress = event.data.at;
// self.updateProgress();
}
else if (event.data.status === 'done') {
console.log('Count loops', event.data.count);
// self.updateProgress(100);
// self.updateControls();
self.stationMap = event.data.stationMap;
self.debugMap = event.data.debugMap;
self.mapnificent.redraw();
}
};
};
MapnificentPosition.prototype.workerError = function(){
return function(event){
console.log('error', event);
};
};
MapnificentPosition.prototype.startCalculation = function(){
// this.renderProgress();
// this.marker.openPopup();
this.createWorker();
this.webworker.postMessage({
lat: this.latlng.lat,
lng: this.latlng.lng,
// fromStations: nextStations.map(function(m){ return m[0].id; }),
stations: this.mapnificent.stationList,
lines: this.mapnificent.lines,
// distances: nextStations.map(function(m){ return m[1] / 1000; }),
reportInterval: 5000,
intervalKey: this.mapnificent.settings.intervalKey,
maxWalkTime: this.mapnificent.settings.maxWalkTime,
secondsPerM: this.mapnificent.settings.secondsPerKm / 1000,
searchRadius: this.mapnificent.settings.initialStationSearchRadius,
bounds: this.mapnificent.settings.bounds,
debug: this.mapnificent.settings.debug,
});
};
MapnificentPosition.prototype.getReachableStations = function(stationsAround, start, tileSize) {
var self = this;
var getLngRadius = function(lat, mradius){
var equatorLength = 40075017,
hLength = equatorLength * Math.cos(L.LatLng.DEG_TO_RAD * lat);
return (mradius / hLength) * 360;
};
var maxWalkTime = this.mapnificent.settings.maxWalkTime;
var secondsPerKm = this.mapnificent.settings.secondsPerKm;
var convert = function(station, reachableIn) {
var secs = Math.min((self.time - reachableIn), maxWalkTime);
var mradius = secs * (1 / secondsPerKm) * 1000;
var point = new L.LatLng(station.lat, station.lng);
var lngRadius = getLngRadius(station.lat, mradius);
var latlng2 = new L.LatLng(station.lat, station.lng - lngRadius, true);
var point2 = self.mapnificent.map.latLngToLayerPoint(latlng2);
var lpoint = self.mapnificent.map.latLngToLayerPoint(point);
var radius = Math.max(Math.round(lpoint.x - point2.x), 1);
var p = self.mapnificent.map.project(point);
var x = Math.round(p.x - start.x);
var y = Math.round(p.y - start.y);
if (x + radius < 0 || x - radius > tileSize ||
y + radius < 0 || y - radius > tileSize) {
return null;
}
return {x: x, y: y, r: radius};
};
var stations = [];
if (this.stationMap === null) {
return stations;
}
// You start walking from your position
var station = convert(this.latlng, 0);
if (station !== null) {
stations.push(station);
}
for (var i = 0; i < stationsAround.length; i += 1) {
var stationTime = this.stationMap[stationsAround[i].id];
if (stationTime === undefined || stationTime >= this.time) {
continue;
}
station = convert(stationsAround[i], stationTime);
if (station !== null) {
stations.push(station);
}
}
return stations;
};
MapnificentPosition.prototype.destroy = function(){
// this.mapnificent.map.closePopup(this.popup);
// this.mapnificent.map.removeLayer(this.popup);
// this.mapnificent.map.removeLayer(this.marker);
this.webworker.terminate();
this.webworker = null;
this.stationMap = null;
// this.marker = null;
// this.popup = null;
this.redrawTime = 0;
};
function Mapnificent(map, city, pageConfig){
this.map = map;
this.positions = [];
this.time = 60 * 10;
// FIXME: this is messy
this.city = city;
this.settings = $.extend({
intervalKey: '1-6',
baseurl: '/',
dataPath: city.dataPath || './',
maxWalkTime: 15 * 60,
secondsPerKm: 13 * 60,
initialStationSearchRadius: 1000,
redrawOnTimeDrag: false,
debug: window.location.search.indexOf("debug") !== -1,
}, city);
this.settings.options = $.extend({
maxWalkTravelTime: 1.5 * 60 * 60,
}, this.settings.options)
this.settings = $.extend(this.settings, pageConfig);
}
Mapnificent.prototype.init = function(){
var self = this, t0;
self.tilesLoading = false;
return this.loadData().done(function(data){
self.prepareData(data);
self.canvasTileLayer = L.tileLayer.canvas();
self.canvasTileLayer.on('loading', function(){
self.tilesLoading = true;
t0 = new Date().getTime();
});
self.canvasTileLayer.on('load', function(){
self.tilesLoading = false;
if (self.needsRedraw) {
self.redraw();
}
self.redrawTime = (new Date().getTime()) - t0;
console.log('reloading tile layer took', self.redrawTime, 'ms');
});
self.canvasTileLayer.drawTile = self.drawTile();
self.map.addLayer(self.canvasTileLayer);
// self.map.on('click', function(e) {
// self.addPosition(e.latlng);
// });
// self.map.on('contextmenu', function(e) {
// if (self.settings.debug) {
// self.logDebugMessage(e.latlng);
// }
// });
// self.augmentLeafletHash();
if (self.settings.coordinates) |
});
};
Mapnificent.prototype.logDebugMessage = function(latlng) {
var self = this;
var stationsAround = this.quadtree.searchInRadius(latlng.lat, latlng.lng, 300);
this.positions.forEach(function(pos, i){
console.log('Position ', i);
if (pos.debugMap === undefined) {
console.log('No debug map present');
}
stationsAround.forEach(function(station, j){
var lastTransport;
console.log('Found station', station.Name);
if (pos.debugMap[station.id] === undefined) {
console.log('Not reached');
return;
}
var totalTime = 0
pos.debugMap[station.id].forEach(function(stop, k){
var fromName = '$walking'
var distance
var toStop = self.stationList[stop.to]
if (stop.from !== -1) {
var fromStop = self.stationList[stop.from]
fromName = fromStop.Name
distance = self.quadtree.distanceBetweenCoordinates(
fromStop.Latitude, fromStop.Longitude,
toStop.Latitude, toStop.Longitude
)
}
if (lastTransport != stop.line) {
console.log(k, 'Switching transport to', self.lineNames[stop.line],
'waiting: ', stop.waittime);
}
lastTransport = stop.line;
var currentTime = stop.time - totalTime;
totalTime = stop.time;
console.log(k, fromName, '->',
toStop.Name,
'via', self.lineNames[stop.line],
'in', currentTime,
' (' +
'stay: ' + stop.stay +
', total time: ' + stop.time +
', total walk time: ' + stop.walkTime +
', distance: ' + distance +' meters)');
});
});
});
};
Mapnificent.prototype.loadData = function(){
var dataUrl = this.settings.dataPath + this.settings.cityid;
if (this.settings.debug) {
dataUrl += '__debug';
}
dataUrl += '.bin';
const MAPNIFICENT_PROTO = {"nested":{"mapnificent":{"nested":{"MapnificentNetwork":{"fields":{"Cityid":{"type":"string","id":1},"Stops":{"rule":"repeated","type":"Stop","id":2},"Lines":{"rule":"repeated","type":"Line","id":3}},"nested":{"Stop":{"fields":{"Latitude":{"type":"double","id":1},"Longitude":{"type":"double","id":2},"TravelOptions":{"rule":"repeated","type":"TravelOption","id":3},"Name":{"type":"string","id":4}},"nested":{"TravelOption":{"fields":{"Stop":{"type":"uint32","id":1},"TravelTime":{"type":"uint32","id":2},"StayTime":{"type":"uint32","id":3},"Line":{"type":"string","id":4},"WalkDistance":{"type":"uint32","id":5}}}}},"Line":{"fields":{"LineId":{"type":"string","id":1},"LineTimes":{"rule":"repeated","type":"LineTime","id":2},"Name":{"type":"string","id":3}},"nested":{"LineTime":{"fields":{"Interval":{"type":"uint32","id":1},"Start":{"type":"uint32","id":2},"Stop":{"type":"uint32","id":3},"Weekday":{"type":"uint32","id":4}}}}}}}}}}};
var protoRoot = protobuf.Root.fromJSON(MAPNIFICENT_PROTO);
var d = $.Deferred();
// var loadProgress = $('#load-progress');
// var progressBar = getProgressBar(0.0);
// loadProgress.find('.modal-body').html(progressBar);
// loadProgress.modal('show');
var oReq = new XMLHttpRequest();
oReq.open("GET", dataUrl, true);
oReq.responseType = "arraybuffer";
oReq.onload = function(oEvent) {
var MapnificentNetwork = protoRoot.lookupType('mapnificent.MapnificentNetwork');
console.log('received binary', new Date().getTime());
var message = MapnificentNetwork.decode(new Uint8Array(oEvent.target.response));
console.log('decoded message', new Date().getTime());
// loadProgress.modal('hide');
d.resolve(message);
};
// oReq.addEventListener("progress", function updateProgress (oEvent) {
// if (oEvent.lengthComputable) {
// var percentComplete = oEvent.loaded / oEvent.total * 100;
// updateProgressBar(loadProgress, percentComplete);
// } else {
// updateProgressBar(loadProgress, 100);
// loadProgress.find('.progress').addClass('active progress-striped');
// }
// });
oReq.send();
return d;
};
Mapnificent.prototype.getLineTimesByInterval = function(lineTimes) {
var result = {};
for (var i = 0; i < lineTimes.length; i += 1) {
result[lineTimes[i].Weekday + '-' + lineTimes[i].Start] = lineTimes[i].Interval;
}
return result;
}
Mapnificent.prototype.prepareData = function(data) {
this.stationList = data.Stops;
this.lines = {};
this.lineNames = {};
var selat = Infinity, nwlat = -Infinity, nwlng = Infinity, selng = -Infinity;
for (var i = 0; i < this.stationList.length; i += 1){
this.stationList[i].id = i;
this.stationList[i].lat = data.Stops[i].Latitude;
this.stationList[i].lng = data.Stops[i].Longitude;
selat = Math.min(selat, this.stationList[i].lat);
nwlat = Math.max(nwlat, this.stationList[i].lat);
selng = Math.max(selng, this.stationList[i].lng);
nwlng = Math.min(nwlng, this.stationList[i].lng);
}
for (i = 0; i < data.Lines.length; i += 1) {
if (!data.Lines[i].LineTimes[0]) { continue; }
this.lines[data.Lines[i].LineId] = this.getLineTimesByInterval(data.Lines[i].LineTimes);
if (this.settings.debug) {
this.lineNames[data.Lines[i].LineId] = data.Lines[i].Name;
}
}
var b = 0.01;
this.settings.bounds = [selat - b, nwlat + b, nwlng - b, selng + b];
this.quadtree = Quadtree.create(
this.settings.bounds[0], this.settings.bounds[1],
this.settings.bounds[2], this.settings.bounds[3]
);
this.quadtree.insertAll(this.stationList);
};
Mapnificent.prototype.redraw = function(){
var self = this;
this.needsRedraw = true;
if (this.canvasTileLayer) {
if (this.tilesLoading) {
return;
}
L.Util.requestAnimFrame(function(){
self.needsRedraw = false;
self.canvasTileLayer.redraw();
});
}
};
Mapnificent.prototype.addPosition = function(latlng, time){
this.positions.push(new MapnificentPosition(this, latlng, time));
// this.triggerHashUpdate();
};
Mapnificent.prototype.removePosition = function(pos) {
this.positions = this.positions.filter(function(p){
return p !== pos;
});
pos.destroy();
this.redraw();
// this.triggerHashUpdate();
};
// Mapnificent.prototype.triggerHashUpdate = function() {
// this.hash.onMapMove();
// }
Mapnificent.prototype.drawTile = function() {
var self = this;
var maxWalkTime = this.settings.maxWalkTime;
var secondsPerKm = this.settings.secondsPerKm;
return function(canvas, tilePoint) {
if (!self.stationList || !self.positions.length) {
return;
}
var ctx = canvas.getContext('2d');
ctx.clearRect(0, 0, canvas.width, canvas.height);
/* Figure out how many stations we have to look at around
this tile.
*/
var tileSize = this.options.tileSize;
var start = tilePoint.multiplyBy(tileSize);
var end = start.add([tileSize, 0]);
var startLatLng = this._map.unproject(start);
var endLatLng = this._map.unproject(end);
var spanInMeters = startLatLng.distanceTo(endLatLng);
var maxWalkDistance = maxWalkTime * (1 / secondsPerKm) * 1000;
var middle = start.add([tileSize / 2, tileSize / 2]);
var latlng = this._map.unproject(middle);
var searchRadius = Math.sqrt(spanInMeters * spanInMeters + spanInMeters * spanInMeters);
searchRadius += maxWalkDistance;
var stationsAround = self.quadtree.searchInRadius(latlng.lat, latlng.lng, searchRadius);
ctx.globalCompositeOperation = 'source-over';
ctx.fillStyle = 'rgba(50,50,50,0.4)';
ctx.fillRect(0, 0, canvas.width, canvas.height);
ctx.globalCompositeOperation = 'destination-out';
ctx.fillStyle = 'rgba(0,0,0,1)';
for (var i = 0; i < self.positions.length; i += 1) {
var drawStations = self.positions[i].getReachableStations(stationsAround, start, tileSize);
for (var j = 0; j < drawStations.length; j += 1) {
ctx.beginPath();
ctx.arc(drawStations[j].x, drawStations[j].y,
drawStations[j].r, 0, 2 * Math.PI, false);
ctx.fill();
}
}
};
};
Mapnificent.prototype.augmentLeafletHash = function() {
var mapnificent = this;
var formatHash = function(map) {
var center = map.getCenter(),
zoom = map.getZoom(),
precision = Math.max(0, Math.ceil(Math.log(zoom) / Math.LN2));
var params = [
zoom,
center.lat.toFixed(precision),
center.lng.toFixed(precision)
];
mapnificent.positions.forEach(function(pos) {
params.push(pos.time);
params.push(pos.latlng.lat.toFixed(precision));
params.push(pos.latlng.lng.toFixed(precision));
});
return "#" + params.join("/");
}
var parseHash = function(hash) {
if(hash.indexOf('#') === 0) {
hash = hash.substr(1);
}
var args = hash.split("/");
var parsed;
if (args.length < 3) {
return false;
}
var zoom = parseInt(args[0], 10),
lat = parseFloat(args[1]),
lon = parseFloat(args[2]);
if (isNaN(zoom) || isNaN(lat) || isNaN(lon)) {
parsed = false;
} else {
parsed = {
center: new L.LatLng(lat, lon),
zoom: zoom
};
}
var posIndex = 0;
for (var i = 3; i < args.length; i += 3) {
var time = parseInt(args[i], 10);
lat = parseFloat(args[i + 1]);
lon = parseFloat(args[i + 2]);
if (isNaN(time) || isNaN(lat) || isNaN(lon)) {
continue
}
if (mapnificent.positions[posIndex] === undefined) {
mapnificent.addPosition(new L.LatLng(lat, lon), time);
} else {
mapnificent.positions[posIndex].updatePosition(new L.LatLng(lat, lon), time);
}
posIndex += 1;
}
for (i = posIndex; i < mapnificent.positions.length; i += 1) {
mapnificent.removePosition(mapnificent.positions[i]);
}
return parsed;
};
L.Hash.prototype.formatHash = formatHash;
L.Hash.prototype.parseHash = parseHash;
this.hash = new L.Hash(this.map);
};
//
// onMapMove: function() {
// // bail if we're moving the map (updating from a hash),
// // or if the map is not yet loaded
//
// if (this.movingMap || !this.map._loaded) {
// return false;
// }
//
// var hash = this.formatHash(this.map);
// if (this.lastHash != hash) {
// location.replace(hash);
// this.lastHash = hash;
// }
// },
/* new start */
MapnificentPosition.prototype.setProgressCallback = function(callback) {
this.callback = callback;
}
const initialAddPosition = Mapnificent.prototype.addPosition;
Mapnificent.prototype.addPosition = function(latlng, time){
initialAddPosition.bind(this)(latlng, time);
return this.positions[this.positions.length - 1];
}
/* new end */
window.Mapnificent = Mapnificent;
}()); | {
// self.hash.update();
if (self.positions.length === 0) {
self.addPosition(L.latLng(
self.settings.coordinates[1],
self.settings.coordinates[0]
));
}
} | conditional_block |
formula.py | """
Provides the basic classes needed to specify statistical models.
"""
import copy
import types
import numpy as N
__docformat__ = 'restructuredtext'
default_namespace = {}
class term(object):
"""
This class is very simple: it is just a named term in a model formula.
It is also callable: by default it namespace[self.name], where namespace
defaults to formula.default_namespace.
When called in an instance of formula,
the namespace used is that formula's namespace.
"""
def __pow__(self, power):
"""
Raise the quantitative term's values to an integer power, i.e.
polynomial.
"""
try:
power = float(power)
except:
raise ValueError, 'expecting a float'
if power == int(power):
name = '%s^%d' % (self.name, int(power))
else:
name = '%s^%0.2f' % (self.name, power)
value = quantitative(name, func=self, transform=lambda x: N.power(x, power))
value.power = power
value.namespace = self.namespace
return value
def __init__(self, name, func=None, termname=None):
self.name = name
self.__namespace = None
if termname is None:
self.termname = name
else:
self.termname = termname
if type(self.termname) is not types.StringType:
raise ValueError, 'expecting a string for termname'
if func:
self.func = func
# Namespace in which self.name will be looked up in, if needed
def _get_namespace(self):
if isinstance(self.__namespace, N.ndarray):
return self.__namespace
else: return self.__namespace or default_namespace
def _set_namespace(self, value): self.__namespace = value
def _del_namespace(self): del self.__namespace
namespace = property(_get_namespace, _set_namespace, _del_namespace)
def __str__(self):
"""
'<term: %s>' % self.termname
"""
return '<term: %s>' % self.termname
def __add__(self, other):
"""
formula(self) + formula(other)
"""
other = formula(other, namespace=self.namespace)
f = other + self
f.namespace = self.namespace
return f
def __mul__(self, other):
"""
formula(self) * formula(other)
"""
if other.name is 'intercept':
f = formula(self, namespace=self.namespace)
elif self.name is 'intercept':
f = formula(other, namespace=other.namespace)
else:
other = formula(other, namespace=self.namespace)
f = other * self
f.namespace = self.namespace
return f
def names(self):
"""
Return the names of the columns in design associated to the terms,
i.e. len(self.names()) = self().shape[0].
"""
if type(self.name) is types.StringType:
return [self.name]
else:
return list(self.name)
def __call__(self, *args, **kw):
"""
Return the columns associated to self in a design matrix.
If the term has no 'func' attribute, it returns
``self.namespace[self.termname]``
else, it returns
``self.func(*args, **kw)``
"""
if not hasattr(self, 'func'):
val = self.namespace[self.termname]
else:
val = self.func
if callable(val):
if hasattr(val, "namespace"):
val.namespace = self.namespace
val = val(*args, **kw)
val = N.asarray(val)
return N.squeeze(val)
class factor(term):
"""
A categorical factor.
"""
def __init__(self, termname, keys, ordinal=False):
"""
factor is initialized with keys, representing all valid
levels of the factor.
"""
self.keys = list(set(keys))
self.keys.sort()
self._name = termname
self.termname = termname
self.ordinal = ordinal
if self.ordinal:
name = self.name
else:
name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]
term.__init__(self, name, termname=self.termname, func=self.get_columns)
def get_columns(self, *args, **kw):
"""
Calling function for factor instance.
"""
v = self.namespace[self._name]
while True:
if callable(v):
if hasattr(v, "namespace"):
v.namespace = self.namespace
v = v(*args, **kw)
else: break
if self.ordinal:
col = [float(self.keys.index(v[i])) for i in range(len(self.keys))]
return N.array(col)
else:
n = len(v)
value = []
for key in self.keys:
col = [float((v[i] == key)) for i in range(n)]
value.append(col)
return N.array(value)
def values(self, *args, **kw):
"""
Return the keys of the factor, rather than the columns of the design
matrix.
"""
del(self.func)
val = self(*args, **kw)
self.func = self.get_columns
return val
def verify(self, values):
"""
Verify that all values correspond to valid keys in self.
"""
s = set(values)
if not s.issubset(self.keys):
raise ValueError, 'unknown keys in values'
def | (self, other):
"""
formula(self) + formula(other)
When adding \'intercept\' to a factor, this just returns
formula(self, namespace=self.namespace)
"""
if other.name is 'intercept':
return formula(self, namespace=self.namespace)
else:
return term.__add__(self, other)
def main_effect(self, reference=None):
"""
Return the 'main effect' columns of a factor, choosing
a reference column number to remove.
"""
if reference is None:
reference = 0
names = self.names()
def maineffect_func(value, reference=reference):
rvalue = []
keep = range(value.shape[0])
keep.pop(reference)
for i in range(len(keep)):
rvalue.append(value[keep[i]] - value[reference])
return N.array(rvalue)
keep = range(len(self.names()))
keep.pop(reference)
__names = self.names()
_names = ['%s-%s' % (__names[keep[i]], __names[reference]) for i in range(len(keep))]
value = quantitative(_names, func=self,
termname='%s:maineffect' % self.termname,
transform=maineffect_func)
value.namespace = self.namespace
return value
class quantitative(term):
"""
A subclass of term that can be used to apply point transformations
of another term, i.e. to take powers:
>>> import numpy as N
>>> from scipy.sandbox.models import formula
>>> X = N.linspace(0,10,101)
>>> x = formula.term('X')
>>> x.namespace={'X':X}
>>> x2 = x**2
>>> print N.allclose(x()**2, x2())
True
>>> x3 = formula.quantitative('x2', func=x, transform=lambda x: x**2)
>>> x3.namespace = x.namespace
>>> print N.allclose(x()**2, x3())
True
"""
def __init__(self, name, func=None, termname=None, transform=lambda x: x):
self.transform = transform
term.__init__(self, name, func=func, termname=termname)
def __call__(self, *args, **kw):
"""
A quantitative is just like term, except there is an additional
transformation: self.transform.
"""
return self.transform(term.__call__(self, *args, **kw))
class formula(object):
"""
A formula object for manipulating design matrices in regression models,
essentially consisting of a list of term instances.
The object supports addition and multiplication which correspond
to concatenation and pairwise multiplication, respectively,
of the columns of the two formulas.
"""
def _get_namespace(self):
if isinstance(self.__namespace, N.ndarray):
return self.__namespace
else: return self.__namespace or default_namespace
def _set_namespace(self, value): self.__namespace = value
def _del_namespace(self): del self.__namespace
namespace = property(_get_namespace, _set_namespace, _del_namespace)
def _terms_changed(self):
self._names = self.names()
self._termnames = self.termnames()
def __init__(self, termlist, namespace=default_namespace):
"""
Create a formula from either:
i. a `formula` object
ii. a sequence of `term` instances
iii. one `term`
"""
self.__namespace = namespace
if isinstance(termlist, formula):
self.terms = copy.copy(list(termlist.terms))
elif type(termlist) is types.ListType:
self.terms = termlist
elif isinstance(termlist, term):
self.terms = [termlist]
else:
raise ValueError
self._terms_changed()
def __str__(self):
"""
String representation of list of termnames of a formula.
"""
value = []
for term in self.terms:
value += [term.termname]
return '<formula: %s>' % ' + '.join(value)
def __call__(self, *args, **kw):
"""
Create (transpose) of the design matrix of the formula within
namespace. Extra arguments are passed to each term instance. If
the formula just contains an intercept, then the keyword
argument 'nrow' indicates the number of rows (observations).
"""
if 'namespace' in kw:
namespace = kw['namespace']
else:
namespace = self.namespace
allvals = []
intercept = False
iindex = 0
for t in self.terms:
t.namespace = namespace
val = t(*args, **kw)
isintercept = False
if hasattr(t, "termname"):
if t.termname == 'intercept':
intercept = True
isintercept = True
interceptindex = iindex
allvals.append(None)
if val.ndim == 1 and not isintercept:
val.shape = (1, val.shape[0])
allvals.append(val)
elif not isintercept:
allvals.append(val)
iindex += 1
if not intercept:
try:
allvals = N.concatenate(allvals)
except:
pass
else:
nrow = kw.get('nrow', -1)
if allvals != []:
if interceptindex > 0:
n = allvals[0].shape[1]
else:
n = allvals[1].shape[1]
allvals[interceptindex] = N.ones((1,n), N.float64)
allvals = N.concatenate(allvals)
elif nrow <= 1:
raise ValueError, 'with only intercept in formula, keyword \'nrow\' argument needed'
else:
allvals = I(nrow=nrow)
allvals.shape = (1,) + allvals.shape
return allvals
def hasterm(self, query_term):
"""
Determine whether a given term is in a formula.
"""
if not isinstance(query_term, formula):
if type(query_term) == type("name"):
try: query = self[query_term]
except: return False
elif isinstance(query_term, term):
return query_term.termname in self.termnames()
elif len(query_term.terms) == 1:
query_term = query_term.terms[0]
return query_term.termname in self.termnames()
else:
raise ValueError, 'more than one term passed to hasterm'
def __getitem__(self, name):
t = self.termnames()
if name in t:
return self.terms[t.index(name)]
else:
raise KeyError, 'formula has no such term: %s' % repr(name)
def termcolumns(self, query_term, dict=False):
"""
Return a list of the indices of all columns associated
to a given term.
"""
if self.hasterm(query_term):
names = query_term.names()
value = {}
for name in names:
value[name] = self._names.index(name)
else:
raise ValueError, 'term not in formula'
if dict:
return value
else:
return value.values()
def names(self):
"""
Return a list of the names in the formula. The order of the
names corresponds to the order of the columns when self
is evaluated.
"""
allnames = []
for term in self.terms:
allnames += term.names()
return allnames
def termnames(self):
"""
Return a list of the term names in the formula. These
are the names of each term instance in self.
"""
names = []
for term in self.terms:
names += [term.termname]
return names
def design(self, *args, **kw):
"""
``transpose(self(*args, **kw))``
"""
return self(*args, **kw).T
def __mul__(self, other, nested=False):
"""
This returns a formula whose columns are the pairwise
product of the columns of self and other.
TO DO: check for nesting relationship. Should not be too difficult.
"""
other = formula(other, namespace=self.namespace)
selftermnames = self.termnames()
othertermnames = other.termnames()
I = len(selftermnames)
J = len(othertermnames)
terms = []
termnames = []
for i in range(I):
for j in range(J):
termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))
pieces = termname.split('*')
pieces.sort()
termname = '*'.join(pieces)
termnames.append(termname)
selfnames = self.terms[i].names()
othernames = other.terms[j].names()
if self.terms[i].name is 'intercept':
_term = other.terms[j]
_term.namespace = other.namespace
elif other.terms[j].name is 'intercept':
_term = self.terms[i]
_term.namespace = self.namespace
else:
names = []
d1 = len(selfnames)
d2 = len(othernames)
for r in range(d1):
for s in range(d2):
name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))
pieces = name.split('*')
pieces.sort()
name = '*'.join(pieces)
names.append(name)
def product_func(value, d1=d1, d2=d2):
out = []
for r in range(d1):
for s in range(d2):
out.append(value[r] * value[d1+s])
return N.array(out)
sumterms = self + other
sumterms.terms = [self, other] # enforce the order we want
sumterms.namespace = self.namespace
_term = quantitative(names, func=sumterms, termname=termname,
transform=product_func)
_term.namespace = self.namespace
terms.append(_term)
return formula(terms, namespace=self.namespace)
def __add__(self, other):
"""
Return a formula whose columns are the
concatenation of the columns of self and other.
terms in the formula are sorted alphabetically.
"""
other = formula(other, namespace=self.namespace)
terms = self.terms + other.terms
pieces = [(term.name, term) for term in terms]
pieces.sort()
terms = [piece[1] for piece in pieces]
return formula(terms, namespace=self.namespace)
def __sub__(self, other):
"""
Return a formula with all terms in other removed from self.
If other contains term instances not in formula, this
function does not raise an exception.
"""
other = formula(other, namespace=self.namespace)
terms = copy.copy(self.terms)
for term in other.terms:
for i in range(len(terms)):
if terms[i].termname == term.termname:
terms.pop(i)
break
return formula(terms, namespace=self.namespace)
def isnested(A, B, namespace=globals()):
"""
Is factor B nested within factor A or vice versa: a very crude test
which depends on the namespace.
If they are nested, returns (True, F) where F is the finest
level of the relationship. Otherwise, returns (False, None)
"""
a = A(namespace, values=True)[0]
b = B(namespace, values=True)[0]
if len(a) != len(b):
raise ValueError, 'A() and B() should be sequences of the same length'
nA = len(set(a))
nB = len(set(b))
n = max(nA, nB)
AB = [(a[i],b[i]) for i in range(len(a))]
nAB = len(set(AB))
if nAB == n:
if nA > nB:
F = A
else:
F = B
return (True, F)
else:
return (False, None)
def _intercept_fn(nrow=1, **extra):
return N.ones((1,nrow))
I = term('intercept', func=_intercept_fn)
I.__doc__ = """
Intercept term in a formula. If intercept is the
only term in the formula, then a keywords argument
\'nrow\' is needed.
>>> from scipy.sandbox.models.formula import formula, I
>>> I()
array(1.0)
>>> I(nrow=5)
array([ 1., 1., 1., 1., 1.])
>>> f=formula(I)
>>> f(nrow=5)
array([1, 1, 1, 1, 1])
"""
| __add__ | identifier_name |
formula.py | """
Provides the basic classes needed to specify statistical models.
"""
import copy
import types
import numpy as N
__docformat__ = 'restructuredtext'
default_namespace = {}
class term(object):
"""
This class is very simple: it is just a named term in a model formula.
It is also callable: by default it namespace[self.name], where namespace
defaults to formula.default_namespace.
When called in an instance of formula,
the namespace used is that formula's namespace.
"""
def __pow__(self, power):
"""
Raise the quantitative term's values to an integer power, i.e.
polynomial.
"""
try:
power = float(power)
except:
raise ValueError, 'expecting a float'
if power == int(power):
name = '%s^%d' % (self.name, int(power))
else:
name = '%s^%0.2f' % (self.name, power)
value = quantitative(name, func=self, transform=lambda x: N.power(x, power))
value.power = power
value.namespace = self.namespace
return value
def __init__(self, name, func=None, termname=None):
self.name = name
self.__namespace = None
if termname is None:
self.termname = name
else:
self.termname = termname
if type(self.termname) is not types.StringType:
raise ValueError, 'expecting a string for termname'
if func:
self.func = func
# Namespace in which self.name will be looked up in, if needed
def _get_namespace(self):
if isinstance(self.__namespace, N.ndarray):
return self.__namespace
else: return self.__namespace or default_namespace
def _set_namespace(self, value): self.__namespace = value
def _del_namespace(self): del self.__namespace
namespace = property(_get_namespace, _set_namespace, _del_namespace)
def __str__(self):
"""
'<term: %s>' % self.termname
"""
return '<term: %s>' % self.termname
def __add__(self, other):
"""
formula(self) + formula(other)
"""
other = formula(other, namespace=self.namespace)
f = other + self
f.namespace = self.namespace
return f
def __mul__(self, other):
"""
formula(self) * formula(other)
"""
if other.name is 'intercept':
f = formula(self, namespace=self.namespace)
elif self.name is 'intercept':
f = formula(other, namespace=other.namespace)
else:
other = formula(other, namespace=self.namespace)
f = other * self
f.namespace = self.namespace
return f
def names(self):
"""
Return the names of the columns in design associated to the terms,
i.e. len(self.names()) = self().shape[0].
"""
if type(self.name) is types.StringType:
return [self.name]
else:
return list(self.name)
def __call__(self, *args, **kw):
"""
Return the columns associated to self in a design matrix.
If the term has no 'func' attribute, it returns
``self.namespace[self.termname]``
else, it returns
``self.func(*args, **kw)``
"""
if not hasattr(self, 'func'):
val = self.namespace[self.termname]
else:
val = self.func
if callable(val):
if hasattr(val, "namespace"):
val.namespace = self.namespace
val = val(*args, **kw)
val = N.asarray(val)
return N.squeeze(val)
class factor(term):
"""
A categorical factor.
"""
def __init__(self, termname, keys, ordinal=False):
"""
factor is initialized with keys, representing all valid
levels of the factor.
"""
self.keys = list(set(keys))
self.keys.sort()
self._name = termname
self.termname = termname
self.ordinal = ordinal
if self.ordinal:
name = self.name
else:
name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]
term.__init__(self, name, termname=self.termname, func=self.get_columns)
def get_columns(self, *args, **kw):
"""
Calling function for factor instance.
"""
v = self.namespace[self._name]
while True:
if callable(v):
if hasattr(v, "namespace"):
v.namespace = self.namespace
v = v(*args, **kw)
else: break
if self.ordinal:
col = [float(self.keys.index(v[i])) for i in range(len(self.keys))]
return N.array(col)
else:
n = len(v)
value = []
for key in self.keys:
col = [float((v[i] == key)) for i in range(n)]
value.append(col)
return N.array(value)
def values(self, *args, **kw):
"""
Return the keys of the factor, rather than the columns of the design
matrix.
"""
del(self.func)
val = self(*args, **kw)
self.func = self.get_columns
return val
def verify(self, values):
"""
Verify that all values correspond to valid keys in self.
"""
s = set(values)
if not s.issubset(self.keys):
raise ValueError, 'unknown keys in values'
def __add__(self, other):
"""
formula(self) + formula(other)
When adding \'intercept\' to a factor, this just returns
formula(self, namespace=self.namespace)
"""
if other.name is 'intercept':
return formula(self, namespace=self.namespace)
else:
return term.__add__(self, other)
def main_effect(self, reference=None):
"""
Return the 'main effect' columns of a factor, choosing
a reference column number to remove.
"""
if reference is None:
reference = 0
names = self.names()
def maineffect_func(value, reference=reference):
rvalue = []
keep = range(value.shape[0])
keep.pop(reference)
for i in range(len(keep)):
rvalue.append(value[keep[i]] - value[reference])
return N.array(rvalue)
keep = range(len(self.names()))
keep.pop(reference)
__names = self.names()
_names = ['%s-%s' % (__names[keep[i]], __names[reference]) for i in range(len(keep))]
value = quantitative(_names, func=self,
termname='%s:maineffect' % self.termname,
transform=maineffect_func)
value.namespace = self.namespace
return value
class quantitative(term):
"""
A subclass of term that can be used to apply point transformations
of another term, i.e. to take powers:
>>> import numpy as N
>>> from scipy.sandbox.models import formula
>>> X = N.linspace(0,10,101)
>>> x = formula.term('X')
>>> x.namespace={'X':X}
>>> x2 = x**2
>>> print N.allclose(x()**2, x2())
True
>>> x3 = formula.quantitative('x2', func=x, transform=lambda x: x**2)
>>> x3.namespace = x.namespace
>>> print N.allclose(x()**2, x3())
True
"""
def __init__(self, name, func=None, termname=None, transform=lambda x: x):
self.transform = transform
term.__init__(self, name, func=func, termname=termname)
def __call__(self, *args, **kw):
"""
A quantitative is just like term, except there is an additional
transformation: self.transform.
"""
return self.transform(term.__call__(self, *args, **kw))
class formula(object):
"""
A formula object for manipulating design matrices in regression models,
essentially consisting of a list of term instances.
The object supports addition and multiplication which correspond
to concatenation and pairwise multiplication, respectively,
of the columns of the two formulas.
"""
def _get_namespace(self):
if isinstance(self.__namespace, N.ndarray):
return self.__namespace
else: return self.__namespace or default_namespace
def _set_namespace(self, value): self.__namespace = value
def _del_namespace(self): del self.__namespace
namespace = property(_get_namespace, _set_namespace, _del_namespace)
def _terms_changed(self):
self._names = self.names()
self._termnames = self.termnames()
def __init__(self, termlist, namespace=default_namespace):
"""
Create a formula from either:
i. a `formula` object
ii. a sequence of `term` instances
iii. one `term`
"""
self.__namespace = namespace
if isinstance(termlist, formula):
self.terms = copy.copy(list(termlist.terms))
elif type(termlist) is types.ListType:
self.terms = termlist
elif isinstance(termlist, term):
self.terms = [termlist]
else:
raise ValueError
self._terms_changed()
def __str__(self):
"""
String representation of list of termnames of a formula.
"""
value = []
for term in self.terms:
value += [term.termname]
return '<formula: %s>' % ' + '.join(value)
def __call__(self, *args, **kw):
"""
Create (transpose) of the design matrix of the formula within
namespace. Extra arguments are passed to each term instance. If
the formula just contains an intercept, then the keyword
argument 'nrow' indicates the number of rows (observations).
"""
if 'namespace' in kw:
namespace = kw['namespace']
else:
namespace = self.namespace
allvals = []
intercept = False
iindex = 0
for t in self.terms:
t.namespace = namespace
val = t(*args, **kw)
isintercept = False
if hasattr(t, "termname"):
if t.termname == 'intercept':
intercept = True
isintercept = True
interceptindex = iindex
allvals.append(None)
if val.ndim == 1 and not isintercept:
val.shape = (1, val.shape[0])
allvals.append(val)
elif not isintercept:
allvals.append(val)
iindex += 1
if not intercept:
try:
allvals = N.concatenate(allvals)
except:
pass
else:
nrow = kw.get('nrow', -1)
if allvals != []:
if interceptindex > 0:
n = allvals[0].shape[1]
else:
n = allvals[1].shape[1]
allvals[interceptindex] = N.ones((1,n), N.float64)
allvals = N.concatenate(allvals)
elif nrow <= 1:
raise ValueError, 'with only intercept in formula, keyword \'nrow\' argument needed'
else:
allvals = I(nrow=nrow)
allvals.shape = (1,) + allvals.shape
return allvals
def hasterm(self, query_term):
"""
Determine whether a given term is in a formula.
"""
if not isinstance(query_term, formula):
if type(query_term) == type("name"):
try: query = self[query_term]
except: return False
elif isinstance(query_term, term):
return query_term.termname in self.termnames()
elif len(query_term.terms) == 1:
query_term = query_term.terms[0]
return query_term.termname in self.termnames()
else:
raise ValueError, 'more than one term passed to hasterm'
def __getitem__(self, name):
t = self.termnames()
if name in t:
return self.terms[t.index(name)]
else:
raise KeyError, 'formula has no such term: %s' % repr(name)
def termcolumns(self, query_term, dict=False):
"""
Return a list of the indices of all columns associated
to a given term.
"""
if self.hasterm(query_term):
names = query_term.names()
value = {}
for name in names:
value[name] = self._names.index(name)
else:
raise ValueError, 'term not in formula'
if dict:
return value
else:
return value.values()
def names(self):
"""
Return a list of the names in the formula. The order of the
names corresponds to the order of the columns when self
is evaluated.
"""
allnames = []
for term in self.terms:
allnames += term.names()
return allnames
def termnames(self):
"""
Return a list of the term names in the formula. These
are the names of each term instance in self.
"""
names = []
for term in self.terms:
names += [term.termname]
return names
def design(self, *args, **kw):
"""
``transpose(self(*args, **kw))``
"""
return self(*args, **kw).T
def __mul__(self, other, nested=False):
"""
This returns a formula whose columns are the pairwise
product of the columns of self and other.
TO DO: check for nesting relationship. Should not be too difficult.
"""
other = formula(other, namespace=self.namespace)
selftermnames = self.termnames()
othertermnames = other.termnames()
I = len(selftermnames)
J = len(othertermnames)
terms = []
termnames = []
for i in range(I):
for j in range(J):
termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))
pieces = termname.split('*')
pieces.sort()
termname = '*'.join(pieces)
termnames.append(termname)
selfnames = self.terms[i].names()
othernames = other.terms[j].names()
if self.terms[i].name is 'intercept':
_term = other.terms[j]
_term.namespace = other.namespace
elif other.terms[j].name is 'intercept':
_term = self.terms[i]
_term.namespace = self.namespace
else:
names = []
d1 = len(selfnames)
d2 = len(othernames)
for r in range(d1):
for s in range(d2):
name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))
pieces = name.split('*')
pieces.sort()
name = '*'.join(pieces)
names.append(name)
def product_func(value, d1=d1, d2=d2):
| return N.array(out)
sumterms = self + other
sumterms.terms = [self, other] # enforce the order we want
sumterms.namespace = self.namespace
_term = quantitative(names, func=sumterms, termname=termname,
transform=product_func)
_term.namespace = self.namespace
terms.append(_term)
return formula(terms, namespace=self.namespace)
def __add__(self, other):
"""
Return a formula whose columns are the
concatenation of the columns of self and other.
terms in the formula are sorted alphabetically.
"""
other = formula(other, namespace=self.namespace)
terms = self.terms + other.terms
pieces = [(term.name, term) for term in terms]
pieces.sort()
terms = [piece[1] for piece in pieces]
return formula(terms, namespace=self.namespace)
def __sub__(self, other):
"""
Return a formula with all terms in other removed from self.
If other contains term instances not in formula, this
function does not raise an exception.
"""
other = formula(other, namespace=self.namespace)
terms = copy.copy(self.terms)
for term in other.terms:
for i in range(len(terms)):
if terms[i].termname == term.termname:
terms.pop(i)
break
return formula(terms, namespace=self.namespace)
def isnested(A, B, namespace=globals()):
"""
Is factor B nested within factor A or vice versa: a very crude test
which depends on the namespace.
If they are nested, returns (True, F) where F is the finest
level of the relationship. Otherwise, returns (False, None)
"""
a = A(namespace, values=True)[0]
b = B(namespace, values=True)[0]
if len(a) != len(b):
raise ValueError, 'A() and B() should be sequences of the same length'
nA = len(set(a))
nB = len(set(b))
n = max(nA, nB)
AB = [(a[i],b[i]) for i in range(len(a))]
nAB = len(set(AB))
if nAB == n:
if nA > nB:
F = A
else:
F = B
return (True, F)
else:
return (False, None)
def _intercept_fn(nrow=1, **extra):
return N.ones((1,nrow))
I = term('intercept', func=_intercept_fn)
I.__doc__ = """
Intercept term in a formula. If intercept is the
only term in the formula, then a keywords argument
\'nrow\' is needed.
>>> from scipy.sandbox.models.formula import formula, I
>>> I()
array(1.0)
>>> I(nrow=5)
array([ 1., 1., 1., 1., 1.])
>>> f=formula(I)
>>> f(nrow=5)
array([1, 1, 1, 1, 1])
""" | out = []
for r in range(d1):
for s in range(d2):
out.append(value[r] * value[d1+s]) | random_line_split |
formula.py | """
Provides the basic classes needed to specify statistical models.
"""
import copy
import types
import numpy as N
__docformat__ = 'restructuredtext'
default_namespace = {}
class term(object):
"""
This class is very simple: it is just a named term in a model formula.
It is also callable: by default it namespace[self.name], where namespace
defaults to formula.default_namespace.
When called in an instance of formula,
the namespace used is that formula's namespace.
"""
def __pow__(self, power):
"""
Raise the quantitative term's values to an integer power, i.e.
polynomial.
"""
try:
power = float(power)
except:
raise ValueError, 'expecting a float'
if power == int(power):
name = '%s^%d' % (self.name, int(power))
else:
name = '%s^%0.2f' % (self.name, power)
value = quantitative(name, func=self, transform=lambda x: N.power(x, power))
value.power = power
value.namespace = self.namespace
return value
def __init__(self, name, func=None, termname=None):
self.name = name
self.__namespace = None
if termname is None:
self.termname = name
else:
self.termname = termname
if type(self.termname) is not types.StringType:
raise ValueError, 'expecting a string for termname'
if func:
self.func = func
# Namespace in which self.name will be looked up in, if needed
def _get_namespace(self):
if isinstance(self.__namespace, N.ndarray):
return self.__namespace
else: return self.__namespace or default_namespace
def _set_namespace(self, value): self.__namespace = value
def _del_namespace(self): del self.__namespace
namespace = property(_get_namespace, _set_namespace, _del_namespace)
def __str__(self):
"""
'<term: %s>' % self.termname
"""
return '<term: %s>' % self.termname
def __add__(self, other):
"""
formula(self) + formula(other)
"""
other = formula(other, namespace=self.namespace)
f = other + self
f.namespace = self.namespace
return f
def __mul__(self, other):
"""
formula(self) * formula(other)
"""
if other.name is 'intercept':
f = formula(self, namespace=self.namespace)
elif self.name is 'intercept':
f = formula(other, namespace=other.namespace)
else:
other = formula(other, namespace=self.namespace)
f = other * self
f.namespace = self.namespace
return f
def names(self):
"""
Return the names of the columns in design associated to the terms,
i.e. len(self.names()) = self().shape[0].
"""
if type(self.name) is types.StringType:
return [self.name]
else:
return list(self.name)
def __call__(self, *args, **kw):
"""
Return the columns associated to self in a design matrix.
If the term has no 'func' attribute, it returns
``self.namespace[self.termname]``
else, it returns
``self.func(*args, **kw)``
"""
if not hasattr(self, 'func'):
val = self.namespace[self.termname]
else:
val = self.func
if callable(val):
if hasattr(val, "namespace"):
val.namespace = self.namespace
val = val(*args, **kw)
val = N.asarray(val)
return N.squeeze(val)
class factor(term):
"""
A categorical factor.
"""
def __init__(self, termname, keys, ordinal=False):
"""
factor is initialized with keys, representing all valid
levels of the factor.
"""
self.keys = list(set(keys))
self.keys.sort()
self._name = termname
self.termname = termname
self.ordinal = ordinal
if self.ordinal:
name = self.name
else:
name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]
term.__init__(self, name, termname=self.termname, func=self.get_columns)
def get_columns(self, *args, **kw):
"""
Calling function for factor instance.
"""
v = self.namespace[self._name]
while True:
if callable(v):
if hasattr(v, "namespace"):
v.namespace = self.namespace
v = v(*args, **kw)
else: break
if self.ordinal:
col = [float(self.keys.index(v[i])) for i in range(len(self.keys))]
return N.array(col)
else:
n = len(v)
value = []
for key in self.keys:
col = [float((v[i] == key)) for i in range(n)]
value.append(col)
return N.array(value)
def values(self, *args, **kw):
"""
Return the keys of the factor, rather than the columns of the design
matrix.
"""
del(self.func)
val = self(*args, **kw)
self.func = self.get_columns
return val
def verify(self, values):
"""
Verify that all values correspond to valid keys in self.
"""
s = set(values)
if not s.issubset(self.keys):
raise ValueError, 'unknown keys in values'
def __add__(self, other):
"""
formula(self) + formula(other)
When adding \'intercept\' to a factor, this just returns
formula(self, namespace=self.namespace)
"""
if other.name is 'intercept':
return formula(self, namespace=self.namespace)
else:
return term.__add__(self, other)
def main_effect(self, reference=None):
"""
Return the 'main effect' columns of a factor, choosing
a reference column number to remove.
"""
if reference is None:
reference = 0
names = self.names()
def maineffect_func(value, reference=reference):
rvalue = []
keep = range(value.shape[0])
keep.pop(reference)
for i in range(len(keep)):
rvalue.append(value[keep[i]] - value[reference])
return N.array(rvalue)
keep = range(len(self.names()))
keep.pop(reference)
__names = self.names()
_names = ['%s-%s' % (__names[keep[i]], __names[reference]) for i in range(len(keep))]
value = quantitative(_names, func=self,
termname='%s:maineffect' % self.termname,
transform=maineffect_func)
value.namespace = self.namespace
return value
class quantitative(term):
"""
A subclass of term that can be used to apply point transformations
of another term, i.e. to take powers:
>>> import numpy as N
>>> from scipy.sandbox.models import formula
>>> X = N.linspace(0,10,101)
>>> x = formula.term('X')
>>> x.namespace={'X':X}
>>> x2 = x**2
>>> print N.allclose(x()**2, x2())
True
>>> x3 = formula.quantitative('x2', func=x, transform=lambda x: x**2)
>>> x3.namespace = x.namespace
>>> print N.allclose(x()**2, x3())
True
"""
def __init__(self, name, func=None, termname=None, transform=lambda x: x):
self.transform = transform
term.__init__(self, name, func=func, termname=termname)
def __call__(self, *args, **kw):
"""
A quantitative is just like term, except there is an additional
transformation: self.transform.
"""
return self.transform(term.__call__(self, *args, **kw))
class formula(object):
"""
A formula object for manipulating design matrices in regression models,
essentially consisting of a list of term instances.
The object supports addition and multiplication which correspond
to concatenation and pairwise multiplication, respectively,
of the columns of the two formulas.
"""
def _get_namespace(self):
if isinstance(self.__namespace, N.ndarray):
return self.__namespace
else: return self.__namespace or default_namespace
def _set_namespace(self, value): self.__namespace = value
def _del_namespace(self): del self.__namespace
namespace = property(_get_namespace, _set_namespace, _del_namespace)
def _terms_changed(self):
self._names = self.names()
self._termnames = self.termnames()
def __init__(self, termlist, namespace=default_namespace):
"""
Create a formula from either:
i. a `formula` object
ii. a sequence of `term` instances
iii. one `term`
"""
self.__namespace = namespace
if isinstance(termlist, formula):
self.terms = copy.copy(list(termlist.terms))
elif type(termlist) is types.ListType:
self.terms = termlist
elif isinstance(termlist, term):
self.terms = [termlist]
else:
raise ValueError
self._terms_changed()
def __str__(self):
"""
String representation of list of termnames of a formula.
"""
value = []
for term in self.terms:
value += [term.termname]
return '<formula: %s>' % ' + '.join(value)
def __call__(self, *args, **kw):
"""
Create (transpose) of the design matrix of the formula within
namespace. Extra arguments are passed to each term instance. If
the formula just contains an intercept, then the keyword
argument 'nrow' indicates the number of rows (observations).
"""
if 'namespace' in kw:
namespace = kw['namespace']
else:
namespace = self.namespace
allvals = []
intercept = False
iindex = 0
for t in self.terms:
t.namespace = namespace
val = t(*args, **kw)
isintercept = False
if hasattr(t, "termname"):
if t.termname == 'intercept':
intercept = True
isintercept = True
interceptindex = iindex
allvals.append(None)
if val.ndim == 1 and not isintercept:
val.shape = (1, val.shape[0])
allvals.append(val)
elif not isintercept:
allvals.append(val)
iindex += 1
if not intercept:
try:
allvals = N.concatenate(allvals)
except:
pass
else:
nrow = kw.get('nrow', -1)
if allvals != []:
if interceptindex > 0:
n = allvals[0].shape[1]
else:
n = allvals[1].shape[1]
allvals[interceptindex] = N.ones((1,n), N.float64)
allvals = N.concatenate(allvals)
elif nrow <= 1:
raise ValueError, 'with only intercept in formula, keyword \'nrow\' argument needed'
else:
allvals = I(nrow=nrow)
allvals.shape = (1,) + allvals.shape
return allvals
def hasterm(self, query_term):
"""
Determine whether a given term is in a formula.
"""
if not isinstance(query_term, formula):
if type(query_term) == type("name"):
try: query = self[query_term]
except: return False
elif isinstance(query_term, term):
return query_term.termname in self.termnames()
elif len(query_term.terms) == 1:
query_term = query_term.terms[0]
return query_term.termname in self.termnames()
else:
raise ValueError, 'more than one term passed to hasterm'
def __getitem__(self, name):
|
def termcolumns(self, query_term, dict=False):
"""
Return a list of the indices of all columns associated
to a given term.
"""
if self.hasterm(query_term):
names = query_term.names()
value = {}
for name in names:
value[name] = self._names.index(name)
else:
raise ValueError, 'term not in formula'
if dict:
return value
else:
return value.values()
def names(self):
"""
Return a list of the names in the formula. The order of the
names corresponds to the order of the columns when self
is evaluated.
"""
allnames = []
for term in self.terms:
allnames += term.names()
return allnames
def termnames(self):
"""
Return a list of the term names in the formula. These
are the names of each term instance in self.
"""
names = []
for term in self.terms:
names += [term.termname]
return names
def design(self, *args, **kw):
"""
``transpose(self(*args, **kw))``
"""
return self(*args, **kw).T
def __mul__(self, other, nested=False):
"""
This returns a formula whose columns are the pairwise
product of the columns of self and other.
TO DO: check for nesting relationship. Should not be too difficult.
"""
other = formula(other, namespace=self.namespace)
selftermnames = self.termnames()
othertermnames = other.termnames()
I = len(selftermnames)
J = len(othertermnames)
terms = []
termnames = []
for i in range(I):
for j in range(J):
termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))
pieces = termname.split('*')
pieces.sort()
termname = '*'.join(pieces)
termnames.append(termname)
selfnames = self.terms[i].names()
othernames = other.terms[j].names()
if self.terms[i].name is 'intercept':
_term = other.terms[j]
_term.namespace = other.namespace
elif other.terms[j].name is 'intercept':
_term = self.terms[i]
_term.namespace = self.namespace
else:
names = []
d1 = len(selfnames)
d2 = len(othernames)
for r in range(d1):
for s in range(d2):
name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))
pieces = name.split('*')
pieces.sort()
name = '*'.join(pieces)
names.append(name)
def product_func(value, d1=d1, d2=d2):
out = []
for r in range(d1):
for s in range(d2):
out.append(value[r] * value[d1+s])
return N.array(out)
sumterms = self + other
sumterms.terms = [self, other] # enforce the order we want
sumterms.namespace = self.namespace
_term = quantitative(names, func=sumterms, termname=termname,
transform=product_func)
_term.namespace = self.namespace
terms.append(_term)
return formula(terms, namespace=self.namespace)
def __add__(self, other):
"""
Return a formula whose columns are the
concatenation of the columns of self and other.
terms in the formula are sorted alphabetically.
"""
other = formula(other, namespace=self.namespace)
terms = self.terms + other.terms
pieces = [(term.name, term) for term in terms]
pieces.sort()
terms = [piece[1] for piece in pieces]
return formula(terms, namespace=self.namespace)
def __sub__(self, other):
"""
Return a formula with all terms in other removed from self.
If other contains term instances not in formula, this
function does not raise an exception.
"""
other = formula(other, namespace=self.namespace)
terms = copy.copy(self.terms)
for term in other.terms:
for i in range(len(terms)):
if terms[i].termname == term.termname:
terms.pop(i)
break
return formula(terms, namespace=self.namespace)
def isnested(A, B, namespace=globals()):
"""
Is factor B nested within factor A or vice versa: a very crude test
which depends on the namespace.
If they are nested, returns (True, F) where F is the finest
level of the relationship. Otherwise, returns (False, None)
"""
a = A(namespace, values=True)[0]
b = B(namespace, values=True)[0]
if len(a) != len(b):
raise ValueError, 'A() and B() should be sequences of the same length'
nA = len(set(a))
nB = len(set(b))
n = max(nA, nB)
AB = [(a[i],b[i]) for i in range(len(a))]
nAB = len(set(AB))
if nAB == n:
if nA > nB:
F = A
else:
F = B
return (True, F)
else:
return (False, None)
def _intercept_fn(nrow=1, **extra):
return N.ones((1,nrow))
I = term('intercept', func=_intercept_fn)
I.__doc__ = """
Intercept term in a formula. If intercept is the
only term in the formula, then a keywords argument
\'nrow\' is needed.
>>> from scipy.sandbox.models.formula import formula, I
>>> I()
array(1.0)
>>> I(nrow=5)
array([ 1., 1., 1., 1., 1.])
>>> f=formula(I)
>>> f(nrow=5)
array([1, 1, 1, 1, 1])
"""
| t = self.termnames()
if name in t:
return self.terms[t.index(name)]
else:
raise KeyError, 'formula has no such term: %s' % repr(name) | identifier_body |
formula.py | """
Provides the basic classes needed to specify statistical models.
"""
import copy
import types
import numpy as N
__docformat__ = 'restructuredtext'
default_namespace = {}
class term(object):
"""
This class is very simple: it is just a named term in a model formula.
It is also callable: by default it namespace[self.name], where namespace
defaults to formula.default_namespace.
When called in an instance of formula,
the namespace used is that formula's namespace.
"""
def __pow__(self, power):
"""
Raise the quantitative term's values to an integer power, i.e.
polynomial.
"""
try:
power = float(power)
except:
raise ValueError, 'expecting a float'
if power == int(power):
name = '%s^%d' % (self.name, int(power))
else:
name = '%s^%0.2f' % (self.name, power)
value = quantitative(name, func=self, transform=lambda x: N.power(x, power))
value.power = power
value.namespace = self.namespace
return value
def __init__(self, name, func=None, termname=None):
self.name = name
self.__namespace = None
if termname is None:
self.termname = name
else:
self.termname = termname
if type(self.termname) is not types.StringType:
raise ValueError, 'expecting a string for termname'
if func:
self.func = func
# Namespace in which self.name will be looked up in, if needed
def _get_namespace(self):
if isinstance(self.__namespace, N.ndarray):
return self.__namespace
else: return self.__namespace or default_namespace
def _set_namespace(self, value): self.__namespace = value
def _del_namespace(self): del self.__namespace
namespace = property(_get_namespace, _set_namespace, _del_namespace)
def __str__(self):
"""
'<term: %s>' % self.termname
"""
return '<term: %s>' % self.termname
def __add__(self, other):
"""
formula(self) + formula(other)
"""
other = formula(other, namespace=self.namespace)
f = other + self
f.namespace = self.namespace
return f
def __mul__(self, other):
"""
formula(self) * formula(other)
"""
if other.name is 'intercept':
f = formula(self, namespace=self.namespace)
elif self.name is 'intercept':
f = formula(other, namespace=other.namespace)
else:
other = formula(other, namespace=self.namespace)
f = other * self
f.namespace = self.namespace
return f
def names(self):
"""
Return the names of the columns in design associated to the terms,
i.e. len(self.names()) = self().shape[0].
"""
if type(self.name) is types.StringType:
return [self.name]
else:
return list(self.name)
def __call__(self, *args, **kw):
"""
Return the columns associated to self in a design matrix.
If the term has no 'func' attribute, it returns
``self.namespace[self.termname]``
else, it returns
``self.func(*args, **kw)``
"""
if not hasattr(self, 'func'):
val = self.namespace[self.termname]
else:
val = self.func
if callable(val):
|
val = N.asarray(val)
return N.squeeze(val)
class factor(term):
"""
A categorical factor.
"""
def __init__(self, termname, keys, ordinal=False):
"""
factor is initialized with keys, representing all valid
levels of the factor.
"""
self.keys = list(set(keys))
self.keys.sort()
self._name = termname
self.termname = termname
self.ordinal = ordinal
if self.ordinal:
name = self.name
else:
name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]
term.__init__(self, name, termname=self.termname, func=self.get_columns)
def get_columns(self, *args, **kw):
"""
Calling function for factor instance.
"""
v = self.namespace[self._name]
while True:
if callable(v):
if hasattr(v, "namespace"):
v.namespace = self.namespace
v = v(*args, **kw)
else: break
if self.ordinal:
col = [float(self.keys.index(v[i])) for i in range(len(self.keys))]
return N.array(col)
else:
n = len(v)
value = []
for key in self.keys:
col = [float((v[i] == key)) for i in range(n)]
value.append(col)
return N.array(value)
def values(self, *args, **kw):
"""
Return the keys of the factor, rather than the columns of the design
matrix.
"""
del(self.func)
val = self(*args, **kw)
self.func = self.get_columns
return val
def verify(self, values):
"""
Verify that all values correspond to valid keys in self.
"""
s = set(values)
if not s.issubset(self.keys):
raise ValueError, 'unknown keys in values'
def __add__(self, other):
"""
formula(self) + formula(other)
When adding \'intercept\' to a factor, this just returns
formula(self, namespace=self.namespace)
"""
if other.name is 'intercept':
return formula(self, namespace=self.namespace)
else:
return term.__add__(self, other)
def main_effect(self, reference=None):
"""
Return the 'main effect' columns of a factor, choosing
a reference column number to remove.
"""
if reference is None:
reference = 0
names = self.names()
def maineffect_func(value, reference=reference):
rvalue = []
keep = range(value.shape[0])
keep.pop(reference)
for i in range(len(keep)):
rvalue.append(value[keep[i]] - value[reference])
return N.array(rvalue)
keep = range(len(self.names()))
keep.pop(reference)
__names = self.names()
_names = ['%s-%s' % (__names[keep[i]], __names[reference]) for i in range(len(keep))]
value = quantitative(_names, func=self,
termname='%s:maineffect' % self.termname,
transform=maineffect_func)
value.namespace = self.namespace
return value
class quantitative(term):
"""
A subclass of term that can be used to apply point transformations
of another term, i.e. to take powers:
>>> import numpy as N
>>> from scipy.sandbox.models import formula
>>> X = N.linspace(0,10,101)
>>> x = formula.term('X')
>>> x.namespace={'X':X}
>>> x2 = x**2
>>> print N.allclose(x()**2, x2())
True
>>> x3 = formula.quantitative('x2', func=x, transform=lambda x: x**2)
>>> x3.namespace = x.namespace
>>> print N.allclose(x()**2, x3())
True
"""
def __init__(self, name, func=None, termname=None, transform=lambda x: x):
self.transform = transform
term.__init__(self, name, func=func, termname=termname)
def __call__(self, *args, **kw):
"""
A quantitative is just like term, except there is an additional
transformation: self.transform.
"""
return self.transform(term.__call__(self, *args, **kw))
class formula(object):
"""
A formula object for manipulating design matrices in regression models,
essentially consisting of a list of term instances.
The object supports addition and multiplication which correspond
to concatenation and pairwise multiplication, respectively,
of the columns of the two formulas.
"""
def _get_namespace(self):
if isinstance(self.__namespace, N.ndarray):
return self.__namespace
else: return self.__namespace or default_namespace
def _set_namespace(self, value): self.__namespace = value
def _del_namespace(self): del self.__namespace
namespace = property(_get_namespace, _set_namespace, _del_namespace)
def _terms_changed(self):
self._names = self.names()
self._termnames = self.termnames()
def __init__(self, termlist, namespace=default_namespace):
"""
Create a formula from either:
i. a `formula` object
ii. a sequence of `term` instances
iii. one `term`
"""
self.__namespace = namespace
if isinstance(termlist, formula):
self.terms = copy.copy(list(termlist.terms))
elif type(termlist) is types.ListType:
self.terms = termlist
elif isinstance(termlist, term):
self.terms = [termlist]
else:
raise ValueError
self._terms_changed()
def __str__(self):
"""
String representation of list of termnames of a formula.
"""
value = []
for term in self.terms:
value += [term.termname]
return '<formula: %s>' % ' + '.join(value)
def __call__(self, *args, **kw):
"""
Create (transpose) of the design matrix of the formula within
namespace. Extra arguments are passed to each term instance. If
the formula just contains an intercept, then the keyword
argument 'nrow' indicates the number of rows (observations).
"""
if 'namespace' in kw:
namespace = kw['namespace']
else:
namespace = self.namespace
allvals = []
intercept = False
iindex = 0
for t in self.terms:
t.namespace = namespace
val = t(*args, **kw)
isintercept = False
if hasattr(t, "termname"):
if t.termname == 'intercept':
intercept = True
isintercept = True
interceptindex = iindex
allvals.append(None)
if val.ndim == 1 and not isintercept:
val.shape = (1, val.shape[0])
allvals.append(val)
elif not isintercept:
allvals.append(val)
iindex += 1
if not intercept:
try:
allvals = N.concatenate(allvals)
except:
pass
else:
nrow = kw.get('nrow', -1)
if allvals != []:
if interceptindex > 0:
n = allvals[0].shape[1]
else:
n = allvals[1].shape[1]
allvals[interceptindex] = N.ones((1,n), N.float64)
allvals = N.concatenate(allvals)
elif nrow <= 1:
raise ValueError, 'with only intercept in formula, keyword \'nrow\' argument needed'
else:
allvals = I(nrow=nrow)
allvals.shape = (1,) + allvals.shape
return allvals
def hasterm(self, query_term):
"""
Determine whether a given term is in a formula.
"""
if not isinstance(query_term, formula):
if type(query_term) == type("name"):
try: query = self[query_term]
except: return False
elif isinstance(query_term, term):
return query_term.termname in self.termnames()
elif len(query_term.terms) == 1:
query_term = query_term.terms[0]
return query_term.termname in self.termnames()
else:
raise ValueError, 'more than one term passed to hasterm'
def __getitem__(self, name):
t = self.termnames()
if name in t:
return self.terms[t.index(name)]
else:
raise KeyError, 'formula has no such term: %s' % repr(name)
def termcolumns(self, query_term, dict=False):
"""
Return a list of the indices of all columns associated
to a given term.
"""
if self.hasterm(query_term):
names = query_term.names()
value = {}
for name in names:
value[name] = self._names.index(name)
else:
raise ValueError, 'term not in formula'
if dict:
return value
else:
return value.values()
def names(self):
"""
Return a list of the names in the formula. The order of the
names corresponds to the order of the columns when self
is evaluated.
"""
allnames = []
for term in self.terms:
allnames += term.names()
return allnames
def termnames(self):
"""
Return a list of the term names in the formula. These
are the names of each term instance in self.
"""
names = []
for term in self.terms:
names += [term.termname]
return names
def design(self, *args, **kw):
"""
``transpose(self(*args, **kw))``
"""
return self(*args, **kw).T
def __mul__(self, other, nested=False):
"""
This returns a formula whose columns are the pairwise
product of the columns of self and other.
TO DO: check for nesting relationship. Should not be too difficult.
"""
other = formula(other, namespace=self.namespace)
selftermnames = self.termnames()
othertermnames = other.termnames()
I = len(selftermnames)
J = len(othertermnames)
terms = []
termnames = []
for i in range(I):
for j in range(J):
termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))
pieces = termname.split('*')
pieces.sort()
termname = '*'.join(pieces)
termnames.append(termname)
selfnames = self.terms[i].names()
othernames = other.terms[j].names()
if self.terms[i].name is 'intercept':
_term = other.terms[j]
_term.namespace = other.namespace
elif other.terms[j].name is 'intercept':
_term = self.terms[i]
_term.namespace = self.namespace
else:
names = []
d1 = len(selfnames)
d2 = len(othernames)
for r in range(d1):
for s in range(d2):
name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))
pieces = name.split('*')
pieces.sort()
name = '*'.join(pieces)
names.append(name)
def product_func(value, d1=d1, d2=d2):
out = []
for r in range(d1):
for s in range(d2):
out.append(value[r] * value[d1+s])
return N.array(out)
sumterms = self + other
sumterms.terms = [self, other] # enforce the order we want
sumterms.namespace = self.namespace
_term = quantitative(names, func=sumterms, termname=termname,
transform=product_func)
_term.namespace = self.namespace
terms.append(_term)
return formula(terms, namespace=self.namespace)
def __add__(self, other):
"""
Return a formula whose columns are the
concatenation of the columns of self and other.
terms in the formula are sorted alphabetically.
"""
other = formula(other, namespace=self.namespace)
terms = self.terms + other.terms
pieces = [(term.name, term) for term in terms]
pieces.sort()
terms = [piece[1] for piece in pieces]
return formula(terms, namespace=self.namespace)
def __sub__(self, other):
"""
Return a formula with all terms in other removed from self.
If other contains term instances not in formula, this
function does not raise an exception.
"""
other = formula(other, namespace=self.namespace)
terms = copy.copy(self.terms)
for term in other.terms:
for i in range(len(terms)):
if terms[i].termname == term.termname:
terms.pop(i)
break
return formula(terms, namespace=self.namespace)
def isnested(A, B, namespace=globals()):
"""
Is factor B nested within factor A or vice versa: a very crude test
which depends on the namespace.
If they are nested, returns (True, F) where F is the finest
level of the relationship. Otherwise, returns (False, None)
"""
a = A(namespace, values=True)[0]
b = B(namespace, values=True)[0]
if len(a) != len(b):
raise ValueError, 'A() and B() should be sequences of the same length'
nA = len(set(a))
nB = len(set(b))
n = max(nA, nB)
AB = [(a[i],b[i]) for i in range(len(a))]
nAB = len(set(AB))
if nAB == n:
if nA > nB:
F = A
else:
F = B
return (True, F)
else:
return (False, None)
def _intercept_fn(nrow=1, **extra):
return N.ones((1,nrow))
I = term('intercept', func=_intercept_fn)
I.__doc__ = """
Intercept term in a formula. If intercept is the
only term in the formula, then a keywords argument
\'nrow\' is needed.
>>> from scipy.sandbox.models.formula import formula, I
>>> I()
array(1.0)
>>> I(nrow=5)
array([ 1., 1., 1., 1., 1.])
>>> f=formula(I)
>>> f(nrow=5)
array([1, 1, 1, 1, 1])
"""
| if hasattr(val, "namespace"):
val.namespace = self.namespace
val = val(*args, **kw) | conditional_block |
backup_slack.py | #!/usr/bin/env python
# Slack scraper for logging messages and attachments in slack
# clarence.wret@gmail.com, cwret@fnal.gov
# Slacker import
from slacker import Slacker
import os
import operator
import datetime
import time
import re
# Neede for exit
import sys
# Needed to pull files from Slack
import urllib2
from shutil import copyfile
# The Slack legacy API key
api_key = "YOUR_API_KEY"
# Make the slacker object which we use for the API
slack = Slacker(api_key)
# The time pattern we want
time_pattern = "%Y-%m-%d_%H:%M:%S"
# Get the month for today
month = datetime.datetime.today().strftime('%Y_%m')
# The logging directory
logfile_dir = "YOUR_POSTING_DIRECTORY/%s"%(month)
if not os.path.exists(logfile_dir):
os.makedirs(logfile_dir)
# User name of bot
user="USER"
# Icon of bot
icon="ICON_POSTER"
# The "header" for the Slack message, aka pretext
subject="MESSAGE"
# Do we post to channels?
post_to_channel = False
# The channel that posts the results of the logger
log_channel = "CHANNEL_TO_POST_LOG"
log_channel_priv = "CHANNEL_TO_POST_PRIVATE_LOG"
# A message class which we loop over in main
class Message:
def __init__(self, message, users):
# Get the text of the message
self.text = message["text"]
# Get the time stamp
self.timestamp, self.msg_id = map(int, message["ts"].split("."))
# Set the time
self.time_formatted = datetime.datetime.fromtimestamp(self.timestamp).strftime(time_pattern)
# Check if the message is a file attachment
try:
self.subtype = message["files"]
except KeyError:
self.subtype = None
self.link = []
self.linkname = []
# Get some file shares and hosted
if self.subtype != None:
# May be many attachments in one message
for tempfile in message["files"]:
# Only care about hosted files
if tempfile["mode"] == "hosted":
self.link.append(tempfile["url_private"])
self.linkname.append(tempfile["name"])
extension = os.path.splitext(self.linkname[-1])[1]
self.linkname.append((os.path.splitext(self.linkname[-1])[0]+"_"+self.time_formatted+extension).replace(" ", "_"))
# Naming of messages is wildly inconsistent...
try:
self.uid = message["user"]
self.username = users[self.uid]
# If something goes wrong with our key
except KeyError:
try:
# Maybe this logged as a bot
self.uid = message["bot_id"]
self.username = message["username"]
# The logging bot (running this program) formats in a special way
if self.username == user:
self.text = message["attachments"][0]["pretext"]+message["attachments"][0]["text"]
except KeyError:
# or maybe this was a comment of a comment
try:
self.uid = message["comment"]["user"]
self.username = users[self.uid]
except KeyError:
# or maybe this was a comment of a attachment
try:
self.uid = message["attachements"]["bot_id"]
self.username = users["username"]
except KeyError:
self.uid = message["bot_id"]
# The official GitHub app seems to post messages, uhm, differently
if self.uid == "B1QTP89JT":
self.username = "GitHub"
altmessage = ""
# GitHub can't deal with Leila's name or one of Kirsty's commit containing \xef
try:
altmessage = message["attachments"][0]["pretext"]
except UnicodeEncodeError:
altmessage = message["attachments"][0]["pretext"].encode('utf-8')
except KeyError:
pass
try:
altmessage += message["attachments"][0]["text"]
except UnicodeEncodeError:
altmessage += message["attachments"][0]["text"].encode('utf-8')
except KeyError:
pass
self.text = altmessage
else:
self.username = "Unknown bot"
def AsLine(self, replacement_dicts):
l = u"[%s]\n %s: %s"%(self.time_formatted, self.username.ljust(17), self.text)
for d in replacement_dicts:
for k, v in d.iteritems():
l = l.replace(k, v)
return l
# The main function to run
def main():
# Get the users, channels and private channels
users = GetUsers()
channels = GetChannels()
priv_channels = GetChannelsPrivate()
# The channel ids for the logging channels
log_channel_id = ""
log_channel_id_priv = ""
#############################
# Do the public channels
n_new_lines = dict([(c,0) for c in channels.iterkeys()])
n_new_attach = dict([(c,0) for c in channels.iterkeys()])
for chan_id, chan_name in channels.iteritems():
logfile_name = logfile_dir+"/%s_log_%s.txt"%(chan_name, month)
last_datetime = get_last_message_datetime(logfile_name)
last_timestamp = time.mktime(last_datetime.timetuple())
# Last true referes to if channels is private
# Slack API changed a few months ago
raw_messages = GetFullMessages(chan_id, chan_name, False)
messages = [Message(m, users) for m in raw_messages]
# Get rid of messages which are too old
messages = [m for m in messages if m.timestamp > last_timestamp]
messages.sort(key=lambda x: x.timestamp)
# Find the logging channels id (not name!)
if chan_name == log_channel:
log_channel_id = chan_id
if chan_name == log_channel_priv:
log_channel_id_priv = chan_id
# Open the file to append to and write the log
with open(logfile_name,"a") as f:
for m in messages:
line = m.AsLine([channels, users])+"\n"
f.write(line.encode('utf8'))
n_new_lines[chan_id] += 1
# Get the attachments in the messages
for m in messages:
if m.link != None:
# Make the directory
logfile_img = logfile_name.strip(".txt")+"_img"
if not os.path.exists(logfile_img):
os.makedirs(logfile_img)
for filenumber in range(len(m.link)):
filename = logfile_img+"/"+m.linkname[filenumber]
# Make the OAuth request using the slack key
req = urllib2.Request(m.link[filenumber], None, {'Authorization' : 'Bearer '+api_key})
response = urllib2.urlopen(req)
file = open(filename, 'wb')
file.write(response.read())
file.close()
n_new_attach[chan_id] += 1
# Finally copy over the htaccess to the directory
copyfile("/home/cwret/slack_logger/keys/htaccess_pub", logfile_dir+"/.htaccess")
#############################
# Now do the private channels
n_priv_new_lines = dict([(c,0) for c in priv_channels.iterkeys()])
n_new_attach_priv = dict([(c,0) for c in priv_channels.iterkeys()])
# Add something private for private channels
privlogfile_dir = logfile_dir+"/private"
# Make the directory
if not os.path.exists(privlogfile_dir):
os.makedirs(privlogfile_dir)
for chan_id, chan_name in priv_channels.iteritems():
logfile_name = privlogfile_dir+"/%s_log_%s.txt"%(chan_name,month)
last_datetime = get_last_message_datetime(logfile_name)
last_timestamp = time.mktime(last_datetime.timetuple())
# Last true referes to if channels is private
# Slack API changed a few months ago
raw_messages = GetFullMessages(chan_id, chan_name, True)
messages = [Message(m, users) for m in raw_messages]
# Get rid of messages which are too old
messages = [m for m in messages if m.timestamp > last_timestamp]
messages.sort(key=lambda x: x.timestamp)
if chan_name == log_channel:
log_channel_id = chan_id
if chan_name == log_channel_priv:
log_channel_id_priv = chan_id
# Open the file to append to and write the log
with open(logfile_name,"a") as f:
for m in messages:
line = m.AsLine([priv_channels, users])+"\n"
f.write(line.encode('utf8'))
n_priv_new_lines[chan_id] += 1
# Get the attachments in the private messages
for m in messages:
if m.link != None:
# Make the directory
logfile_img = logfile_name.strip(".txt")+"_img"
if not os.path.exists(logfile_img):
os.makedirs(logfile_img)
for filenumber in range(len(m.link)):
filename = logfile_img+"/"+m.linkname[filenumber]
# Make the OAuth request using the slack key
req = urllib2.Request(m.link[filenumber], None, {'Authorization' : 'Bearer '+api_key})
response = urllib2.urlopen(req)
file = open(filename, 'wb')
file.write(response.read())
file.close()
n_new_attach[chan_id] += 1
# Finally copy over the htaccess to the directory
copyfile("/home/cwret/slack_logger/keys/htaccess_priv", privlogfile_dir+"/.htaccess")
if post_to_channel:
# The body we will use to send to Slack
body = ""
if log_channel_id != None:
for chan_id,n in n_new_lines.iteritems():
output = "Wrote "+`n`+" messages for #"+channels[chan_id]
body += output+"\n"
print output
post=subject
slack.chat.post_message(
channel=log_channel_id,
as_user=False,
username=user,
icon_url=icon,
attachments=[{"pretext": post,
"fallback": post,
"color": "#36a64f",
"footer": user,
"text": body}])
# Reset the body for the private messages
body=""
if log_channel_id != None:
for chan_id,n in n_priv_new_lines.iteritems():
|
post=subject+"/private"
slack.chat.post_message(
channel=log_channel_id_priv,
as_user=False,
username=user,
icon_url=icon,
attachments=[{"pretext": post,
"fallback": post,
"color": "#36a64f",
"footer": user,
"text": body}])
return
# Get the last logger message date and time
def get_last_message_datetime(logfile_name):
# Open the logfile that might already be written
try:
f = open(logfile_name, "r")
except IOError:
return datetime.datetime.fromtimestamp(0)
lines = reversed([l for l in f])
f.close()
matcher = re.compile(r"^\[(\d\d\d\d-\d\d-\d\d\_\d\d:\d\d:\d\d)]") # the date-time pattern above
last_datetime = datetime.datetime.fromtimestamp(0)
for l in lines:
m = matcher.search(l)
if m is None: continue
last_time_formatted = m.group(1)
last_datetime = datetime.datetime.strptime(last_time_formatted, time_pattern)
break
return last_datetime
# Get a dict of users for a given slack
def GetUsers():
Users = dict()
l = slack.users.list().body["members"]
for u in l:
Users[u["id"]] = u["name"]
return Users
# Get a dict of channels for a given slack
def GetChannels():
Channels = dict()
l = slack.channels.list().body["channels"]
for c in l:
Channels[c["id"]] = c["name"]
return Channels
# Get a dict of private channels for a given slack
def GetChannelsPrivate():
Priv_Channels = dict()
l = slack.groups.list().body["groups"]
for c in l:
Priv_Channels[c["id"]] = c["name"]
return Priv_Channels
def GetFiles():
Files = dict()
l = slack.files.list()
# Get a full list of messages from Slack
def GetFullMessages(chan_id, chan_name, priv):
# Get the last 1000 messages (maximum we can get from Slack at one time)
if priv == False:
resp = slack.channels.history(chan_id, count=1000, inclusive=True)
else:
resp = slack.groups.history(chan_id, count=1000, inclusive=True)
raw_messages = resp.body["messages"]
# This is true if there are more messages we can get
has_more = resp.body["has_more"]
while has_more:
# Get the timestamp for the earliest message we got in previous iteration
timestamp = resp.body["messages"][-1]["ts"]
# Make another request for the next messages
if priv == False:
resp = slack.channels.history(chan_id, count=1000, inclusive=True, latest=timestamp)
else:
resp = slack.groups.history(chan_id, count=1000, inclusive=True, latest=timestamp)
# Prepend our older messages
raw_messages = resp.body["messages"] + raw_messages
# Check if we still have more
has_more = resp.body["has_more"]
return raw_messages
# The main we run
if __name__=="__main__":
main()
| output = "Wrote "+`n`+" private messages for #"+priv_channels[chan_id]
body += output+"\n"
print output | conditional_block |
backup_slack.py | #!/usr/bin/env python
# Slack scraper for logging messages and attachments in slack
# clarence.wret@gmail.com, cwret@fnal.gov
# Slacker import
from slacker import Slacker
import os
import operator
import datetime
import time
import re
# Neede for exit
import sys
# Needed to pull files from Slack
import urllib2
from shutil import copyfile
# The Slack legacy API key
api_key = "YOUR_API_KEY"
# Make the slacker object which we use for the API
slack = Slacker(api_key)
# The time pattern we want
time_pattern = "%Y-%m-%d_%H:%M:%S"
# Get the month for today
month = datetime.datetime.today().strftime('%Y_%m')
# The logging directory
logfile_dir = "YOUR_POSTING_DIRECTORY/%s"%(month)
if not os.path.exists(logfile_dir):
os.makedirs(logfile_dir)
# User name of bot
user="USER"
# Icon of bot
icon="ICON_POSTER"
# The "header" for the Slack message, aka pretext
subject="MESSAGE"
# Do we post to channels?
post_to_channel = False
# The channel that posts the results of the logger
log_channel = "CHANNEL_TO_POST_LOG"
log_channel_priv = "CHANNEL_TO_POST_PRIVATE_LOG"
# A message class which we loop over in main
class Message:
def __init__(self, message, users):
# Get the text of the message
self.text = message["text"]
# Get the time stamp
self.timestamp, self.msg_id = map(int, message["ts"].split("."))
# Set the time
self.time_formatted = datetime.datetime.fromtimestamp(self.timestamp).strftime(time_pattern)
# Check if the message is a file attachment
try:
self.subtype = message["files"]
except KeyError:
self.subtype = None
self.link = []
self.linkname = []
# Get some file shares and hosted
if self.subtype != None:
# May be many attachments in one message
for tempfile in message["files"]:
# Only care about hosted files
if tempfile["mode"] == "hosted":
self.link.append(tempfile["url_private"])
self.linkname.append(tempfile["name"])
extension = os.path.splitext(self.linkname[-1])[1]
self.linkname.append((os.path.splitext(self.linkname[-1])[0]+"_"+self.time_formatted+extension).replace(" ", "_"))
# Naming of messages is wildly inconsistent...
try:
self.uid = message["user"]
self.username = users[self.uid]
# If something goes wrong with our key
except KeyError:
try:
# Maybe this logged as a bot
self.uid = message["bot_id"]
self.username = message["username"]
# The logging bot (running this program) formats in a special way
if self.username == user:
self.text = message["attachments"][0]["pretext"]+message["attachments"][0]["text"]
except KeyError:
# or maybe this was a comment of a comment
try:
self.uid = message["comment"]["user"]
self.username = users[self.uid]
except KeyError:
# or maybe this was a comment of a attachment
try:
self.uid = message["attachements"]["bot_id"]
self.username = users["username"]
except KeyError:
self.uid = message["bot_id"]
# The official GitHub app seems to post messages, uhm, differently
if self.uid == "B1QTP89JT":
self.username = "GitHub"
altmessage = ""
# GitHub can't deal with Leila's name or one of Kirsty's commit containing \xef
try:
altmessage = message["attachments"][0]["pretext"]
except UnicodeEncodeError:
altmessage = message["attachments"][0]["pretext"].encode('utf-8')
except KeyError:
pass
try:
altmessage += message["attachments"][0]["text"]
except UnicodeEncodeError:
altmessage += message["attachments"][0]["text"].encode('utf-8')
except KeyError:
pass
self.text = altmessage
else:
self.username = "Unknown bot"
def AsLine(self, replacement_dicts):
l = u"[%s]\n %s: %s"%(self.time_formatted, self.username.ljust(17), self.text)
for d in replacement_dicts:
for k, v in d.iteritems():
l = l.replace(k, v)
return l
# The main function to run
def main():
# Get the users, channels and private channels
users = GetUsers()
channels = GetChannels()
priv_channels = GetChannelsPrivate()
# The channel ids for the logging channels
log_channel_id = ""
log_channel_id_priv = ""
#############################
# Do the public channels
n_new_lines = dict([(c,0) for c in channels.iterkeys()])
n_new_attach = dict([(c,0) for c in channels.iterkeys()])
for chan_id, chan_name in channels.iteritems():
logfile_name = logfile_dir+"/%s_log_%s.txt"%(chan_name, month)
last_datetime = get_last_message_datetime(logfile_name)
last_timestamp = time.mktime(last_datetime.timetuple())
# Last true referes to if channels is private
# Slack API changed a few months ago
raw_messages = GetFullMessages(chan_id, chan_name, False)
messages = [Message(m, users) for m in raw_messages]
# Get rid of messages which are too old
messages = [m for m in messages if m.timestamp > last_timestamp]
messages.sort(key=lambda x: x.timestamp)
# Find the logging channels id (not name!)
if chan_name == log_channel:
log_channel_id = chan_id
if chan_name == log_channel_priv:
log_channel_id_priv = chan_id
# Open the file to append to and write the log
with open(logfile_name,"a") as f:
for m in messages:
line = m.AsLine([channels, users])+"\n"
f.write(line.encode('utf8'))
n_new_lines[chan_id] += 1
# Get the attachments in the messages
for m in messages:
if m.link != None:
# Make the directory
logfile_img = logfile_name.strip(".txt")+"_img"
if not os.path.exists(logfile_img):
os.makedirs(logfile_img)
for filenumber in range(len(m.link)):
filename = logfile_img+"/"+m.linkname[filenumber]
# Make the OAuth request using the slack key
req = urllib2.Request(m.link[filenumber], None, {'Authorization' : 'Bearer '+api_key})
response = urllib2.urlopen(req)
file = open(filename, 'wb')
file.write(response.read())
file.close()
n_new_attach[chan_id] += 1
# Finally copy over the htaccess to the directory
copyfile("/home/cwret/slack_logger/keys/htaccess_pub", logfile_dir+"/.htaccess")
#############################
# Now do the private channels
n_priv_new_lines = dict([(c,0) for c in priv_channels.iterkeys()])
n_new_attach_priv = dict([(c,0) for c in priv_channels.iterkeys()])
# Add something private for private channels
privlogfile_dir = logfile_dir+"/private"
# Make the directory
if not os.path.exists(privlogfile_dir):
os.makedirs(privlogfile_dir)
for chan_id, chan_name in priv_channels.iteritems():
logfile_name = privlogfile_dir+"/%s_log_%s.txt"%(chan_name,month)
last_datetime = get_last_message_datetime(logfile_name)
last_timestamp = time.mktime(last_datetime.timetuple())
# Last true referes to if channels is private
# Slack API changed a few months ago
raw_messages = GetFullMessages(chan_id, chan_name, True)
messages = [Message(m, users) for m in raw_messages]
# Get rid of messages which are too old
messages = [m for m in messages if m.timestamp > last_timestamp]
messages.sort(key=lambda x: x.timestamp)
if chan_name == log_channel:
log_channel_id = chan_id
if chan_name == log_channel_priv:
log_channel_id_priv = chan_id
# Open the file to append to and write the log
with open(logfile_name,"a") as f:
for m in messages:
line = m.AsLine([priv_channels, users])+"\n"
f.write(line.encode('utf8'))
n_priv_new_lines[chan_id] += 1
# Get the attachments in the private messages
for m in messages:
if m.link != None:
# Make the directory
logfile_img = logfile_name.strip(".txt")+"_img"
if not os.path.exists(logfile_img):
os.makedirs(logfile_img)
for filenumber in range(len(m.link)):
filename = logfile_img+"/"+m.linkname[filenumber]
# Make the OAuth request using the slack key
req = urllib2.Request(m.link[filenumber], None, {'Authorization' : 'Bearer '+api_key})
response = urllib2.urlopen(req)
file = open(filename, 'wb')
file.write(response.read())
file.close()
n_new_attach[chan_id] += 1
# Finally copy over the htaccess to the directory
copyfile("/home/cwret/slack_logger/keys/htaccess_priv", privlogfile_dir+"/.htaccess")
if post_to_channel:
# The body we will use to send to Slack
body = ""
if log_channel_id != None:
for chan_id,n in n_new_lines.iteritems():
output = "Wrote "+`n`+" messages for #"+channels[chan_id]
body += output+"\n"
print output
post=subject
slack.chat.post_message(
channel=log_channel_id,
as_user=False,
username=user,
icon_url=icon,
attachments=[{"pretext": post,
"fallback": post,
"color": "#36a64f",
"footer": user,
"text": body}])
# Reset the body for the private messages
body=""
if log_channel_id != None:
for chan_id,n in n_priv_new_lines.iteritems():
output = "Wrote "+`n`+" private messages for #"+priv_channels[chan_id]
body += output+"\n"
print output
post=subject+"/private"
slack.chat.post_message(
channel=log_channel_id_priv,
as_user=False,
username=user,
icon_url=icon,
attachments=[{"pretext": post,
"fallback": post,
"color": "#36a64f",
"footer": user,
"text": body}])
return
# Get the last logger message date and time
def get_last_message_datetime(logfile_name):
# Open the logfile that might already be written
try:
f = open(logfile_name, "r")
except IOError:
return datetime.datetime.fromtimestamp(0)
lines = reversed([l for l in f])
f.close()
matcher = re.compile(r"^\[(\d\d\d\d-\d\d-\d\d\_\d\d:\d\d:\d\d)]") # the date-time pattern above
last_datetime = datetime.datetime.fromtimestamp(0)
for l in lines:
m = matcher.search(l)
if m is None: continue
last_time_formatted = m.group(1)
last_datetime = datetime.datetime.strptime(last_time_formatted, time_pattern)
break
return last_datetime
# Get a dict of users for a given slack
def GetUsers():
Users = dict()
l = slack.users.list().body["members"]
for u in l:
Users[u["id"]] = u["name"]
return Users
# Get a dict of channels for a given slack
def GetChannels():
|
# Get a dict of private channels for a given slack
def GetChannelsPrivate():
Priv_Channels = dict()
l = slack.groups.list().body["groups"]
for c in l:
Priv_Channels[c["id"]] = c["name"]
return Priv_Channels
def GetFiles():
Files = dict()
l = slack.files.list()
# Get a full list of messages from Slack
def GetFullMessages(chan_id, chan_name, priv):
# Get the last 1000 messages (maximum we can get from Slack at one time)
if priv == False:
resp = slack.channels.history(chan_id, count=1000, inclusive=True)
else:
resp = slack.groups.history(chan_id, count=1000, inclusive=True)
raw_messages = resp.body["messages"]
# This is true if there are more messages we can get
has_more = resp.body["has_more"]
while has_more:
# Get the timestamp for the earliest message we got in previous iteration
timestamp = resp.body["messages"][-1]["ts"]
# Make another request for the next messages
if priv == False:
resp = slack.channels.history(chan_id, count=1000, inclusive=True, latest=timestamp)
else:
resp = slack.groups.history(chan_id, count=1000, inclusive=True, latest=timestamp)
# Prepend our older messages
raw_messages = resp.body["messages"] + raw_messages
# Check if we still have more
has_more = resp.body["has_more"]
return raw_messages
# The main we run
if __name__=="__main__":
main()
| Channels = dict()
l = slack.channels.list().body["channels"]
for c in l:
Channels[c["id"]] = c["name"]
return Channels | identifier_body |
backup_slack.py | #!/usr/bin/env python
# Slack scraper for logging messages and attachments in slack
# clarence.wret@gmail.com, cwret@fnal.gov
# Slacker import
from slacker import Slacker
import os
import operator
import datetime
import time
import re
# Neede for exit
import sys
# Needed to pull files from Slack
import urllib2
from shutil import copyfile
# The Slack legacy API key
api_key = "YOUR_API_KEY"
# Make the slacker object which we use for the API
slack = Slacker(api_key)
# The time pattern we want
time_pattern = "%Y-%m-%d_%H:%M:%S"
# Get the month for today
month = datetime.datetime.today().strftime('%Y_%m')
# The logging directory
logfile_dir = "YOUR_POSTING_DIRECTORY/%s"%(month)
if not os.path.exists(logfile_dir):
os.makedirs(logfile_dir)
# User name of bot
user="USER"
# Icon of bot
icon="ICON_POSTER"
# The "header" for the Slack message, aka pretext
subject="MESSAGE"
# Do we post to channels?
post_to_channel = False
# The channel that posts the results of the logger
log_channel = "CHANNEL_TO_POST_LOG"
log_channel_priv = "CHANNEL_TO_POST_PRIVATE_LOG"
# A message class which we loop over in main
class Message:
def __init__(self, message, users):
# Get the text of the message
self.text = message["text"]
# Get the time stamp
self.timestamp, self.msg_id = map(int, message["ts"].split("."))
# Set the time
self.time_formatted = datetime.datetime.fromtimestamp(self.timestamp).strftime(time_pattern)
# Check if the message is a file attachment
try:
self.subtype = message["files"] | self.subtype = None
self.link = []
self.linkname = []
# Get some file shares and hosted
if self.subtype != None:
# May be many attachments in one message
for tempfile in message["files"]:
# Only care about hosted files
if tempfile["mode"] == "hosted":
self.link.append(tempfile["url_private"])
self.linkname.append(tempfile["name"])
extension = os.path.splitext(self.linkname[-1])[1]
self.linkname.append((os.path.splitext(self.linkname[-1])[0]+"_"+self.time_formatted+extension).replace(" ", "_"))
# Naming of messages is wildly inconsistent...
try:
self.uid = message["user"]
self.username = users[self.uid]
# If something goes wrong with our key
except KeyError:
try:
# Maybe this logged as a bot
self.uid = message["bot_id"]
self.username = message["username"]
# The logging bot (running this program) formats in a special way
if self.username == user:
self.text = message["attachments"][0]["pretext"]+message["attachments"][0]["text"]
except KeyError:
# or maybe this was a comment of a comment
try:
self.uid = message["comment"]["user"]
self.username = users[self.uid]
except KeyError:
# or maybe this was a comment of a attachment
try:
self.uid = message["attachements"]["bot_id"]
self.username = users["username"]
except KeyError:
self.uid = message["bot_id"]
# The official GitHub app seems to post messages, uhm, differently
if self.uid == "B1QTP89JT":
self.username = "GitHub"
altmessage = ""
# GitHub can't deal with Leila's name or one of Kirsty's commit containing \xef
try:
altmessage = message["attachments"][0]["pretext"]
except UnicodeEncodeError:
altmessage = message["attachments"][0]["pretext"].encode('utf-8')
except KeyError:
pass
try:
altmessage += message["attachments"][0]["text"]
except UnicodeEncodeError:
altmessage += message["attachments"][0]["text"].encode('utf-8')
except KeyError:
pass
self.text = altmessage
else:
self.username = "Unknown bot"
def AsLine(self, replacement_dicts):
l = u"[%s]\n %s: %s"%(self.time_formatted, self.username.ljust(17), self.text)
for d in replacement_dicts:
for k, v in d.iteritems():
l = l.replace(k, v)
return l
# The main function to run
def main():
# Get the users, channels and private channels
users = GetUsers()
channels = GetChannels()
priv_channels = GetChannelsPrivate()
# The channel ids for the logging channels
log_channel_id = ""
log_channel_id_priv = ""
#############################
# Do the public channels
n_new_lines = dict([(c,0) for c in channels.iterkeys()])
n_new_attach = dict([(c,0) for c in channels.iterkeys()])
for chan_id, chan_name in channels.iteritems():
logfile_name = logfile_dir+"/%s_log_%s.txt"%(chan_name, month)
last_datetime = get_last_message_datetime(logfile_name)
last_timestamp = time.mktime(last_datetime.timetuple())
# Last true referes to if channels is private
# Slack API changed a few months ago
raw_messages = GetFullMessages(chan_id, chan_name, False)
messages = [Message(m, users) for m in raw_messages]
# Get rid of messages which are too old
messages = [m for m in messages if m.timestamp > last_timestamp]
messages.sort(key=lambda x: x.timestamp)
# Find the logging channels id (not name!)
if chan_name == log_channel:
log_channel_id = chan_id
if chan_name == log_channel_priv:
log_channel_id_priv = chan_id
# Open the file to append to and write the log
with open(logfile_name,"a") as f:
for m in messages:
line = m.AsLine([channels, users])+"\n"
f.write(line.encode('utf8'))
n_new_lines[chan_id] += 1
# Get the attachments in the messages
for m in messages:
if m.link != None:
# Make the directory
logfile_img = logfile_name.strip(".txt")+"_img"
if not os.path.exists(logfile_img):
os.makedirs(logfile_img)
for filenumber in range(len(m.link)):
filename = logfile_img+"/"+m.linkname[filenumber]
# Make the OAuth request using the slack key
req = urllib2.Request(m.link[filenumber], None, {'Authorization' : 'Bearer '+api_key})
response = urllib2.urlopen(req)
file = open(filename, 'wb')
file.write(response.read())
file.close()
n_new_attach[chan_id] += 1
# Finally copy over the htaccess to the directory
copyfile("/home/cwret/slack_logger/keys/htaccess_pub", logfile_dir+"/.htaccess")
#############################
# Now do the private channels
n_priv_new_lines = dict([(c,0) for c in priv_channels.iterkeys()])
n_new_attach_priv = dict([(c,0) for c in priv_channels.iterkeys()])
# Add something private for private channels
privlogfile_dir = logfile_dir+"/private"
# Make the directory
if not os.path.exists(privlogfile_dir):
os.makedirs(privlogfile_dir)
for chan_id, chan_name in priv_channels.iteritems():
logfile_name = privlogfile_dir+"/%s_log_%s.txt"%(chan_name,month)
last_datetime = get_last_message_datetime(logfile_name)
last_timestamp = time.mktime(last_datetime.timetuple())
# Last true referes to if channels is private
# Slack API changed a few months ago
raw_messages = GetFullMessages(chan_id, chan_name, True)
messages = [Message(m, users) for m in raw_messages]
# Get rid of messages which are too old
messages = [m for m in messages if m.timestamp > last_timestamp]
messages.sort(key=lambda x: x.timestamp)
if chan_name == log_channel:
log_channel_id = chan_id
if chan_name == log_channel_priv:
log_channel_id_priv = chan_id
# Open the file to append to and write the log
with open(logfile_name,"a") as f:
for m in messages:
line = m.AsLine([priv_channels, users])+"\n"
f.write(line.encode('utf8'))
n_priv_new_lines[chan_id] += 1
# Get the attachments in the private messages
for m in messages:
if m.link != None:
# Make the directory
logfile_img = logfile_name.strip(".txt")+"_img"
if not os.path.exists(logfile_img):
os.makedirs(logfile_img)
for filenumber in range(len(m.link)):
filename = logfile_img+"/"+m.linkname[filenumber]
# Make the OAuth request using the slack key
req = urllib2.Request(m.link[filenumber], None, {'Authorization' : 'Bearer '+api_key})
response = urllib2.urlopen(req)
file = open(filename, 'wb')
file.write(response.read())
file.close()
n_new_attach[chan_id] += 1
# Finally copy over the htaccess to the directory
copyfile("/home/cwret/slack_logger/keys/htaccess_priv", privlogfile_dir+"/.htaccess")
if post_to_channel:
# The body we will use to send to Slack
body = ""
if log_channel_id != None:
for chan_id,n in n_new_lines.iteritems():
output = "Wrote "+`n`+" messages for #"+channels[chan_id]
body += output+"\n"
print output
post=subject
slack.chat.post_message(
channel=log_channel_id,
as_user=False,
username=user,
icon_url=icon,
attachments=[{"pretext": post,
"fallback": post,
"color": "#36a64f",
"footer": user,
"text": body}])
# Reset the body for the private messages
body=""
if log_channel_id != None:
for chan_id,n in n_priv_new_lines.iteritems():
output = "Wrote "+`n`+" private messages for #"+priv_channels[chan_id]
body += output+"\n"
print output
post=subject+"/private"
slack.chat.post_message(
channel=log_channel_id_priv,
as_user=False,
username=user,
icon_url=icon,
attachments=[{"pretext": post,
"fallback": post,
"color": "#36a64f",
"footer": user,
"text": body}])
return
# Get the last logger message date and time
def get_last_message_datetime(logfile_name):
# Open the logfile that might already be written
try:
f = open(logfile_name, "r")
except IOError:
return datetime.datetime.fromtimestamp(0)
lines = reversed([l for l in f])
f.close()
matcher = re.compile(r"^\[(\d\d\d\d-\d\d-\d\d\_\d\d:\d\d:\d\d)]") # the date-time pattern above
last_datetime = datetime.datetime.fromtimestamp(0)
for l in lines:
m = matcher.search(l)
if m is None: continue
last_time_formatted = m.group(1)
last_datetime = datetime.datetime.strptime(last_time_formatted, time_pattern)
break
return last_datetime
# Get a dict of users for a given slack
def GetUsers():
Users = dict()
l = slack.users.list().body["members"]
for u in l:
Users[u["id"]] = u["name"]
return Users
# Get a dict of channels for a given slack
def GetChannels():
Channels = dict()
l = slack.channels.list().body["channels"]
for c in l:
Channels[c["id"]] = c["name"]
return Channels
# Get a dict of private channels for a given slack
def GetChannelsPrivate():
Priv_Channels = dict()
l = slack.groups.list().body["groups"]
for c in l:
Priv_Channels[c["id"]] = c["name"]
return Priv_Channels
def GetFiles():
Files = dict()
l = slack.files.list()
# Get a full list of messages from Slack
def GetFullMessages(chan_id, chan_name, priv):
# Get the last 1000 messages (maximum we can get from Slack at one time)
if priv == False:
resp = slack.channels.history(chan_id, count=1000, inclusive=True)
else:
resp = slack.groups.history(chan_id, count=1000, inclusive=True)
raw_messages = resp.body["messages"]
# This is true if there are more messages we can get
has_more = resp.body["has_more"]
while has_more:
# Get the timestamp for the earliest message we got in previous iteration
timestamp = resp.body["messages"][-1]["ts"]
# Make another request for the next messages
if priv == False:
resp = slack.channels.history(chan_id, count=1000, inclusive=True, latest=timestamp)
else:
resp = slack.groups.history(chan_id, count=1000, inclusive=True, latest=timestamp)
# Prepend our older messages
raw_messages = resp.body["messages"] + raw_messages
# Check if we still have more
has_more = resp.body["has_more"]
return raw_messages
# The main we run
if __name__=="__main__":
main() | except KeyError: | random_line_split |
backup_slack.py | #!/usr/bin/env python
# Slack scraper for logging messages and attachments in slack
# clarence.wret@gmail.com, cwret@fnal.gov
# Slacker import
from slacker import Slacker
import os
import operator
import datetime
import time
import re
# Neede for exit
import sys
# Needed to pull files from Slack
import urllib2
from shutil import copyfile
# The Slack legacy API key
api_key = "YOUR_API_KEY"
# Make the slacker object which we use for the API
slack = Slacker(api_key)
# The time pattern we want
time_pattern = "%Y-%m-%d_%H:%M:%S"
# Get the month for today
month = datetime.datetime.today().strftime('%Y_%m')
# The logging directory
logfile_dir = "YOUR_POSTING_DIRECTORY/%s"%(month)
if not os.path.exists(logfile_dir):
os.makedirs(logfile_dir)
# User name of bot
user="USER"
# Icon of bot
icon="ICON_POSTER"
# The "header" for the Slack message, aka pretext
subject="MESSAGE"
# Do we post to channels?
post_to_channel = False
# The channel that posts the results of the logger
log_channel = "CHANNEL_TO_POST_LOG"
log_channel_priv = "CHANNEL_TO_POST_PRIVATE_LOG"
# A message class which we loop over in main
class Message:
def __init__(self, message, users):
# Get the text of the message
self.text = message["text"]
# Get the time stamp
self.timestamp, self.msg_id = map(int, message["ts"].split("."))
# Set the time
self.time_formatted = datetime.datetime.fromtimestamp(self.timestamp).strftime(time_pattern)
# Check if the message is a file attachment
try:
self.subtype = message["files"]
except KeyError:
self.subtype = None
self.link = []
self.linkname = []
# Get some file shares and hosted
if self.subtype != None:
# May be many attachments in one message
for tempfile in message["files"]:
# Only care about hosted files
if tempfile["mode"] == "hosted":
self.link.append(tempfile["url_private"])
self.linkname.append(tempfile["name"])
extension = os.path.splitext(self.linkname[-1])[1]
self.linkname.append((os.path.splitext(self.linkname[-1])[0]+"_"+self.time_formatted+extension).replace(" ", "_"))
# Naming of messages is wildly inconsistent...
try:
self.uid = message["user"]
self.username = users[self.uid]
# If something goes wrong with our key
except KeyError:
try:
# Maybe this logged as a bot
self.uid = message["bot_id"]
self.username = message["username"]
# The logging bot (running this program) formats in a special way
if self.username == user:
self.text = message["attachments"][0]["pretext"]+message["attachments"][0]["text"]
except KeyError:
# or maybe this was a comment of a comment
try:
self.uid = message["comment"]["user"]
self.username = users[self.uid]
except KeyError:
# or maybe this was a comment of a attachment
try:
self.uid = message["attachements"]["bot_id"]
self.username = users["username"]
except KeyError:
self.uid = message["bot_id"]
# The official GitHub app seems to post messages, uhm, differently
if self.uid == "B1QTP89JT":
self.username = "GitHub"
altmessage = ""
# GitHub can't deal with Leila's name or one of Kirsty's commit containing \xef
try:
altmessage = message["attachments"][0]["pretext"]
except UnicodeEncodeError:
altmessage = message["attachments"][0]["pretext"].encode('utf-8')
except KeyError:
pass
try:
altmessage += message["attachments"][0]["text"]
except UnicodeEncodeError:
altmessage += message["attachments"][0]["text"].encode('utf-8')
except KeyError:
pass
self.text = altmessage
else:
self.username = "Unknown bot"
def AsLine(self, replacement_dicts):
l = u"[%s]\n %s: %s"%(self.time_formatted, self.username.ljust(17), self.text)
for d in replacement_dicts:
for k, v in d.iteritems():
l = l.replace(k, v)
return l
# The main function to run
def main():
# Get the users, channels and private channels
users = GetUsers()
channels = GetChannels()
priv_channels = GetChannelsPrivate()
# The channel ids for the logging channels
log_channel_id = ""
log_channel_id_priv = ""
#############################
# Do the public channels
n_new_lines = dict([(c,0) for c in channels.iterkeys()])
n_new_attach = dict([(c,0) for c in channels.iterkeys()])
for chan_id, chan_name in channels.iteritems():
logfile_name = logfile_dir+"/%s_log_%s.txt"%(chan_name, month)
last_datetime = get_last_message_datetime(logfile_name)
last_timestamp = time.mktime(last_datetime.timetuple())
# Last true referes to if channels is private
# Slack API changed a few months ago
raw_messages = GetFullMessages(chan_id, chan_name, False)
messages = [Message(m, users) for m in raw_messages]
# Get rid of messages which are too old
messages = [m for m in messages if m.timestamp > last_timestamp]
messages.sort(key=lambda x: x.timestamp)
# Find the logging channels id (not name!)
if chan_name == log_channel:
log_channel_id = chan_id
if chan_name == log_channel_priv:
log_channel_id_priv = chan_id
# Open the file to append to and write the log
with open(logfile_name,"a") as f:
for m in messages:
line = m.AsLine([channels, users])+"\n"
f.write(line.encode('utf8'))
n_new_lines[chan_id] += 1
# Get the attachments in the messages
for m in messages:
if m.link != None:
# Make the directory
logfile_img = logfile_name.strip(".txt")+"_img"
if not os.path.exists(logfile_img):
os.makedirs(logfile_img)
for filenumber in range(len(m.link)):
filename = logfile_img+"/"+m.linkname[filenumber]
# Make the OAuth request using the slack key
req = urllib2.Request(m.link[filenumber], None, {'Authorization' : 'Bearer '+api_key})
response = urllib2.urlopen(req)
file = open(filename, 'wb')
file.write(response.read())
file.close()
n_new_attach[chan_id] += 1
# Finally copy over the htaccess to the directory
copyfile("/home/cwret/slack_logger/keys/htaccess_pub", logfile_dir+"/.htaccess")
#############################
# Now do the private channels
n_priv_new_lines = dict([(c,0) for c in priv_channels.iterkeys()])
n_new_attach_priv = dict([(c,0) for c in priv_channels.iterkeys()])
# Add something private for private channels
privlogfile_dir = logfile_dir+"/private"
# Make the directory
if not os.path.exists(privlogfile_dir):
os.makedirs(privlogfile_dir)
for chan_id, chan_name in priv_channels.iteritems():
logfile_name = privlogfile_dir+"/%s_log_%s.txt"%(chan_name,month)
last_datetime = get_last_message_datetime(logfile_name)
last_timestamp = time.mktime(last_datetime.timetuple())
# Last true referes to if channels is private
# Slack API changed a few months ago
raw_messages = GetFullMessages(chan_id, chan_name, True)
messages = [Message(m, users) for m in raw_messages]
# Get rid of messages which are too old
messages = [m for m in messages if m.timestamp > last_timestamp]
messages.sort(key=lambda x: x.timestamp)
if chan_name == log_channel:
log_channel_id = chan_id
if chan_name == log_channel_priv:
log_channel_id_priv = chan_id
# Open the file to append to and write the log
with open(logfile_name,"a") as f:
for m in messages:
line = m.AsLine([priv_channels, users])+"\n"
f.write(line.encode('utf8'))
n_priv_new_lines[chan_id] += 1
# Get the attachments in the private messages
for m in messages:
if m.link != None:
# Make the directory
logfile_img = logfile_name.strip(".txt")+"_img"
if not os.path.exists(logfile_img):
os.makedirs(logfile_img)
for filenumber in range(len(m.link)):
filename = logfile_img+"/"+m.linkname[filenumber]
# Make the OAuth request using the slack key
req = urllib2.Request(m.link[filenumber], None, {'Authorization' : 'Bearer '+api_key})
response = urllib2.urlopen(req)
file = open(filename, 'wb')
file.write(response.read())
file.close()
n_new_attach[chan_id] += 1
# Finally copy over the htaccess to the directory
copyfile("/home/cwret/slack_logger/keys/htaccess_priv", privlogfile_dir+"/.htaccess")
if post_to_channel:
# The body we will use to send to Slack
body = ""
if log_channel_id != None:
for chan_id,n in n_new_lines.iteritems():
output = "Wrote "+`n`+" messages for #"+channels[chan_id]
body += output+"\n"
print output
post=subject
slack.chat.post_message(
channel=log_channel_id,
as_user=False,
username=user,
icon_url=icon,
attachments=[{"pretext": post,
"fallback": post,
"color": "#36a64f",
"footer": user,
"text": body}])
# Reset the body for the private messages
body=""
if log_channel_id != None:
for chan_id,n in n_priv_new_lines.iteritems():
output = "Wrote "+`n`+" private messages for #"+priv_channels[chan_id]
body += output+"\n"
print output
post=subject+"/private"
slack.chat.post_message(
channel=log_channel_id_priv,
as_user=False,
username=user,
icon_url=icon,
attachments=[{"pretext": post,
"fallback": post,
"color": "#36a64f",
"footer": user,
"text": body}])
return
# Get the last logger message date and time
def get_last_message_datetime(logfile_name):
# Open the logfile that might already be written
try:
f = open(logfile_name, "r")
except IOError:
return datetime.datetime.fromtimestamp(0)
lines = reversed([l for l in f])
f.close()
matcher = re.compile(r"^\[(\d\d\d\d-\d\d-\d\d\_\d\d:\d\d:\d\d)]") # the date-time pattern above
last_datetime = datetime.datetime.fromtimestamp(0)
for l in lines:
m = matcher.search(l)
if m is None: continue
last_time_formatted = m.group(1)
last_datetime = datetime.datetime.strptime(last_time_formatted, time_pattern)
break
return last_datetime
# Get a dict of users for a given slack
def | ():
Users = dict()
l = slack.users.list().body["members"]
for u in l:
Users[u["id"]] = u["name"]
return Users
# Get a dict of channels for a given slack
def GetChannels():
Channels = dict()
l = slack.channels.list().body["channels"]
for c in l:
Channels[c["id"]] = c["name"]
return Channels
# Get a dict of private channels for a given slack
def GetChannelsPrivate():
Priv_Channels = dict()
l = slack.groups.list().body["groups"]
for c in l:
Priv_Channels[c["id"]] = c["name"]
return Priv_Channels
def GetFiles():
Files = dict()
l = slack.files.list()
# Get a full list of messages from Slack
def GetFullMessages(chan_id, chan_name, priv):
# Get the last 1000 messages (maximum we can get from Slack at one time)
if priv == False:
resp = slack.channels.history(chan_id, count=1000, inclusive=True)
else:
resp = slack.groups.history(chan_id, count=1000, inclusive=True)
raw_messages = resp.body["messages"]
# This is true if there are more messages we can get
has_more = resp.body["has_more"]
while has_more:
# Get the timestamp for the earliest message we got in previous iteration
timestamp = resp.body["messages"][-1]["ts"]
# Make another request for the next messages
if priv == False:
resp = slack.channels.history(chan_id, count=1000, inclusive=True, latest=timestamp)
else:
resp = slack.groups.history(chan_id, count=1000, inclusive=True, latest=timestamp)
# Prepend our older messages
raw_messages = resp.body["messages"] + raw_messages
# Check if we still have more
has_more = resp.body["has_more"]
return raw_messages
# The main we run
if __name__=="__main__":
main()
| GetUsers | identifier_name |
api.py | #!/usr/bin/env python2.7
# coding=utf8
import ast
import os
import sqlite3 as lite
from config import (client_id, client_secret, redirect_uri, twitch_client_id,
twitch_client_secret, twitch_redirect_uri, twitch_scopes)
import MySQLdb as mdb
import requests
from flask import Flask, json, redirect, request, session
from flask.ext.cors import CORS
from lib.queries import API
from requests_oauthlib import OAuth2Session
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
@app.route("/api/chat/<string:channel>")
def api_chat_channel(channel):
start = request.args.get("startDate")
end = request.args.get("endDate")
api = API()
messages = api.chat_channel(channel, start, end)
return messages
"""
{
"messageCount": 2,
"messages": [
{
"message": "This is the most recent message response",
"time": "Tue, 01 Dec 2015 09:08:46 GMT",
"author": "exampleusername1"
},
{
"message": "This is a previous message in descending order",
"time": "Tue, 01 Dec 2015 09:07:55 GMT",
"author": "exampleusername2"
}
]
}
"""
@app.route("/api/chat/<string:channel>/<string:username>")
def api_channel_chat_user(channel, username):
start = request.args.get("startDate")
end = request.args.get("endDate")
api = API()
messages = api.channel_chat_user(channel, username, start, end)
return messages
"""
{
"messageCount": 2,
"messages": [
{
"message": "This is an example chat message response",
"time": "Tue, 01 Dec 2015 09:08:46 GMT"
},
{
"message": "This is a subsequent message in descending order",
"time": "Tue, 01 Dec 2015 09:07:55 GMT"
}
]
}
"""
@app.route("/api/points/<string:username>")
def api_points_user(username):
api = API()
points = api.points_user(username)
return points
"""
{
"points": {
"donationPoints": 17030,
"timeInChat": 390,
"timePoints": 78,
"totalPoints": 17420
}
}
"""
@app.route("/api/commands/<string:channel>")
def api_channel_commands(channel):
api = API()
commands = api.channel_commands(channel)
return commands
"""
{
"commandCount": 2,
"commands": [
{
"command": "!testcommand1",
"creator": "exampleusername1",
"response": "Example string response for command",
"time": "Tue, 01 Dec 2015 02:07:10 GMT",
"timesUsed": 1,
"userLevel": "reg"
},
{
"command": "!testcommand2",
"creator": "exampleusername2",
"response": "Another example string response",
"time": "Tue, 01 Dec 2015 02:05:06 GMT",
"timesUsed": 2,
"userLevel": "mod"
}
]
}
"""
@app.route("/api/items")
def api_items():
api = API()
items = api.items()
return items
"""
{
"items": [
{
"itemId": 0,
"itemName": "Nugget",
"itemValue": 1000
},
{
"itemId": 1,
"itemName": "Fire Stone",
"itemValue": 750
}
]
}
"""
@app.route("/api/items/<string:username>")
def api_items_username(username):
|
@app.route("/api/pokemon/<string:username>")
def api_pokemon_username(username):
api = API()
party = api.pokemon_username(username)
return party
"""
{
"party": [
{
"caughtBy": "singlerider",
"level": 5,
"nickname": "Scyther",
"pokemonId": 123,
"position": 1,
"sale": {
"askingPrice": null,
"forSale": 0
},
"trade": {
"askingLevel": null,
"askingTrade": null,
"forTrade": 0
}
},
{
"caughtBy": "singlerider",
"level": 28,
"nickname": "Jolteon",
"pokemonId": 135,
"position": 2,
"sale": {
"askingPrice": null,
"forSale": 0
},
"trade": {
"askingLevel": 5,
"askingTrade": 64,
"forTrade": 2
}
}
],
"partyCount": 2
}
"""
@app.route("/api/chatters/<string:channel>")
def api_channel_chatters(channel):
url = "https://tmi.twitch.tv/group/user/{0}/chatters".format(channel)
resp = requests.get(url)
data = ast.literal_eval(resp.content)
return json.jsonify(data)
"""
{
"_links": {},
"chatter_count": 148,
"chatters": {
"admins": [],
"global_mods": [],
"moderators": [
"moderatorname1",
"moderatorname2"
],
"staff": [],
"viewers": [
"1o1canadian",
"agentjesse"
]
}
}
"""
@app.route("/api/quotes/<string:channel>")
def api_channel_quotes(channel):
api = API()
quotes = api.channel_quotes(channel)
return quotes
"""
{
"quoteCount": 15,
"quotes": [
{
"createdBy": "singlerider",
"game": "H1Z1",
"quote": "we were just talking about you and your awesome cable management skills",
"quoteNumber": 1
},
{
"createdBy": "joecow",
"game": "H1Z1",
"quote": "JoeCow is the best -Everyone ever 2016",
"quoteNumber": 2
}
}
"""
# ################ OAUTH PORTION # TODO: MOVE TO ANOTHER FILE ############### #
@app.route("/twitchalerts/authorize")
def twitchalerts_authorize():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
authorization_base_url = 'https://www.twitchalerts.com/api/v1.0/authorize'
scope = ["donations.read", "donations.create"]
twitchalerts = OAuth2Session(
client_id, scope=scope, redirect_uri=redirect_uri)
authorization_url, state = twitchalerts.authorization_url(
authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
# Step 2: User authorization, this happens on the provider.
@app.route("/twitchalerts/authorized", methods=["GET", "POST"])
def twitchalerts_authorized():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
token_url = 'https://www.twitchalerts.com/api/v1.0/token'
code = request.args.get('code', '')
twitchalerts = OAuth2Session(
client_id, redirect_uri=redirect_uri) # state=session['oauth_state']
token = twitchalerts.fetch_token(
token_url, client_secret=client_secret, code=code)
params = {'access_token': token['access_token'], 'limit': 100}
data = twitchalerts.get(
'https://www.twitchalerts.com/api/v1.0/donations', params=params)
return str(token["access_token"])
@app.route("/twitch/authorize")
def twitch_authorize():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
authorization_base_url = "https://api.twitch.tv/kraken/oauth2/authorize" + \
"?response_type=code" + \
"&client_id=" + twitch_client_id + \
"&redirect_uri=" + twitch_redirect_uri
scope = twitch_scopes
twitch = OAuth2Session(
client_id=twitch_client_id, scope=scope,
redirect_uri=twitch_redirect_uri)
authorization_url, state = twitch.authorization_url(authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
@app.route("/twitch/authorized", methods=["GET", "POST"])
def twitch_authorized():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
token_url = "https://api.twitch.tv/kraken/oauth2/token"
code = request.args.get('code', '')
twitch = OAuth2Session(
client_id=twitch_client_id, scope=twitch_scopes,
redirect_uri=twitch_redirect_uri)
token = twitch.fetch_token(
token_url, client_secret=twitch_client_secret, code=code)
username_url = "https://api.twitch.tv/kraken?oauth_token=" + \
token["access_token"]
username_resp = requests.get(url=username_url)
username = json.loads(username_resp.content)["token"]["user_name"]
con = lite.connect("twitch.db", check_same_thread=False)
with con:
cur = con.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS auth(
id INTEGER PRIMARY KEY,
channel TEXT UNIQUE, twitch_oauth TEXT,
twitchalerts_oauth TEXT, streamtip_oauth TEXT);
""")
con.commit()
cur.execute("""
INSERT OR IGNORE INTO auth VALUES (NULL, ?, ?, NULL, NULL);
""", [username, token["access_token"]])
cur.execute("""
UPDATE auth SET twitch_oauth = ? WHERE channel = ?;
""", [token["access_token"], username])
try:
con = mdb.connect("localhost", "root", "", "twitchcurvyllama")
with con:
cur = con.cursor()
cur.execute("""
INSERT INTO auth (channel, twitch_oauth) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE twitch_oauth = %s
""", [username, token["access_token"], token["access_token"]])
cur.close()
except:
pass
return str("It worked! Thanks, " + username)
if __name__ == "__main__":
# This allows us to use a plain HTTP callback
os.environ["DEBUG"] = "1"
app.secret_key = os.urandom(24)
app.run(threaded=True, host="0.0.0.0", port=8080)
| api = API()
items = api.items_username(username)
return items
"""
{
"itemCount": 1,
"items": [
{
"itemId": 2,
"itemName": "Water Stone",
"itemQuantity": 1
}
]
}
""" | identifier_body |
api.py | #!/usr/bin/env python2.7
# coding=utf8
import ast
import os
import sqlite3 as lite
from config import (client_id, client_secret, redirect_uri, twitch_client_id,
twitch_client_secret, twitch_redirect_uri, twitch_scopes)
import MySQLdb as mdb
import requests
from flask import Flask, json, redirect, request, session
from flask.ext.cors import CORS
from lib.queries import API
from requests_oauthlib import OAuth2Session
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
@app.route("/api/chat/<string:channel>")
def api_chat_channel(channel):
start = request.args.get("startDate")
end = request.args.get("endDate")
api = API()
messages = api.chat_channel(channel, start, end)
return messages
"""
{
"messageCount": 2,
"messages": [
{
"message": "This is the most recent message response",
"time": "Tue, 01 Dec 2015 09:08:46 GMT",
"author": "exampleusername1"
},
{
"message": "This is a previous message in descending order",
"time": "Tue, 01 Dec 2015 09:07:55 GMT",
"author": "exampleusername2"
}
]
}
"""
@app.route("/api/chat/<string:channel>/<string:username>")
def api_channel_chat_user(channel, username):
start = request.args.get("startDate")
end = request.args.get("endDate")
api = API()
messages = api.channel_chat_user(channel, username, start, end)
return messages
"""
{
"messageCount": 2,
"messages": [
{
"message": "This is an example chat message response",
"time": "Tue, 01 Dec 2015 09:08:46 GMT"
},
{
"message": "This is a subsequent message in descending order",
"time": "Tue, 01 Dec 2015 09:07:55 GMT"
}
]
}
"""
@app.route("/api/points/<string:username>")
def api_points_user(username):
api = API()
points = api.points_user(username)
return points
"""
{
"points": {
"donationPoints": 17030,
"timeInChat": 390,
"timePoints": 78,
"totalPoints": 17420
}
}
"""
@app.route("/api/commands/<string:channel>")
def api_channel_commands(channel):
api = API()
commands = api.channel_commands(channel)
return commands
"""
{
"commandCount": 2,
"commands": [
{
"command": "!testcommand1",
"creator": "exampleusername1",
"response": "Example string response for command",
"time": "Tue, 01 Dec 2015 02:07:10 GMT",
"timesUsed": 1,
"userLevel": "reg"
},
{
"command": "!testcommand2",
"creator": "exampleusername2",
"response": "Another example string response",
"time": "Tue, 01 Dec 2015 02:05:06 GMT",
"timesUsed": 2,
"userLevel": "mod"
}
]
}
"""
@app.route("/api/items")
def api_items():
api = API()
items = api.items()
return items
"""
{
"items": [
{
"itemId": 0,
"itemName": "Nugget",
"itemValue": 1000
},
{
"itemId": 1,
"itemName": "Fire Stone",
"itemValue": 750
}
]
}
""" |
@app.route("/api/items/<string:username>")
def api_items_username(username):
api = API()
items = api.items_username(username)
return items
"""
{
"itemCount": 1,
"items": [
{
"itemId": 2,
"itemName": "Water Stone",
"itemQuantity": 1
}
]
}
"""
@app.route("/api/pokemon/<string:username>")
def api_pokemon_username(username):
api = API()
party = api.pokemon_username(username)
return party
"""
{
"party": [
{
"caughtBy": "singlerider",
"level": 5,
"nickname": "Scyther",
"pokemonId": 123,
"position": 1,
"sale": {
"askingPrice": null,
"forSale": 0
},
"trade": {
"askingLevel": null,
"askingTrade": null,
"forTrade": 0
}
},
{
"caughtBy": "singlerider",
"level": 28,
"nickname": "Jolteon",
"pokemonId": 135,
"position": 2,
"sale": {
"askingPrice": null,
"forSale": 0
},
"trade": {
"askingLevel": 5,
"askingTrade": 64,
"forTrade": 2
}
}
],
"partyCount": 2
}
"""
@app.route("/api/chatters/<string:channel>")
def api_channel_chatters(channel):
url = "https://tmi.twitch.tv/group/user/{0}/chatters".format(channel)
resp = requests.get(url)
data = ast.literal_eval(resp.content)
return json.jsonify(data)
"""
{
"_links": {},
"chatter_count": 148,
"chatters": {
"admins": [],
"global_mods": [],
"moderators": [
"moderatorname1",
"moderatorname2"
],
"staff": [],
"viewers": [
"1o1canadian",
"agentjesse"
]
}
}
"""
@app.route("/api/quotes/<string:channel>")
def api_channel_quotes(channel):
api = API()
quotes = api.channel_quotes(channel)
return quotes
"""
{
"quoteCount": 15,
"quotes": [
{
"createdBy": "singlerider",
"game": "H1Z1",
"quote": "we were just talking about you and your awesome cable management skills",
"quoteNumber": 1
},
{
"createdBy": "joecow",
"game": "H1Z1",
"quote": "JoeCow is the best -Everyone ever 2016",
"quoteNumber": 2
}
}
"""
# ################ OAUTH PORTION # TODO: MOVE TO ANOTHER FILE ############### #
@app.route("/twitchalerts/authorize")
def twitchalerts_authorize():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
authorization_base_url = 'https://www.twitchalerts.com/api/v1.0/authorize'
scope = ["donations.read", "donations.create"]
twitchalerts = OAuth2Session(
client_id, scope=scope, redirect_uri=redirect_uri)
authorization_url, state = twitchalerts.authorization_url(
authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
# Step 2: User authorization, this happens on the provider.
@app.route("/twitchalerts/authorized", methods=["GET", "POST"])
def twitchalerts_authorized():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
token_url = 'https://www.twitchalerts.com/api/v1.0/token'
code = request.args.get('code', '')
twitchalerts = OAuth2Session(
client_id, redirect_uri=redirect_uri) # state=session['oauth_state']
token = twitchalerts.fetch_token(
token_url, client_secret=client_secret, code=code)
params = {'access_token': token['access_token'], 'limit': 100}
data = twitchalerts.get(
'https://www.twitchalerts.com/api/v1.0/donations', params=params)
return str(token["access_token"])
@app.route("/twitch/authorize")
def twitch_authorize():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
authorization_base_url = "https://api.twitch.tv/kraken/oauth2/authorize" + \
"?response_type=code" + \
"&client_id=" + twitch_client_id + \
"&redirect_uri=" + twitch_redirect_uri
scope = twitch_scopes
twitch = OAuth2Session(
client_id=twitch_client_id, scope=scope,
redirect_uri=twitch_redirect_uri)
authorization_url, state = twitch.authorization_url(authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
@app.route("/twitch/authorized", methods=["GET", "POST"])
def twitch_authorized():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
token_url = "https://api.twitch.tv/kraken/oauth2/token"
code = request.args.get('code', '')
twitch = OAuth2Session(
client_id=twitch_client_id, scope=twitch_scopes,
redirect_uri=twitch_redirect_uri)
token = twitch.fetch_token(
token_url, client_secret=twitch_client_secret, code=code)
username_url = "https://api.twitch.tv/kraken?oauth_token=" + \
token["access_token"]
username_resp = requests.get(url=username_url)
username = json.loads(username_resp.content)["token"]["user_name"]
con = lite.connect("twitch.db", check_same_thread=False)
with con:
cur = con.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS auth(
id INTEGER PRIMARY KEY,
channel TEXT UNIQUE, twitch_oauth TEXT,
twitchalerts_oauth TEXT, streamtip_oauth TEXT);
""")
con.commit()
cur.execute("""
INSERT OR IGNORE INTO auth VALUES (NULL, ?, ?, NULL, NULL);
""", [username, token["access_token"]])
cur.execute("""
UPDATE auth SET twitch_oauth = ? WHERE channel = ?;
""", [token["access_token"], username])
try:
con = mdb.connect("localhost", "root", "", "twitchcurvyllama")
with con:
cur = con.cursor()
cur.execute("""
INSERT INTO auth (channel, twitch_oauth) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE twitch_oauth = %s
""", [username, token["access_token"], token["access_token"]])
cur.close()
except:
pass
return str("It worked! Thanks, " + username)
if __name__ == "__main__":
# This allows us to use a plain HTTP callback
os.environ["DEBUG"] = "1"
app.secret_key = os.urandom(24)
app.run(threaded=True, host="0.0.0.0", port=8080) | random_line_split | |
api.py | #!/usr/bin/env python2.7
# coding=utf8
import ast
import os
import sqlite3 as lite
from config import (client_id, client_secret, redirect_uri, twitch_client_id,
twitch_client_secret, twitch_redirect_uri, twitch_scopes)
import MySQLdb as mdb
import requests
from flask import Flask, json, redirect, request, session
from flask.ext.cors import CORS
from lib.queries import API
from requests_oauthlib import OAuth2Session
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
@app.route("/api/chat/<string:channel>")
def api_chat_channel(channel):
start = request.args.get("startDate")
end = request.args.get("endDate")
api = API()
messages = api.chat_channel(channel, start, end)
return messages
"""
{
"messageCount": 2,
"messages": [
{
"message": "This is the most recent message response",
"time": "Tue, 01 Dec 2015 09:08:46 GMT",
"author": "exampleusername1"
},
{
"message": "This is a previous message in descending order",
"time": "Tue, 01 Dec 2015 09:07:55 GMT",
"author": "exampleusername2"
}
]
}
"""
@app.route("/api/chat/<string:channel>/<string:username>")
def api_channel_chat_user(channel, username):
start = request.args.get("startDate")
end = request.args.get("endDate")
api = API()
messages = api.channel_chat_user(channel, username, start, end)
return messages
"""
{
"messageCount": 2,
"messages": [
{
"message": "This is an example chat message response",
"time": "Tue, 01 Dec 2015 09:08:46 GMT"
},
{
"message": "This is a subsequent message in descending order",
"time": "Tue, 01 Dec 2015 09:07:55 GMT"
}
]
}
"""
@app.route("/api/points/<string:username>")
def api_points_user(username):
api = API()
points = api.points_user(username)
return points
"""
{
"points": {
"donationPoints": 17030,
"timeInChat": 390,
"timePoints": 78,
"totalPoints": 17420
}
}
"""
@app.route("/api/commands/<string:channel>")
def api_channel_commands(channel):
api = API()
commands = api.channel_commands(channel)
return commands
"""
{
"commandCount": 2,
"commands": [
{
"command": "!testcommand1",
"creator": "exampleusername1",
"response": "Example string response for command",
"time": "Tue, 01 Dec 2015 02:07:10 GMT",
"timesUsed": 1,
"userLevel": "reg"
},
{
"command": "!testcommand2",
"creator": "exampleusername2",
"response": "Another example string response",
"time": "Tue, 01 Dec 2015 02:05:06 GMT",
"timesUsed": 2,
"userLevel": "mod"
}
]
}
"""
@app.route("/api/items")
def api_items():
api = API()
items = api.items()
return items
"""
{
"items": [
{
"itemId": 0,
"itemName": "Nugget",
"itemValue": 1000
},
{
"itemId": 1,
"itemName": "Fire Stone",
"itemValue": 750
}
]
}
"""
@app.route("/api/items/<string:username>")
def api_items_username(username):
api = API()
items = api.items_username(username)
return items
"""
{
"itemCount": 1,
"items": [
{
"itemId": 2,
"itemName": "Water Stone",
"itemQuantity": 1
}
]
}
"""
@app.route("/api/pokemon/<string:username>")
def api_pokemon_username(username):
api = API()
party = api.pokemon_username(username)
return party
"""
{
"party": [
{
"caughtBy": "singlerider",
"level": 5,
"nickname": "Scyther",
"pokemonId": 123,
"position": 1,
"sale": {
"askingPrice": null,
"forSale": 0
},
"trade": {
"askingLevel": null,
"askingTrade": null,
"forTrade": 0
}
},
{
"caughtBy": "singlerider",
"level": 28,
"nickname": "Jolteon",
"pokemonId": 135,
"position": 2,
"sale": {
"askingPrice": null,
"forSale": 0
},
"trade": {
"askingLevel": 5,
"askingTrade": 64,
"forTrade": 2
}
}
],
"partyCount": 2
}
"""
@app.route("/api/chatters/<string:channel>")
def api_channel_chatters(channel):
url = "https://tmi.twitch.tv/group/user/{0}/chatters".format(channel)
resp = requests.get(url)
data = ast.literal_eval(resp.content)
return json.jsonify(data)
"""
{
"_links": {},
"chatter_count": 148,
"chatters": {
"admins": [],
"global_mods": [],
"moderators": [
"moderatorname1",
"moderatorname2"
],
"staff": [],
"viewers": [
"1o1canadian",
"agentjesse"
]
}
}
"""
@app.route("/api/quotes/<string:channel>")
def api_channel_quotes(channel):
api = API()
quotes = api.channel_quotes(channel)
return quotes
"""
{
"quoteCount": 15,
"quotes": [
{
"createdBy": "singlerider",
"game": "H1Z1",
"quote": "we were just talking about you and your awesome cable management skills",
"quoteNumber": 1
},
{
"createdBy": "joecow",
"game": "H1Z1",
"quote": "JoeCow is the best -Everyone ever 2016",
"quoteNumber": 2
}
}
"""
# ################ OAUTH PORTION # TODO: MOVE TO ANOTHER FILE ############### #
@app.route("/twitchalerts/authorize")
def twitchalerts_authorize():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
authorization_base_url = 'https://www.twitchalerts.com/api/v1.0/authorize'
scope = ["donations.read", "donations.create"]
twitchalerts = OAuth2Session(
client_id, scope=scope, redirect_uri=redirect_uri)
authorization_url, state = twitchalerts.authorization_url(
authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
# Step 2: User authorization, this happens on the provider.
@app.route("/twitchalerts/authorized", methods=["GET", "POST"])
def twitchalerts_authorized():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
token_url = 'https://www.twitchalerts.com/api/v1.0/token'
code = request.args.get('code', '')
twitchalerts = OAuth2Session(
client_id, redirect_uri=redirect_uri) # state=session['oauth_state']
token = twitchalerts.fetch_token(
token_url, client_secret=client_secret, code=code)
params = {'access_token': token['access_token'], 'limit': 100}
data = twitchalerts.get(
'https://www.twitchalerts.com/api/v1.0/donations', params=params)
return str(token["access_token"])
@app.route("/twitch/authorize")
def twitch_authorize():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
authorization_base_url = "https://api.twitch.tv/kraken/oauth2/authorize" + \
"?response_type=code" + \
"&client_id=" + twitch_client_id + \
"&redirect_uri=" + twitch_redirect_uri
scope = twitch_scopes
twitch = OAuth2Session(
client_id=twitch_client_id, scope=scope,
redirect_uri=twitch_redirect_uri)
authorization_url, state = twitch.authorization_url(authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
@app.route("/twitch/authorized", methods=["GET", "POST"])
def twitch_authorized():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
token_url = "https://api.twitch.tv/kraken/oauth2/token"
code = request.args.get('code', '')
twitch = OAuth2Session(
client_id=twitch_client_id, scope=twitch_scopes,
redirect_uri=twitch_redirect_uri)
token = twitch.fetch_token(
token_url, client_secret=twitch_client_secret, code=code)
username_url = "https://api.twitch.tv/kraken?oauth_token=" + \
token["access_token"]
username_resp = requests.get(url=username_url)
username = json.loads(username_resp.content)["token"]["user_name"]
con = lite.connect("twitch.db", check_same_thread=False)
with con:
cur = con.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS auth(
id INTEGER PRIMARY KEY,
channel TEXT UNIQUE, twitch_oauth TEXT,
twitchalerts_oauth TEXT, streamtip_oauth TEXT);
""")
con.commit()
cur.execute("""
INSERT OR IGNORE INTO auth VALUES (NULL, ?, ?, NULL, NULL);
""", [username, token["access_token"]])
cur.execute("""
UPDATE auth SET twitch_oauth = ? WHERE channel = ?;
""", [token["access_token"], username])
try:
con = mdb.connect("localhost", "root", "", "twitchcurvyllama")
with con:
cur = con.cursor()
cur.execute("""
INSERT INTO auth (channel, twitch_oauth) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE twitch_oauth = %s
""", [username, token["access_token"], token["access_token"]])
cur.close()
except:
pass
return str("It worked! Thanks, " + username)
if __name__ == "__main__":
# This allows us to use a plain HTTP callback
| os.environ["DEBUG"] = "1"
app.secret_key = os.urandom(24)
app.run(threaded=True, host="0.0.0.0", port=8080) | conditional_block | |
api.py | #!/usr/bin/env python2.7
# coding=utf8
import ast
import os
import sqlite3 as lite
from config import (client_id, client_secret, redirect_uri, twitch_client_id,
twitch_client_secret, twitch_redirect_uri, twitch_scopes)
import MySQLdb as mdb
import requests
from flask import Flask, json, redirect, request, session
from flask.ext.cors import CORS
from lib.queries import API
from requests_oauthlib import OAuth2Session
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
@app.route("/api/chat/<string:channel>")
def api_chat_channel(channel):
start = request.args.get("startDate")
end = request.args.get("endDate")
api = API()
messages = api.chat_channel(channel, start, end)
return messages
"""
{
"messageCount": 2,
"messages": [
{
"message": "This is the most recent message response",
"time": "Tue, 01 Dec 2015 09:08:46 GMT",
"author": "exampleusername1"
},
{
"message": "This is a previous message in descending order",
"time": "Tue, 01 Dec 2015 09:07:55 GMT",
"author": "exampleusername2"
}
]
}
"""
@app.route("/api/chat/<string:channel>/<string:username>")
def api_channel_chat_user(channel, username):
start = request.args.get("startDate")
end = request.args.get("endDate")
api = API()
messages = api.channel_chat_user(channel, username, start, end)
return messages
"""
{
"messageCount": 2,
"messages": [
{
"message": "This is an example chat message response",
"time": "Tue, 01 Dec 2015 09:08:46 GMT"
},
{
"message": "This is a subsequent message in descending order",
"time": "Tue, 01 Dec 2015 09:07:55 GMT"
}
]
}
"""
@app.route("/api/points/<string:username>")
def api_points_user(username):
api = API()
points = api.points_user(username)
return points
"""
{
"points": {
"donationPoints": 17030,
"timeInChat": 390,
"timePoints": 78,
"totalPoints": 17420
}
}
"""
@app.route("/api/commands/<string:channel>")
def | (channel):
api = API()
commands = api.channel_commands(channel)
return commands
"""
{
"commandCount": 2,
"commands": [
{
"command": "!testcommand1",
"creator": "exampleusername1",
"response": "Example string response for command",
"time": "Tue, 01 Dec 2015 02:07:10 GMT",
"timesUsed": 1,
"userLevel": "reg"
},
{
"command": "!testcommand2",
"creator": "exampleusername2",
"response": "Another example string response",
"time": "Tue, 01 Dec 2015 02:05:06 GMT",
"timesUsed": 2,
"userLevel": "mod"
}
]
}
"""
@app.route("/api/items")
def api_items():
api = API()
items = api.items()
return items
"""
{
"items": [
{
"itemId": 0,
"itemName": "Nugget",
"itemValue": 1000
},
{
"itemId": 1,
"itemName": "Fire Stone",
"itemValue": 750
}
]
}
"""
@app.route("/api/items/<string:username>")
def api_items_username(username):
api = API()
items = api.items_username(username)
return items
"""
{
"itemCount": 1,
"items": [
{
"itemId": 2,
"itemName": "Water Stone",
"itemQuantity": 1
}
]
}
"""
@app.route("/api/pokemon/<string:username>")
def api_pokemon_username(username):
api = API()
party = api.pokemon_username(username)
return party
"""
{
"party": [
{
"caughtBy": "singlerider",
"level": 5,
"nickname": "Scyther",
"pokemonId": 123,
"position": 1,
"sale": {
"askingPrice": null,
"forSale": 0
},
"trade": {
"askingLevel": null,
"askingTrade": null,
"forTrade": 0
}
},
{
"caughtBy": "singlerider",
"level": 28,
"nickname": "Jolteon",
"pokemonId": 135,
"position": 2,
"sale": {
"askingPrice": null,
"forSale": 0
},
"trade": {
"askingLevel": 5,
"askingTrade": 64,
"forTrade": 2
}
}
],
"partyCount": 2
}
"""
@app.route("/api/chatters/<string:channel>")
def api_channel_chatters(channel):
url = "https://tmi.twitch.tv/group/user/{0}/chatters".format(channel)
resp = requests.get(url)
data = ast.literal_eval(resp.content)
return json.jsonify(data)
"""
{
"_links": {},
"chatter_count": 148,
"chatters": {
"admins": [],
"global_mods": [],
"moderators": [
"moderatorname1",
"moderatorname2"
],
"staff": [],
"viewers": [
"1o1canadian",
"agentjesse"
]
}
}
"""
@app.route("/api/quotes/<string:channel>")
def api_channel_quotes(channel):
api = API()
quotes = api.channel_quotes(channel)
return quotes
"""
{
"quoteCount": 15,
"quotes": [
{
"createdBy": "singlerider",
"game": "H1Z1",
"quote": "we were just talking about you and your awesome cable management skills",
"quoteNumber": 1
},
{
"createdBy": "joecow",
"game": "H1Z1",
"quote": "JoeCow is the best -Everyone ever 2016",
"quoteNumber": 2
}
}
"""
# ################ OAUTH PORTION # TODO: MOVE TO ANOTHER FILE ############### #
@app.route("/twitchalerts/authorize")
def twitchalerts_authorize():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
authorization_base_url = 'https://www.twitchalerts.com/api/v1.0/authorize'
scope = ["donations.read", "donations.create"]
twitchalerts = OAuth2Session(
client_id, scope=scope, redirect_uri=redirect_uri)
authorization_url, state = twitchalerts.authorization_url(
authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
# Step 2: User authorization, this happens on the provider.
@app.route("/twitchalerts/authorized", methods=["GET", "POST"])
def twitchalerts_authorized():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
token_url = 'https://www.twitchalerts.com/api/v1.0/token'
code = request.args.get('code', '')
twitchalerts = OAuth2Session(
client_id, redirect_uri=redirect_uri) # state=session['oauth_state']
token = twitchalerts.fetch_token(
token_url, client_secret=client_secret, code=code)
params = {'access_token': token['access_token'], 'limit': 100}
data = twitchalerts.get(
'https://www.twitchalerts.com/api/v1.0/donations', params=params)
return str(token["access_token"])
@app.route("/twitch/authorize")
def twitch_authorize():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
authorization_base_url = "https://api.twitch.tv/kraken/oauth2/authorize" + \
"?response_type=code" + \
"&client_id=" + twitch_client_id + \
"&redirect_uri=" + twitch_redirect_uri
scope = twitch_scopes
twitch = OAuth2Session(
client_id=twitch_client_id, scope=scope,
redirect_uri=twitch_redirect_uri)
authorization_url, state = twitch.authorization_url(authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
@app.route("/twitch/authorized", methods=["GET", "POST"])
def twitch_authorized():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
token_url = "https://api.twitch.tv/kraken/oauth2/token"
code = request.args.get('code', '')
twitch = OAuth2Session(
client_id=twitch_client_id, scope=twitch_scopes,
redirect_uri=twitch_redirect_uri)
token = twitch.fetch_token(
token_url, client_secret=twitch_client_secret, code=code)
username_url = "https://api.twitch.tv/kraken?oauth_token=" + \
token["access_token"]
username_resp = requests.get(url=username_url)
username = json.loads(username_resp.content)["token"]["user_name"]
con = lite.connect("twitch.db", check_same_thread=False)
with con:
cur = con.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS auth(
id INTEGER PRIMARY KEY,
channel TEXT UNIQUE, twitch_oauth TEXT,
twitchalerts_oauth TEXT, streamtip_oauth TEXT);
""")
con.commit()
cur.execute("""
INSERT OR IGNORE INTO auth VALUES (NULL, ?, ?, NULL, NULL);
""", [username, token["access_token"]])
cur.execute("""
UPDATE auth SET twitch_oauth = ? WHERE channel = ?;
""", [token["access_token"], username])
try:
con = mdb.connect("localhost", "root", "", "twitchcurvyllama")
with con:
cur = con.cursor()
cur.execute("""
INSERT INTO auth (channel, twitch_oauth) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE twitch_oauth = %s
""", [username, token["access_token"], token["access_token"]])
cur.close()
except:
pass
return str("It worked! Thanks, " + username)
if __name__ == "__main__":
# This allows us to use a plain HTTP callback
os.environ["DEBUG"] = "1"
app.secret_key = os.urandom(24)
app.run(threaded=True, host="0.0.0.0", port=8080)
| api_channel_commands | identifier_name |
main.py | #!/usr/bin/env python
"""Process the 3d model data and create required files for NMS.
This function will take all the data provided by the blender script and create a number of
.exml files that contain all the data required by the game to view the 3d model created.
"""
__author__ = "monkeyman192"
__credits__ = ["monkeyman192", "gregkwaste"]
from classes import *
import os
import subprocess
from LOOKUPS import *
from shutil import copy2
from array import array
from mbincompiler import mbinCompiler
BASEPATH = 'CUSTOMMODELS'
def traverse(obj):
# a custom generator to iterate over the tree of all the children on the scene (including the Model object)
# this returns objects from the branches inwards (which *shouldn't* be a problem...)
for child in obj.Children:
for subvalue in traverse(child):
yield subvalue
else:
yield obj
# simple function to take a list and move the entry at the ith index to the first place
def movetofront(lst, i):
k = lst.pop(i) # this will break if i > len(lst)...
return [k] + lst
class Create_Data():
def __init__(self, name, directory, model, anim_data = dict(), descriptor = None, **commands):
"""
name - the name of the file we want to create. Most entities within will have a name derived from this.
directory - the full relative location of where the scene file will be located.
model - The Model object that contains all the child nodes (of a number of different types)
"""
self.name = name # this is the name of the file
self.directory = directory # the path that the file is supposed to be located at
self.Model = model # this is the main model file for the entire scene.
self.anim_data = anim_data # animation data (defaults to None)
self.descriptor = descriptor
self.fix_names()
# assign each of the input streams to a variable
self.index_stream = []
self.vertex_stream = []
self.uv_stream = []
self.n_stream = []
self.t_stream = []
self.chvertex_stream = []
self.materials = set() # this will hopefully mean that there will be at most one copy of each unique TkMaterialData struct in the set
#self.Entities = [] # a list of any extra properties to go in each entity
# extract the streams from the mesh objects.
index = 0
for mesh in self.Model.ListOfMeshes:
self.index_stream.append(mesh.Indexes)
self.vertex_stream.append(mesh.Vertices)
self.uv_stream.append(mesh.UVs)
self.n_stream.append(mesh.Normals)
self.t_stream.append(mesh.Tangents)
self.chvertex_stream.append(mesh.CHVerts)
# also add in the material data to the list
if mesh.Material is not None:
self.materials.add(mesh.Material)
mesh.ID = index # assign the index location of the data to the Object so that it knows where its data is
index += 1
#for obj in self.Model.ListOfEntities:
# self.Entities.append(obj.EntityData)
self.num_mesh_objs = index # this is the total number of objects that have mesh data
self.mesh_data = [dict()]*self.num_mesh_objs # an empty list of dicts that will ber populated then each entry will
# be given back to the correct Mesh or Collision object
self.preprocess_streams()
# generate some variables relating to the paths
self.path = os.path.join(BASEPATH, self.directory, self.name) # the path location including the file name.
self.texture_path = os.path.join(self.path, 'TEXTURES')
self.anims_path = os.path.join(BASEPATH, self.directory, 'ANIMS')
self.ent_path = os.path.join(self.path, 'ENTITIES') # path location of the entity folder. Calling makedirs of this will ensure all the folders are made in one go
self.create_paths()
# This dictionary contains all the information for the geometry file
self.GeometryData = dict()
# This will just be some default entity with physics data
self.TkAttachmentData = TkAttachmentData() # this is created with the Physics Component Data by default
self.TkAttachmentData.make_elements(main=True)
self.process_data()
self.get_bounds()
self.create_vertex_layouts() # this creates the VertexLayout and SmallVertexLayout properties
# Material defaults
self.process_materials()
self.process_nodes()
self.mix_streams() # make this last to make sure flattening each stream doesn't affect other data.
# Assign each of the class objects that contain all of the data their data
self.TkGeometryData = TkGeometryData(**self.GeometryData)
self.TkGeometryData.make_elements(main=True)
self.Model.construct_data()
self.TkSceneNodeData = self.Model.get_data()
self.TkSceneNodeData.make_elements(main=True) # get the model to create all the required data and this will continue on down the tree
if len(self.descriptor) != 0:
self.descriptor = self.descriptor.to_exml()
self.descriptor.make_elements(main = True)
else:
self.descriptor = None
for material in self.materials:
if type(material) != str:
material.make_elements(main=True)
for anim_name in list(self.anim_data.keys()):
self.anim_data[anim_name].make_elements(main=True)
# write all the files
self.write()
# convert all the created exml files to mbin files
if not commands.get('dont_compile', False):
self.convert_to_mbin()
def create_paths(self):
# check whether the require paths exist and make them
if not os.path.exists(self.ent_path):
os.makedirs(self.ent_path)
if not os.path.exists(self.texture_path):
os.makedirs(self.texture_path)
if not os.path.exists(self.anims_path):
os.makedirs(self.anims_path)
def preprocess_streams(self):
# this will iterate through the Mesh objects and check that each of them has the same number of input streams. Any that don't will be flagged and a message will be raised
streams = set()
for mesh in self.Model.ListOfMeshes:
# first find all the streams over all the meshes that have been provided
streams = streams.union(mesh.provided_streams)
for mesh in self.Model.ListOfMeshes:
# next go back over the list and compare. If an entry isn't in the list of provided streams print a messge (maybe make a new error for this to be raised?)
diff = streams.difference(mesh.provided_streams)
if diff != set():
print('ERROR! Object {0} is missing the streams: {1}'.format(mesh.Name, diff))
if 'Vertices' in diff or 'Indexes' in diff:
print('CRITICAL ERROR! No vertex and/or index data provided for {} Object'.format(mesh.Name))
self.stream_list = list(SEMANTICS[x] for x in streams.difference({'Indexes'}))
self.stream_list.sort()
# secondly this will generate two lists containing the individual lengths of each stream
self.i_stream_lens = list()
self.v_stream_lens = list()
self.ch_stream_lens = list()
# to fix 1.3x mesh collisions, we need to make all the mesh collisions have their indexes first
# we require a mapping to know which is which though
self.index_mapping = list(range(len(self.Model.ListOfMeshes))) # the unchanged mapping
for i in range(len(self.Model.ListOfMeshes)):
mesh = self.Model.ListOfMeshes[i]
if mesh._Type == 'COLLISION':
if mesh.CType == 'Mesh':
self.index_mapping = movetofront(self.index_mapping, i) # move the index it is now located at so we can construct it correctly in the scene
print(self.index_mapping, 'index_mapping')
# populate the lists containing the lengths of each individual stream
for index in range(self.num_mesh_objs):
self.i_stream_lens.append(len(self.index_stream[index]))
self.v_stream_lens.append(len(self.vertex_stream[index]))
self.ch_stream_lens.append(len(self.chvertex_stream[index]))
def fix_names(self):
# just make sure that the name and path is all in uppercase
self.name = self.name.upper()
self.directory = self.directory.upper()
def process_data(self):
# This will do the main processing of the different streams.
# indexes
index_counts = list(3*x for x in self.i_stream_lens) # the total number of index points in each object
print(index_counts, 'index counts')
# now, re-order the indexes:
new_index_counts = list(index_counts[self.index_mapping[i]] for i in range(len(index_counts)))
print(new_index_counts, 'new_index_counts')
# and sort out the batches
self.batches = list((sum(new_index_counts[:i]), new_index_counts[i]) for i in range(self.num_mesh_objs))
print(self.batches, 'batches')
# vertices
self.vert_bounds = list((sum(self.v_stream_lens[:i]), sum(self.v_stream_lens[:i+1])-1) for i in range(self.num_mesh_objs))
# bounded hull data
self.hull_bounds = list((sum(self.ch_stream_lens[:i]), sum(self.ch_stream_lens[:i+1])) for i in range(self.num_mesh_objs))
print(self.hull_bounds, 'bound hulls')
# CollisionIndexCount
# go over all the meshes and add all the batches. Not sure if this can be optimised to be obtained earier... Probably...
ColIndexCount = 0
for i in range(len(self.Model.ListOfMeshes)):
mesh = self.Model.ListOfMeshes[i]
if mesh._Type == 'COLLISION':
if mesh.CType == 'Mesh':
#print(index_counts, sum(index_counts[:i]), index_counts[i])
ColIndexCount += index_counts[i]
# we need to fix up the index stream as the numbering needs to be continuous across all the streams
k = 0 # additive constant
for i in range(self.num_mesh_objs):
# first add k to every element in every tuple
curr_max = 0
for j in range(self.i_stream_lens[i]):
self.index_stream[i][j] = tuple(k + index for index in self.index_stream[i][j])
local_max = max(self.index_stream[i][j])
if local_max > curr_max:
curr_max = local_max
# now we set k to be the current max and this is added on to the next set.
k = curr_max + 1
#print(self.index_stream)
print('reshuffling indexes')
# now we need to re-shuffle the index data
new_index_data = list(range(self.num_mesh_objs)) # just fill with numbers for now, they will be overridden
for i in range(self.num_mesh_objs):
new_index_data[self.index_mapping[i]] = self.index_stream[i]
self.index_stream = new_index_data
#print(self.index_stream)
# First we need to find the length of each stream.
self.GeometryData['IndexCount'] = 3*sum(self.i_stream_lens)
self.GeometryData['VertexCount'] = sum(self.v_stream_lens)
self.GeometryData['CollisionIndexCount'] = ColIndexCount
self.GeometryData['MeshVertRStart'] = list(self.vert_bounds[i][0] for i in range(len(self.vert_bounds)))
self.GeometryData['MeshVertREnd'] = list(self.vert_bounds[i][1] for i in range(len(self.vert_bounds)))
self.GeometryData['BoundHullVertSt'] = list(self.hull_bounds[i][0] for i in range(len(self.hull_bounds)))
self.GeometryData['BoundHullVertEd'] = list(self.hull_bounds[i][1] for i in range(len(self.hull_bounds)))
if self.GeometryData['IndexCount'] > 2**16:
self.GeometryData['Indices16Bit'] = 0
else:
self.GeometryData['Indices16Bit'] = 1
# might as well also populate the hull data since we only need to union it all:
hull_data = []
for vert_list in self.chvertex_stream:
hull_data += vert_list
self.GeometryData['BoundHullVerts'] = hull_data
def process_nodes(self):
# this will iterate first over the list of mesh data and apply all the required information to the Mesh and Mesh-type Collisions objects.
# We will then iterate over the entire tree of children to the Model and give them any required information
# Go through every node
for obj in traverse(self.Model):
if obj.IsMesh:
i = obj.ID # this is the index associated with the Mesh-type object earlier to avoid having to iterate through everything twice effectively
mesh_obj = self.Model.ListOfMeshes[i]
data = dict()
data['BATCHSTART'] = self.batches[self.index_mapping[i]][0]
data['BATCHCOUNT'] = self.batches[self.index_mapping[i]][1]
data['VERTRSTART'] = self.vert_bounds[i][0]
data['VERTREND'] = self.vert_bounds[i][1]
data['BOUNDHULLST'] = self.hull_bounds[i][0]
data['BOUNDHULLED'] = self.hull_bounds[i][1]
if mesh_obj._Type == 'MESH':
# we only care about entity and material data for Mesh Objects
if type(mesh_obj.Material) != str:
if mesh_obj.Material is not None:
mat_name = str(mesh_obj.Material['Name'])
print('material name: {}'.format(mat_name))
data['MATERIAL'] = os.path.join(self.path, mat_name.upper()) + '.MATERIAL.MBIN'
else:
data['MATERIAL'] = ''
else:
data['MATERIAL'] = mesh_obj.Material
if obj.HasAttachment:
if obj.EntityData is not None:
ent_path = os.path.join(self.ent_path, str(obj.EntityPath).upper())
data['ATTACHMENT'] = '{}.ENTITY.MBIN'.format(ent_path)
# also need to generate the entity data
AttachmentData = TkAttachmentData(Components = list(obj.EntityData.values())[0]) # this is the actual entity data
AttachmentData.make_elements(main=True)
# also write the entity file now too as we don't need to do anything else to it
AttachmentData.tree.write("{}.ENTITY.exml".format(ent_path))
else:
data['ATTACHMENT'] = obj.EntityPath
else:
if obj._Type == 'LOCATOR':
if obj.HasAttachment:
if obj.EntityData is not None:
ent_path = os.path.join(self.ent_path, str(obj.EntityPath).upper())
data = {'ATTACHMENT': '{}.ENTITY.MBIN'.format(ent_path)}
# also need to generate the entity data
AttachmentData = TkAttachmentData(Components = list(obj.EntityData.values())[0]) # this is the actual entity data
AttachmentData.make_elements(main=True)
# also write the entity file now too as we don't need to do anything else to it
AttachmentData.tree.write("{}.ENTITY.exml".format(ent_path))
else:
data = {'ATTACHMENT': obj.EntityPath}
else:
data = None
elif obj._Type == 'COLLISION':
if obj.CType == 'Box':
data = {'WIDTH': obj.Width, 'HEIGHT': obj.Height, 'DEPTH': obj.Depth}
elif obj.CType == 'Sphere':
data = {'RADIUS': obj.Radius}
elif obj.CType == 'Capsule' or obj.CType == 'Cylinder':
data = {'RADIUS': obj.Radius, 'HEIGHT': obj.Height}
elif obj._Type == 'MODEL':
obj.Name = self.path
data = {'GEOMETRY': str(self.path) + ".GEOMETRY.MBIN"}
elif obj._Type == 'REFERENCE':
data = None
elif obj._Type == 'LIGHT':
data = None
obj.create_attributes(data)
def create_vertex_layouts(self):
# sort out what streams are given and create appropriate vertex layouts
VertexElements = List()
SmallVertexElements = List()
ElementCount = len(self.stream_list)
for sID in self.stream_list:
# sID is the SemanticID
# if sID in [0,1]:
Offset = 8*self.stream_list.index(sID)
VertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 5131,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
""" for the INT_2_10_10_10_REV stuff
elif sID in [2,3]:
Offset = 16 + (sID - 2)*4
VertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 36255,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
"""
for sID in [0,1]:
Offset = 8*sID
SmallVertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 5131,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
# fow now just make the small vert and vert layouts the same
""" Vertex layout needs to be changed for the new normals/tangent format"""
self.GeometryData['VertexLayout'] = TkVertexLayout(ElementCount = ElementCount,
Stride = 8*ElementCount, # this is 6* is normals and tangents, and 5* if just normals
PlatformData = "",
VertexElements = VertexElements)
self.GeometryData['SmallVertexLayout'] = TkVertexLayout(ElementCount = 2,
Stride = 16,
PlatformData = "",
VertexElements = SmallVertexElements)
def | (self):
# this combines all the input streams into one single stream with the correct offset etc as specified by the VertexLayout
# This also flattens each stream
# Again, for now just make the SmallVertexStream the same. Later, change this.
VertexStream = array('f')
SmallVertexStream = array('f')
for i in range(self.num_mesh_objs):
for j in range(self.v_stream_lens[i]):
for sID in self.stream_list:
# get the j^th 4Vector element of i^th object of the corresponding stream as specified by the stream list.
# As self.stream_list is ordered this will be mixed in the correct way wrt. the VertexLayouts
try:
VertexStream.extend(self.Model.ListOfMeshes[i].__dict__[REV_SEMANTICS[sID]][j])
if sID in [0,1]:
SmallVertexStream.extend(self.Model.ListOfMeshes[i].__dict__[REV_SEMANTICS[sID]][j])
except:
# in the case this fails there is an index error caused by collisions. In this case just add a default value
VertexStream.extend((0,0,0,1))
self.GeometryData['VertexStream'] = VertexStream
self.GeometryData['SmallVertexStream'] = SmallVertexStream
# finally we can also flatten the index stream:
IndexBuffer = array('I')
for obj in self.index_stream:
for tri in obj:
IndexBuffer.extend(tri)
self.GeometryData['IndexBuffer'] = IndexBuffer
def get_bounds(self):
# this analyses the vertex stream and finds the smallest bounding box corners.
self.GeometryData['MeshAABBMin'] = List()
self.GeometryData['MeshAABBMax'] = List()
for obj in self.Model.ListOfMeshes:
v_stream = obj.Vertices
x_verts = [i[0] for i in v_stream]
y_verts = [i[1] for i in v_stream]
z_verts = [i[2] for i in v_stream]
x_bounds = (min(x_verts), max(x_verts))
y_bounds = (min(y_verts), max(y_verts))
z_bounds = (min(z_verts), max(z_verts))
self.GeometryData['MeshAABBMin'].append(Vector4f(x=x_bounds[0], y=y_bounds[0], z=z_bounds[0], t=1))
self.GeometryData['MeshAABBMax'].append(Vector4f(x=x_bounds[1], y=y_bounds[1], z=z_bounds[1], t=1))
def process_materials(self):
# process the material data and gives the textures the correct paths
for material in self.materials:
if type(material) != str:
# in this case we are given actual material data, not just a string path location
samplers = material['Samplers']
# this will have the order Diffuse, Masks, Normal and be a List
if samplers is not None:
for sample in samplers.subElements:
# this will be a TkMaterialSampler object
t_path = str(sample['Map']) # this should be the current absolute path to the image, we want to move it to the correct relative path
new_path = os.path.join(self.texture_path, os.path.basename(t_path).upper())
try:
copy2(t_path, new_path)
except FileNotFoundError:
# in this case the path is probably broken, just set as empty if it wasn't before
new_path = ""
f_name, ext = os.path.splitext(new_path)
if ext != '.DDS' and ext != '':
# TODO: add code here to convert the image to dds format
# in this case the file is not in the correct format. Put the correct file extension in the material file
print('The file {} needs to be converted to .DDS format (file extention to be capitalised also!)'.format(new_path))
sample['Map'] = f_name + '.DDS'
else:
# all good in this case
sample['Map'] = new_path
def write(self):
# write each of the exml files.
#self.TkGeometryData.tree.write("{}.GEOMETRY.exml".format(self.path))
mbinc = mbinCompiler(self.TkGeometryData, "{}.GEOMETRY.MBIN.PC".format(self.path))
mbinc.serialise()
self.TkSceneNodeData.tree.write("{}.SCENE.exml".format(self.path))
if self.descriptor is not None:
self.descriptor.tree.write("{}.DESCRIPTOR.exml".format(self.path))
for material in self.materials:
if type(material) != str:
material.tree.write("{0}.MATERIAL.exml".format(os.path.join(self.path, str(material['Name']).upper())))
if len(self.anim_data) != 0:
if len(self.anim_data) == 1:
list(self.anim_data.values())[0].tree.write("{}.ANIM.exml".format(self.path)) # get the value and output it
else:
for name in list(self.anim_data.keys()):
self.anim_data[name].tree.write(os.path.join(self.anims_path, "{}.ANIM.exml".format(name.upper())))
def convert_to_mbin(self):
# passes all the files produced by
print('Converting all .exml files to .mbin. Please wait while this finishes.')
for directory, folders, files in os.walk(os.path.join(BASEPATH, self.directory)):
for file in files:
location = os.path.join(directory, file)
if os.path.splitext(location)[1] == '.exml':
retcode = subprocess.call(["MBINCompiler.exe", location])
if retcode == 0:
os.remove(location)
if __name__ == '__main__':
main_obj = Model(Name = 'Square')
def_mat = TkMaterialData(Name = 'Square1mat')
Obj1 = Mesh(Name = 'Square1',
Vertices = [(-1,1,0,1), (1,1,0,1), (1,-1,0,1), (-1,-1,0,1)],
Indexes = [(0,1,2), (2,3,0)],
UVs = [(0.3,0,0,1), (0,0.2,0,1), (0,0.1,0,1), (0.1,0.2,0,1)],
Material = def_mat)
main_obj.add_child(Obj1)
Obj1_col = Collision(Name = 'Square1_col', CollisionType = 'Mesh', Vertices = [(-4,4,0,1),(4,4,0,1), (4,-4,0,1), (-4,-4,0,1)],
Indexes = [(0,1,2), (2,3,0)])
Obj1.add_child(Obj1_col)
Obj2 = Mesh(Name = 'Square2',
Vertices = [(2,1,0,1), (4,1,0,1), (4,-1,0,1), (2,-1,0,1)],
Indexes = [(0,1,2), (2,3,0)],
UVs = [(0.5,0,0,1), (0.2,0.2,0,1), (0,0.5,0,1), (0.1,0.2,0,1)])
Obj1.add_child(Obj2)
loc = Locator(Name = 'testloc')
Obj2.add_child(loc)
ref = Reference(Name = 'testref')
loc.add_child(ref)
ref2 = Reference(Name = 'testref2')
loc.add_child(ref2)
light = Light(Name = 'ls', Intensity = 200000, Colour = (0.4, 0.6, 0.2))
Obj1.add_child(light)
main = Create_Data('SQUARE', 'TEST', main_obj)
from lxml import etree
def prettyPrintXml(xmlFilePathToPrettyPrint):
assert xmlFilePathToPrettyPrint is not None
parser = etree.XMLParser(resolve_entities=False, strip_cdata=False)
document = etree.parse(xmlFilePathToPrettyPrint, parser)
document.write(xmlFilePathToPrettyPrint, xml_declaration='<?xml version="1.0" encoding="utf-8"?>', pretty_print=True, encoding='utf-8')
#prettyPrintXml('TEST\SQUARE.GEOMETRY.exml')
#prettyPrintXml('TEST\SQUARE.SCENE.exml')
#prettyPrintXml('TEST\SQUARE\SQUARE_SQUARE.MATERIAL.exml')
| mix_streams | identifier_name |
main.py | #!/usr/bin/env python
"""Process the 3d model data and create required files for NMS.
This function will take all the data provided by the blender script and create a number of
.exml files that contain all the data required by the game to view the 3d model created.
"""
__author__ = "monkeyman192"
__credits__ = ["monkeyman192", "gregkwaste"]
from classes import *
import os
import subprocess
from LOOKUPS import *
from shutil import copy2
from array import array
from mbincompiler import mbinCompiler
BASEPATH = 'CUSTOMMODELS'
def traverse(obj):
# a custom generator to iterate over the tree of all the children on the scene (including the Model object)
# this returns objects from the branches inwards (which *shouldn't* be a problem...)
for child in obj.Children:
for subvalue in traverse(child):
yield subvalue
else:
yield obj
# simple function to take a list and move the entry at the ith index to the first place
def movetofront(lst, i):
k = lst.pop(i) # this will break if i > len(lst)...
return [k] + lst
class Create_Data():
def __init__(self, name, directory, model, anim_data = dict(), descriptor = None, **commands):
"""
name - the name of the file we want to create. Most entities within will have a name derived from this.
directory - the full relative location of where the scene file will be located.
model - The Model object that contains all the child nodes (of a number of different types)
"""
self.name = name # this is the name of the file
self.directory = directory # the path that the file is supposed to be located at
self.Model = model # this is the main model file for the entire scene.
self.anim_data = anim_data # animation data (defaults to None)
self.descriptor = descriptor
self.fix_names()
# assign each of the input streams to a variable
self.index_stream = []
self.vertex_stream = []
self.uv_stream = []
self.n_stream = []
self.t_stream = []
self.chvertex_stream = []
self.materials = set() # this will hopefully mean that there will be at most one copy of each unique TkMaterialData struct in the set
#self.Entities = [] # a list of any extra properties to go in each entity
# extract the streams from the mesh objects.
index = 0
for mesh in self.Model.ListOfMeshes:
self.index_stream.append(mesh.Indexes)
self.vertex_stream.append(mesh.Vertices)
self.uv_stream.append(mesh.UVs)
self.n_stream.append(mesh.Normals)
self.t_stream.append(mesh.Tangents)
self.chvertex_stream.append(mesh.CHVerts)
# also add in the material data to the list
if mesh.Material is not None:
self.materials.add(mesh.Material)
mesh.ID = index # assign the index location of the data to the Object so that it knows where its data is
index += 1
#for obj in self.Model.ListOfEntities:
# self.Entities.append(obj.EntityData)
self.num_mesh_objs = index # this is the total number of objects that have mesh data
self.mesh_data = [dict()]*self.num_mesh_objs # an empty list of dicts that will ber populated then each entry will
# be given back to the correct Mesh or Collision object
self.preprocess_streams()
# generate some variables relating to the paths
self.path = os.path.join(BASEPATH, self.directory, self.name) # the path location including the file name.
self.texture_path = os.path.join(self.path, 'TEXTURES')
self.anims_path = os.path.join(BASEPATH, self.directory, 'ANIMS')
self.ent_path = os.path.join(self.path, 'ENTITIES') # path location of the entity folder. Calling makedirs of this will ensure all the folders are made in one go
self.create_paths()
# This dictionary contains all the information for the geometry file
self.GeometryData = dict()
# This will just be some default entity with physics data
self.TkAttachmentData = TkAttachmentData() # this is created with the Physics Component Data by default
self.TkAttachmentData.make_elements(main=True)
self.process_data()
self.get_bounds()
self.create_vertex_layouts() # this creates the VertexLayout and SmallVertexLayout properties
# Material defaults
self.process_materials()
self.process_nodes()
self.mix_streams() # make this last to make sure flattening each stream doesn't affect other data.
# Assign each of the class objects that contain all of the data their data
self.TkGeometryData = TkGeometryData(**self.GeometryData)
self.TkGeometryData.make_elements(main=True)
self.Model.construct_data()
self.TkSceneNodeData = self.Model.get_data()
self.TkSceneNodeData.make_elements(main=True) # get the model to create all the required data and this will continue on down the tree
if len(self.descriptor) != 0:
self.descriptor = self.descriptor.to_exml()
self.descriptor.make_elements(main = True)
else:
self.descriptor = None
for material in self.materials:
if type(material) != str:
material.make_elements(main=True)
for anim_name in list(self.anim_data.keys()):
self.anim_data[anim_name].make_elements(main=True)
# write all the files
self.write()
# convert all the created exml files to mbin files
if not commands.get('dont_compile', False):
self.convert_to_mbin()
def create_paths(self):
# check whether the require paths exist and make them
if not os.path.exists(self.ent_path):
os.makedirs(self.ent_path)
if not os.path.exists(self.texture_path):
os.makedirs(self.texture_path)
if not os.path.exists(self.anims_path):
os.makedirs(self.anims_path)
def preprocess_streams(self):
# this will iterate through the Mesh objects and check that each of them has the same number of input streams. Any that don't will be flagged and a message will be raised
streams = set()
for mesh in self.Model.ListOfMeshes:
# first find all the streams over all the meshes that have been provided
streams = streams.union(mesh.provided_streams)
for mesh in self.Model.ListOfMeshes:
# next go back over the list and compare. If an entry isn't in the list of provided streams print a messge (maybe make a new error for this to be raised?)
diff = streams.difference(mesh.provided_streams)
if diff != set():
print('ERROR! Object {0} is missing the streams: {1}'.format(mesh.Name, diff))
if 'Vertices' in diff or 'Indexes' in diff:
print('CRITICAL ERROR! No vertex and/or index data provided for {} Object'.format(mesh.Name))
self.stream_list = list(SEMANTICS[x] for x in streams.difference({'Indexes'}))
self.stream_list.sort()
# secondly this will generate two lists containing the individual lengths of each stream
self.i_stream_lens = list()
self.v_stream_lens = list()
self.ch_stream_lens = list()
# to fix 1.3x mesh collisions, we need to make all the mesh collisions have their indexes first
# we require a mapping to know which is which though
self.index_mapping = list(range(len(self.Model.ListOfMeshes))) # the unchanged mapping
for i in range(len(self.Model.ListOfMeshes)):
mesh = self.Model.ListOfMeshes[i]
if mesh._Type == 'COLLISION':
if mesh.CType == 'Mesh':
self.index_mapping = movetofront(self.index_mapping, i) # move the index it is now located at so we can construct it correctly in the scene
print(self.index_mapping, 'index_mapping')
# populate the lists containing the lengths of each individual stream
for index in range(self.num_mesh_objs):
self.i_stream_lens.append(len(self.index_stream[index]))
self.v_stream_lens.append(len(self.vertex_stream[index]))
self.ch_stream_lens.append(len(self.chvertex_stream[index]))
def fix_names(self):
# just make sure that the name and path is all in uppercase
self.name = self.name.upper()
self.directory = self.directory.upper()
def process_data(self):
# This will do the main processing of the different streams.
# indexes
index_counts = list(3*x for x in self.i_stream_lens) # the total number of index points in each object
print(index_counts, 'index counts')
# now, re-order the indexes:
new_index_counts = list(index_counts[self.index_mapping[i]] for i in range(len(index_counts)))
print(new_index_counts, 'new_index_counts')
# and sort out the batches
self.batches = list((sum(new_index_counts[:i]), new_index_counts[i]) for i in range(self.num_mesh_objs))
print(self.batches, 'batches')
# vertices
self.vert_bounds = list((sum(self.v_stream_lens[:i]), sum(self.v_stream_lens[:i+1])-1) for i in range(self.num_mesh_objs))
# bounded hull data
self.hull_bounds = list((sum(self.ch_stream_lens[:i]), sum(self.ch_stream_lens[:i+1])) for i in range(self.num_mesh_objs))
print(self.hull_bounds, 'bound hulls')
# CollisionIndexCount
# go over all the meshes and add all the batches. Not sure if this can be optimised to be obtained earier... Probably...
ColIndexCount = 0
for i in range(len(self.Model.ListOfMeshes)):
mesh = self.Model.ListOfMeshes[i]
if mesh._Type == 'COLLISION':
if mesh.CType == 'Mesh':
#print(index_counts, sum(index_counts[:i]), index_counts[i])
ColIndexCount += index_counts[i]
# we need to fix up the index stream as the numbering needs to be continuous across all the streams
k = 0 # additive constant
for i in range(self.num_mesh_objs):
# first add k to every element in every tuple
curr_max = 0
for j in range(self.i_stream_lens[i]):
self.index_stream[i][j] = tuple(k + index for index in self.index_stream[i][j])
local_max = max(self.index_stream[i][j])
if local_max > curr_max:
curr_max = local_max
# now we set k to be the current max and this is added on to the next set.
k = curr_max + 1
#print(self.index_stream)
print('reshuffling indexes')
# now we need to re-shuffle the index data
new_index_data = list(range(self.num_mesh_objs)) # just fill with numbers for now, they will be overridden
for i in range(self.num_mesh_objs):
new_index_data[self.index_mapping[i]] = self.index_stream[i]
self.index_stream = new_index_data
#print(self.index_stream)
# First we need to find the length of each stream.
self.GeometryData['IndexCount'] = 3*sum(self.i_stream_lens)
self.GeometryData['VertexCount'] = sum(self.v_stream_lens)
self.GeometryData['CollisionIndexCount'] = ColIndexCount
self.GeometryData['MeshVertRStart'] = list(self.vert_bounds[i][0] for i in range(len(self.vert_bounds)))
self.GeometryData['MeshVertREnd'] = list(self.vert_bounds[i][1] for i in range(len(self.vert_bounds)))
self.GeometryData['BoundHullVertSt'] = list(self.hull_bounds[i][0] for i in range(len(self.hull_bounds)))
self.GeometryData['BoundHullVertEd'] = list(self.hull_bounds[i][1] for i in range(len(self.hull_bounds)))
if self.GeometryData['IndexCount'] > 2**16:
self.GeometryData['Indices16Bit'] = 0
else:
self.GeometryData['Indices16Bit'] = 1
# might as well also populate the hull data since we only need to union it all:
hull_data = []
for vert_list in self.chvertex_stream:
hull_data += vert_list
self.GeometryData['BoundHullVerts'] = hull_data
def process_nodes(self):
# this will iterate first over the list of mesh data and apply all the required information to the Mesh and Mesh-type Collisions objects.
# We will then iterate over the entire tree of children to the Model and give them any required information
# Go through every node
for obj in traverse(self.Model):
if obj.IsMesh:
i = obj.ID # this is the index associated with the Mesh-type object earlier to avoid having to iterate through everything twice effectively
mesh_obj = self.Model.ListOfMeshes[i]
data = dict()
data['BATCHSTART'] = self.batches[self.index_mapping[i]][0]
data['BATCHCOUNT'] = self.batches[self.index_mapping[i]][1]
data['VERTRSTART'] = self.vert_bounds[i][0]
data['VERTREND'] = self.vert_bounds[i][1]
data['BOUNDHULLST'] = self.hull_bounds[i][0]
data['BOUNDHULLED'] = self.hull_bounds[i][1]
if mesh_obj._Type == 'MESH':
# we only care about entity and material data for Mesh Objects
if type(mesh_obj.Material) != str:
if mesh_obj.Material is not None:
mat_name = str(mesh_obj.Material['Name'])
print('material name: {}'.format(mat_name))
data['MATERIAL'] = os.path.join(self.path, mat_name.upper()) + '.MATERIAL.MBIN'
else:
data['MATERIAL'] = ''
else:
data['MATERIAL'] = mesh_obj.Material
if obj.HasAttachment:
if obj.EntityData is not None:
ent_path = os.path.join(self.ent_path, str(obj.EntityPath).upper())
data['ATTACHMENT'] = '{}.ENTITY.MBIN'.format(ent_path)
# also need to generate the entity data
AttachmentData = TkAttachmentData(Components = list(obj.EntityData.values())[0]) # this is the actual entity data
AttachmentData.make_elements(main=True)
# also write the entity file now too as we don't need to do anything else to it
AttachmentData.tree.write("{}.ENTITY.exml".format(ent_path))
else:
data['ATTACHMENT'] = obj.EntityPath
else:
if obj._Type == 'LOCATOR':
if obj.HasAttachment:
if obj.EntityData is not None:
ent_path = os.path.join(self.ent_path, str(obj.EntityPath).upper())
data = {'ATTACHMENT': '{}.ENTITY.MBIN'.format(ent_path)}
# also need to generate the entity data
AttachmentData = TkAttachmentData(Components = list(obj.EntityData.values())[0]) # this is the actual entity data
AttachmentData.make_elements(main=True)
# also write the entity file now too as we don't need to do anything else to it
AttachmentData.tree.write("{}.ENTITY.exml".format(ent_path))
else:
data = {'ATTACHMENT': obj.EntityPath}
else:
data = None
elif obj._Type == 'COLLISION':
if obj.CType == 'Box':
data = {'WIDTH': obj.Width, 'HEIGHT': obj.Height, 'DEPTH': obj.Depth}
elif obj.CType == 'Sphere':
data = {'RADIUS': obj.Radius}
elif obj.CType == 'Capsule' or obj.CType == 'Cylinder':
data = {'RADIUS': obj.Radius, 'HEIGHT': obj.Height}
elif obj._Type == 'MODEL':
obj.Name = self.path
data = {'GEOMETRY': str(self.path) + ".GEOMETRY.MBIN"}
elif obj._Type == 'REFERENCE':
data = None
elif obj._Type == 'LIGHT':
data = None
obj.create_attributes(data)
def create_vertex_layouts(self):
# sort out what streams are given and create appropriate vertex layouts
VertexElements = List()
SmallVertexElements = List()
ElementCount = len(self.stream_list)
for sID in self.stream_list:
# sID is the SemanticID
# if sID in [0,1]:
Offset = 8*self.stream_list.index(sID)
VertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 5131,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
""" for the INT_2_10_10_10_REV stuff
elif sID in [2,3]:
Offset = 16 + (sID - 2)*4
VertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 36255,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
"""
for sID in [0,1]:
Offset = 8*sID
SmallVertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 5131,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
# fow now just make the small vert and vert layouts the same
""" Vertex layout needs to be changed for the new normals/tangent format"""
self.GeometryData['VertexLayout'] = TkVertexLayout(ElementCount = ElementCount,
Stride = 8*ElementCount, # this is 6* is normals and tangents, and 5* if just normals
PlatformData = "",
VertexElements = VertexElements)
self.GeometryData['SmallVertexLayout'] = TkVertexLayout(ElementCount = 2,
Stride = 16,
PlatformData = "",
VertexElements = SmallVertexElements)
def mix_streams(self):
# this combines all the input streams into one single stream with the correct offset etc as specified by the VertexLayout
# This also flattens each stream
# Again, for now just make the SmallVertexStream the same. Later, change this.
VertexStream = array('f')
SmallVertexStream = array('f')
for i in range(self.num_mesh_objs):
for j in range(self.v_stream_lens[i]):
for sID in self.stream_list:
# get the j^th 4Vector element of i^th object of the corresponding stream as specified by the stream list.
# As self.stream_list is ordered this will be mixed in the correct way wrt. the VertexLayouts
try:
VertexStream.extend(self.Model.ListOfMeshes[i].__dict__[REV_SEMANTICS[sID]][j])
if sID in [0,1]:
SmallVertexStream.extend(self.Model.ListOfMeshes[i].__dict__[REV_SEMANTICS[sID]][j])
except:
# in the case this fails there is an index error caused by collisions. In this case just add a default value
VertexStream.extend((0,0,0,1))
self.GeometryData['VertexStream'] = VertexStream
self.GeometryData['SmallVertexStream'] = SmallVertexStream
# finally we can also flatten the index stream:
IndexBuffer = array('I')
for obj in self.index_stream:
for tri in obj:
IndexBuffer.extend(tri)
self.GeometryData['IndexBuffer'] = IndexBuffer
def get_bounds(self):
# this analyses the vertex stream and finds the smallest bounding box corners.
self.GeometryData['MeshAABBMin'] = List()
self.GeometryData['MeshAABBMax'] = List()
for obj in self.Model.ListOfMeshes:
v_stream = obj.Vertices
x_verts = [i[0] for i in v_stream]
y_verts = [i[1] for i in v_stream] | x_bounds = (min(x_verts), max(x_verts))
y_bounds = (min(y_verts), max(y_verts))
z_bounds = (min(z_verts), max(z_verts))
self.GeometryData['MeshAABBMin'].append(Vector4f(x=x_bounds[0], y=y_bounds[0], z=z_bounds[0], t=1))
self.GeometryData['MeshAABBMax'].append(Vector4f(x=x_bounds[1], y=y_bounds[1], z=z_bounds[1], t=1))
def process_materials(self):
# process the material data and gives the textures the correct paths
for material in self.materials:
if type(material) != str:
# in this case we are given actual material data, not just a string path location
samplers = material['Samplers']
# this will have the order Diffuse, Masks, Normal and be a List
if samplers is not None:
for sample in samplers.subElements:
# this will be a TkMaterialSampler object
t_path = str(sample['Map']) # this should be the current absolute path to the image, we want to move it to the correct relative path
new_path = os.path.join(self.texture_path, os.path.basename(t_path).upper())
try:
copy2(t_path, new_path)
except FileNotFoundError:
# in this case the path is probably broken, just set as empty if it wasn't before
new_path = ""
f_name, ext = os.path.splitext(new_path)
if ext != '.DDS' and ext != '':
# TODO: add code here to convert the image to dds format
# in this case the file is not in the correct format. Put the correct file extension in the material file
print('The file {} needs to be converted to .DDS format (file extention to be capitalised also!)'.format(new_path))
sample['Map'] = f_name + '.DDS'
else:
# all good in this case
sample['Map'] = new_path
def write(self):
# write each of the exml files.
#self.TkGeometryData.tree.write("{}.GEOMETRY.exml".format(self.path))
mbinc = mbinCompiler(self.TkGeometryData, "{}.GEOMETRY.MBIN.PC".format(self.path))
mbinc.serialise()
self.TkSceneNodeData.tree.write("{}.SCENE.exml".format(self.path))
if self.descriptor is not None:
self.descriptor.tree.write("{}.DESCRIPTOR.exml".format(self.path))
for material in self.materials:
if type(material) != str:
material.tree.write("{0}.MATERIAL.exml".format(os.path.join(self.path, str(material['Name']).upper())))
if len(self.anim_data) != 0:
if len(self.anim_data) == 1:
list(self.anim_data.values())[0].tree.write("{}.ANIM.exml".format(self.path)) # get the value and output it
else:
for name in list(self.anim_data.keys()):
self.anim_data[name].tree.write(os.path.join(self.anims_path, "{}.ANIM.exml".format(name.upper())))
def convert_to_mbin(self):
# passes all the files produced by
print('Converting all .exml files to .mbin. Please wait while this finishes.')
for directory, folders, files in os.walk(os.path.join(BASEPATH, self.directory)):
for file in files:
location = os.path.join(directory, file)
if os.path.splitext(location)[1] == '.exml':
retcode = subprocess.call(["MBINCompiler.exe", location])
if retcode == 0:
os.remove(location)
if __name__ == '__main__':
main_obj = Model(Name = 'Square')
def_mat = TkMaterialData(Name = 'Square1mat')
Obj1 = Mesh(Name = 'Square1',
Vertices = [(-1,1,0,1), (1,1,0,1), (1,-1,0,1), (-1,-1,0,1)],
Indexes = [(0,1,2), (2,3,0)],
UVs = [(0.3,0,0,1), (0,0.2,0,1), (0,0.1,0,1), (0.1,0.2,0,1)],
Material = def_mat)
main_obj.add_child(Obj1)
Obj1_col = Collision(Name = 'Square1_col', CollisionType = 'Mesh', Vertices = [(-4,4,0,1),(4,4,0,1), (4,-4,0,1), (-4,-4,0,1)],
Indexes = [(0,1,2), (2,3,0)])
Obj1.add_child(Obj1_col)
Obj2 = Mesh(Name = 'Square2',
Vertices = [(2,1,0,1), (4,1,0,1), (4,-1,0,1), (2,-1,0,1)],
Indexes = [(0,1,2), (2,3,0)],
UVs = [(0.5,0,0,1), (0.2,0.2,0,1), (0,0.5,0,1), (0.1,0.2,0,1)])
Obj1.add_child(Obj2)
loc = Locator(Name = 'testloc')
Obj2.add_child(loc)
ref = Reference(Name = 'testref')
loc.add_child(ref)
ref2 = Reference(Name = 'testref2')
loc.add_child(ref2)
light = Light(Name = 'ls', Intensity = 200000, Colour = (0.4, 0.6, 0.2))
Obj1.add_child(light)
main = Create_Data('SQUARE', 'TEST', main_obj)
from lxml import etree
def prettyPrintXml(xmlFilePathToPrettyPrint):
assert xmlFilePathToPrettyPrint is not None
parser = etree.XMLParser(resolve_entities=False, strip_cdata=False)
document = etree.parse(xmlFilePathToPrettyPrint, parser)
document.write(xmlFilePathToPrettyPrint, xml_declaration='<?xml version="1.0" encoding="utf-8"?>', pretty_print=True, encoding='utf-8')
#prettyPrintXml('TEST\SQUARE.GEOMETRY.exml')
#prettyPrintXml('TEST\SQUARE.SCENE.exml')
#prettyPrintXml('TEST\SQUARE\SQUARE_SQUARE.MATERIAL.exml') | z_verts = [i[2] for i in v_stream] | random_line_split |
main.py | #!/usr/bin/env python
"""Process the 3d model data and create required files for NMS.
This function will take all the data provided by the blender script and create a number of
.exml files that contain all the data required by the game to view the 3d model created.
"""
__author__ = "monkeyman192"
__credits__ = ["monkeyman192", "gregkwaste"]
from classes import *
import os
import subprocess
from LOOKUPS import *
from shutil import copy2
from array import array
from mbincompiler import mbinCompiler
BASEPATH = 'CUSTOMMODELS'
def traverse(obj):
# a custom generator to iterate over the tree of all the children on the scene (including the Model object)
# this returns objects from the branches inwards (which *shouldn't* be a problem...)
for child in obj.Children:
for subvalue in traverse(child):
yield subvalue
else:
yield obj
# simple function to take a list and move the entry at the ith index to the first place
def movetofront(lst, i):
k = lst.pop(i) # this will break if i > len(lst)...
return [k] + lst
class Create_Data():
def __init__(self, name, directory, model, anim_data = dict(), descriptor = None, **commands):
"""
name - the name of the file we want to create. Most entities within will have a name derived from this.
directory - the full relative location of where the scene file will be located.
model - The Model object that contains all the child nodes (of a number of different types)
"""
self.name = name # this is the name of the file
self.directory = directory # the path that the file is supposed to be located at
self.Model = model # this is the main model file for the entire scene.
self.anim_data = anim_data # animation data (defaults to None)
self.descriptor = descriptor
self.fix_names()
# assign each of the input streams to a variable
self.index_stream = []
self.vertex_stream = []
self.uv_stream = []
self.n_stream = []
self.t_stream = []
self.chvertex_stream = []
self.materials = set() # this will hopefully mean that there will be at most one copy of each unique TkMaterialData struct in the set
#self.Entities = [] # a list of any extra properties to go in each entity
# extract the streams from the mesh objects.
index = 0
for mesh in self.Model.ListOfMeshes:
self.index_stream.append(mesh.Indexes)
self.vertex_stream.append(mesh.Vertices)
self.uv_stream.append(mesh.UVs)
self.n_stream.append(mesh.Normals)
self.t_stream.append(mesh.Tangents)
self.chvertex_stream.append(mesh.CHVerts)
# also add in the material data to the list
if mesh.Material is not None:
self.materials.add(mesh.Material)
mesh.ID = index # assign the index location of the data to the Object so that it knows where its data is
index += 1
#for obj in self.Model.ListOfEntities:
# self.Entities.append(obj.EntityData)
self.num_mesh_objs = index # this is the total number of objects that have mesh data
self.mesh_data = [dict()]*self.num_mesh_objs # an empty list of dicts that will ber populated then each entry will
# be given back to the correct Mesh or Collision object
self.preprocess_streams()
# generate some variables relating to the paths
self.path = os.path.join(BASEPATH, self.directory, self.name) # the path location including the file name.
self.texture_path = os.path.join(self.path, 'TEXTURES')
self.anims_path = os.path.join(BASEPATH, self.directory, 'ANIMS')
self.ent_path = os.path.join(self.path, 'ENTITIES') # path location of the entity folder. Calling makedirs of this will ensure all the folders are made in one go
self.create_paths()
# This dictionary contains all the information for the geometry file
self.GeometryData = dict()
# This will just be some default entity with physics data
self.TkAttachmentData = TkAttachmentData() # this is created with the Physics Component Data by default
self.TkAttachmentData.make_elements(main=True)
self.process_data()
self.get_bounds()
self.create_vertex_layouts() # this creates the VertexLayout and SmallVertexLayout properties
# Material defaults
self.process_materials()
self.process_nodes()
self.mix_streams() # make this last to make sure flattening each stream doesn't affect other data.
# Assign each of the class objects that contain all of the data their data
self.TkGeometryData = TkGeometryData(**self.GeometryData)
self.TkGeometryData.make_elements(main=True)
self.Model.construct_data()
self.TkSceneNodeData = self.Model.get_data()
self.TkSceneNodeData.make_elements(main=True) # get the model to create all the required data and this will continue on down the tree
if len(self.descriptor) != 0:
self.descriptor = self.descriptor.to_exml()
self.descriptor.make_elements(main = True)
else:
self.descriptor = None
for material in self.materials:
if type(material) != str:
material.make_elements(main=True)
for anim_name in list(self.anim_data.keys()):
self.anim_data[anim_name].make_elements(main=True)
# write all the files
self.write()
# convert all the created exml files to mbin files
if not commands.get('dont_compile', False):
self.convert_to_mbin()
def create_paths(self):
# check whether the require paths exist and make them
if not os.path.exists(self.ent_path):
os.makedirs(self.ent_path)
if not os.path.exists(self.texture_path):
os.makedirs(self.texture_path)
if not os.path.exists(self.anims_path):
os.makedirs(self.anims_path)
def preprocess_streams(self):
# this will iterate through the Mesh objects and check that each of them has the same number of input streams. Any that don't will be flagged and a message will be raised
streams = set()
for mesh in self.Model.ListOfMeshes:
# first find all the streams over all the meshes that have been provided
streams = streams.union(mesh.provided_streams)
for mesh in self.Model.ListOfMeshes:
# next go back over the list and compare. If an entry isn't in the list of provided streams print a messge (maybe make a new error for this to be raised?)
diff = streams.difference(mesh.provided_streams)
if diff != set():
print('ERROR! Object {0} is missing the streams: {1}'.format(mesh.Name, diff))
if 'Vertices' in diff or 'Indexes' in diff:
print('CRITICAL ERROR! No vertex and/or index data provided for {} Object'.format(mesh.Name))
self.stream_list = list(SEMANTICS[x] for x in streams.difference({'Indexes'}))
self.stream_list.sort()
# secondly this will generate two lists containing the individual lengths of each stream
self.i_stream_lens = list()
self.v_stream_lens = list()
self.ch_stream_lens = list()
# to fix 1.3x mesh collisions, we need to make all the mesh collisions have their indexes first
# we require a mapping to know which is which though
self.index_mapping = list(range(len(self.Model.ListOfMeshes))) # the unchanged mapping
for i in range(len(self.Model.ListOfMeshes)):
mesh = self.Model.ListOfMeshes[i]
if mesh._Type == 'COLLISION':
if mesh.CType == 'Mesh':
|
print(self.index_mapping, 'index_mapping')
# populate the lists containing the lengths of each individual stream
for index in range(self.num_mesh_objs):
self.i_stream_lens.append(len(self.index_stream[index]))
self.v_stream_lens.append(len(self.vertex_stream[index]))
self.ch_stream_lens.append(len(self.chvertex_stream[index]))
def fix_names(self):
# just make sure that the name and path is all in uppercase
self.name = self.name.upper()
self.directory = self.directory.upper()
def process_data(self):
# This will do the main processing of the different streams.
# indexes
index_counts = list(3*x for x in self.i_stream_lens) # the total number of index points in each object
print(index_counts, 'index counts')
# now, re-order the indexes:
new_index_counts = list(index_counts[self.index_mapping[i]] for i in range(len(index_counts)))
print(new_index_counts, 'new_index_counts')
# and sort out the batches
self.batches = list((sum(new_index_counts[:i]), new_index_counts[i]) for i in range(self.num_mesh_objs))
print(self.batches, 'batches')
# vertices
self.vert_bounds = list((sum(self.v_stream_lens[:i]), sum(self.v_stream_lens[:i+1])-1) for i in range(self.num_mesh_objs))
# bounded hull data
self.hull_bounds = list((sum(self.ch_stream_lens[:i]), sum(self.ch_stream_lens[:i+1])) for i in range(self.num_mesh_objs))
print(self.hull_bounds, 'bound hulls')
# CollisionIndexCount
# go over all the meshes and add all the batches. Not sure if this can be optimised to be obtained earier... Probably...
ColIndexCount = 0
for i in range(len(self.Model.ListOfMeshes)):
mesh = self.Model.ListOfMeshes[i]
if mesh._Type == 'COLLISION':
if mesh.CType == 'Mesh':
#print(index_counts, sum(index_counts[:i]), index_counts[i])
ColIndexCount += index_counts[i]
# we need to fix up the index stream as the numbering needs to be continuous across all the streams
k = 0 # additive constant
for i in range(self.num_mesh_objs):
# first add k to every element in every tuple
curr_max = 0
for j in range(self.i_stream_lens[i]):
self.index_stream[i][j] = tuple(k + index for index in self.index_stream[i][j])
local_max = max(self.index_stream[i][j])
if local_max > curr_max:
curr_max = local_max
# now we set k to be the current max and this is added on to the next set.
k = curr_max + 1
#print(self.index_stream)
print('reshuffling indexes')
# now we need to re-shuffle the index data
new_index_data = list(range(self.num_mesh_objs)) # just fill with numbers for now, they will be overridden
for i in range(self.num_mesh_objs):
new_index_data[self.index_mapping[i]] = self.index_stream[i]
self.index_stream = new_index_data
#print(self.index_stream)
# First we need to find the length of each stream.
self.GeometryData['IndexCount'] = 3*sum(self.i_stream_lens)
self.GeometryData['VertexCount'] = sum(self.v_stream_lens)
self.GeometryData['CollisionIndexCount'] = ColIndexCount
self.GeometryData['MeshVertRStart'] = list(self.vert_bounds[i][0] for i in range(len(self.vert_bounds)))
self.GeometryData['MeshVertREnd'] = list(self.vert_bounds[i][1] for i in range(len(self.vert_bounds)))
self.GeometryData['BoundHullVertSt'] = list(self.hull_bounds[i][0] for i in range(len(self.hull_bounds)))
self.GeometryData['BoundHullVertEd'] = list(self.hull_bounds[i][1] for i in range(len(self.hull_bounds)))
if self.GeometryData['IndexCount'] > 2**16:
self.GeometryData['Indices16Bit'] = 0
else:
self.GeometryData['Indices16Bit'] = 1
# might as well also populate the hull data since we only need to union it all:
hull_data = []
for vert_list in self.chvertex_stream:
hull_data += vert_list
self.GeometryData['BoundHullVerts'] = hull_data
def process_nodes(self):
# this will iterate first over the list of mesh data and apply all the required information to the Mesh and Mesh-type Collisions objects.
# We will then iterate over the entire tree of children to the Model and give them any required information
# Go through every node
for obj in traverse(self.Model):
if obj.IsMesh:
i = obj.ID # this is the index associated with the Mesh-type object earlier to avoid having to iterate through everything twice effectively
mesh_obj = self.Model.ListOfMeshes[i]
data = dict()
data['BATCHSTART'] = self.batches[self.index_mapping[i]][0]
data['BATCHCOUNT'] = self.batches[self.index_mapping[i]][1]
data['VERTRSTART'] = self.vert_bounds[i][0]
data['VERTREND'] = self.vert_bounds[i][1]
data['BOUNDHULLST'] = self.hull_bounds[i][0]
data['BOUNDHULLED'] = self.hull_bounds[i][1]
if mesh_obj._Type == 'MESH':
# we only care about entity and material data for Mesh Objects
if type(mesh_obj.Material) != str:
if mesh_obj.Material is not None:
mat_name = str(mesh_obj.Material['Name'])
print('material name: {}'.format(mat_name))
data['MATERIAL'] = os.path.join(self.path, mat_name.upper()) + '.MATERIAL.MBIN'
else:
data['MATERIAL'] = ''
else:
data['MATERIAL'] = mesh_obj.Material
if obj.HasAttachment:
if obj.EntityData is not None:
ent_path = os.path.join(self.ent_path, str(obj.EntityPath).upper())
data['ATTACHMENT'] = '{}.ENTITY.MBIN'.format(ent_path)
# also need to generate the entity data
AttachmentData = TkAttachmentData(Components = list(obj.EntityData.values())[0]) # this is the actual entity data
AttachmentData.make_elements(main=True)
# also write the entity file now too as we don't need to do anything else to it
AttachmentData.tree.write("{}.ENTITY.exml".format(ent_path))
else:
data['ATTACHMENT'] = obj.EntityPath
else:
if obj._Type == 'LOCATOR':
if obj.HasAttachment:
if obj.EntityData is not None:
ent_path = os.path.join(self.ent_path, str(obj.EntityPath).upper())
data = {'ATTACHMENT': '{}.ENTITY.MBIN'.format(ent_path)}
# also need to generate the entity data
AttachmentData = TkAttachmentData(Components = list(obj.EntityData.values())[0]) # this is the actual entity data
AttachmentData.make_elements(main=True)
# also write the entity file now too as we don't need to do anything else to it
AttachmentData.tree.write("{}.ENTITY.exml".format(ent_path))
else:
data = {'ATTACHMENT': obj.EntityPath}
else:
data = None
elif obj._Type == 'COLLISION':
if obj.CType == 'Box':
data = {'WIDTH': obj.Width, 'HEIGHT': obj.Height, 'DEPTH': obj.Depth}
elif obj.CType == 'Sphere':
data = {'RADIUS': obj.Radius}
elif obj.CType == 'Capsule' or obj.CType == 'Cylinder':
data = {'RADIUS': obj.Radius, 'HEIGHT': obj.Height}
elif obj._Type == 'MODEL':
obj.Name = self.path
data = {'GEOMETRY': str(self.path) + ".GEOMETRY.MBIN"}
elif obj._Type == 'REFERENCE':
data = None
elif obj._Type == 'LIGHT':
data = None
obj.create_attributes(data)
def create_vertex_layouts(self):
# sort out what streams are given and create appropriate vertex layouts
VertexElements = List()
SmallVertexElements = List()
ElementCount = len(self.stream_list)
for sID in self.stream_list:
# sID is the SemanticID
# if sID in [0,1]:
Offset = 8*self.stream_list.index(sID)
VertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 5131,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
""" for the INT_2_10_10_10_REV stuff
elif sID in [2,3]:
Offset = 16 + (sID - 2)*4
VertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 36255,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
"""
for sID in [0,1]:
Offset = 8*sID
SmallVertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 5131,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
# fow now just make the small vert and vert layouts the same
""" Vertex layout needs to be changed for the new normals/tangent format"""
self.GeometryData['VertexLayout'] = TkVertexLayout(ElementCount = ElementCount,
Stride = 8*ElementCount, # this is 6* is normals and tangents, and 5* if just normals
PlatformData = "",
VertexElements = VertexElements)
self.GeometryData['SmallVertexLayout'] = TkVertexLayout(ElementCount = 2,
Stride = 16,
PlatformData = "",
VertexElements = SmallVertexElements)
def mix_streams(self):
# this combines all the input streams into one single stream with the correct offset etc as specified by the VertexLayout
# This also flattens each stream
# Again, for now just make the SmallVertexStream the same. Later, change this.
VertexStream = array('f')
SmallVertexStream = array('f')
for i in range(self.num_mesh_objs):
for j in range(self.v_stream_lens[i]):
for sID in self.stream_list:
# get the j^th 4Vector element of i^th object of the corresponding stream as specified by the stream list.
# As self.stream_list is ordered this will be mixed in the correct way wrt. the VertexLayouts
try:
VertexStream.extend(self.Model.ListOfMeshes[i].__dict__[REV_SEMANTICS[sID]][j])
if sID in [0,1]:
SmallVertexStream.extend(self.Model.ListOfMeshes[i].__dict__[REV_SEMANTICS[sID]][j])
except:
# in the case this fails there is an index error caused by collisions. In this case just add a default value
VertexStream.extend((0,0,0,1))
self.GeometryData['VertexStream'] = VertexStream
self.GeometryData['SmallVertexStream'] = SmallVertexStream
# finally we can also flatten the index stream:
IndexBuffer = array('I')
for obj in self.index_stream:
for tri in obj:
IndexBuffer.extend(tri)
self.GeometryData['IndexBuffer'] = IndexBuffer
def get_bounds(self):
# this analyses the vertex stream and finds the smallest bounding box corners.
self.GeometryData['MeshAABBMin'] = List()
self.GeometryData['MeshAABBMax'] = List()
for obj in self.Model.ListOfMeshes:
v_stream = obj.Vertices
x_verts = [i[0] for i in v_stream]
y_verts = [i[1] for i in v_stream]
z_verts = [i[2] for i in v_stream]
x_bounds = (min(x_verts), max(x_verts))
y_bounds = (min(y_verts), max(y_verts))
z_bounds = (min(z_verts), max(z_verts))
self.GeometryData['MeshAABBMin'].append(Vector4f(x=x_bounds[0], y=y_bounds[0], z=z_bounds[0], t=1))
self.GeometryData['MeshAABBMax'].append(Vector4f(x=x_bounds[1], y=y_bounds[1], z=z_bounds[1], t=1))
def process_materials(self):
# process the material data and gives the textures the correct paths
for material in self.materials:
if type(material) != str:
# in this case we are given actual material data, not just a string path location
samplers = material['Samplers']
# this will have the order Diffuse, Masks, Normal and be a List
if samplers is not None:
for sample in samplers.subElements:
# this will be a TkMaterialSampler object
t_path = str(sample['Map']) # this should be the current absolute path to the image, we want to move it to the correct relative path
new_path = os.path.join(self.texture_path, os.path.basename(t_path).upper())
try:
copy2(t_path, new_path)
except FileNotFoundError:
# in this case the path is probably broken, just set as empty if it wasn't before
new_path = ""
f_name, ext = os.path.splitext(new_path)
if ext != '.DDS' and ext != '':
# TODO: add code here to convert the image to dds format
# in this case the file is not in the correct format. Put the correct file extension in the material file
print('The file {} needs to be converted to .DDS format (file extention to be capitalised also!)'.format(new_path))
sample['Map'] = f_name + '.DDS'
else:
# all good in this case
sample['Map'] = new_path
def write(self):
# write each of the exml files.
#self.TkGeometryData.tree.write("{}.GEOMETRY.exml".format(self.path))
mbinc = mbinCompiler(self.TkGeometryData, "{}.GEOMETRY.MBIN.PC".format(self.path))
mbinc.serialise()
self.TkSceneNodeData.tree.write("{}.SCENE.exml".format(self.path))
if self.descriptor is not None:
self.descriptor.tree.write("{}.DESCRIPTOR.exml".format(self.path))
for material in self.materials:
if type(material) != str:
material.tree.write("{0}.MATERIAL.exml".format(os.path.join(self.path, str(material['Name']).upper())))
if len(self.anim_data) != 0:
if len(self.anim_data) == 1:
list(self.anim_data.values())[0].tree.write("{}.ANIM.exml".format(self.path)) # get the value and output it
else:
for name in list(self.anim_data.keys()):
self.anim_data[name].tree.write(os.path.join(self.anims_path, "{}.ANIM.exml".format(name.upper())))
def convert_to_mbin(self):
# passes all the files produced by
print('Converting all .exml files to .mbin. Please wait while this finishes.')
for directory, folders, files in os.walk(os.path.join(BASEPATH, self.directory)):
for file in files:
location = os.path.join(directory, file)
if os.path.splitext(location)[1] == '.exml':
retcode = subprocess.call(["MBINCompiler.exe", location])
if retcode == 0:
os.remove(location)
if __name__ == '__main__':
main_obj = Model(Name = 'Square')
def_mat = TkMaterialData(Name = 'Square1mat')
Obj1 = Mesh(Name = 'Square1',
Vertices = [(-1,1,0,1), (1,1,0,1), (1,-1,0,1), (-1,-1,0,1)],
Indexes = [(0,1,2), (2,3,0)],
UVs = [(0.3,0,0,1), (0,0.2,0,1), (0,0.1,0,1), (0.1,0.2,0,1)],
Material = def_mat)
main_obj.add_child(Obj1)
Obj1_col = Collision(Name = 'Square1_col', CollisionType = 'Mesh', Vertices = [(-4,4,0,1),(4,4,0,1), (4,-4,0,1), (-4,-4,0,1)],
Indexes = [(0,1,2), (2,3,0)])
Obj1.add_child(Obj1_col)
Obj2 = Mesh(Name = 'Square2',
Vertices = [(2,1,0,1), (4,1,0,1), (4,-1,0,1), (2,-1,0,1)],
Indexes = [(0,1,2), (2,3,0)],
UVs = [(0.5,0,0,1), (0.2,0.2,0,1), (0,0.5,0,1), (0.1,0.2,0,1)])
Obj1.add_child(Obj2)
loc = Locator(Name = 'testloc')
Obj2.add_child(loc)
ref = Reference(Name = 'testref')
loc.add_child(ref)
ref2 = Reference(Name = 'testref2')
loc.add_child(ref2)
light = Light(Name = 'ls', Intensity = 200000, Colour = (0.4, 0.6, 0.2))
Obj1.add_child(light)
main = Create_Data('SQUARE', 'TEST', main_obj)
from lxml import etree
def prettyPrintXml(xmlFilePathToPrettyPrint):
assert xmlFilePathToPrettyPrint is not None
parser = etree.XMLParser(resolve_entities=False, strip_cdata=False)
document = etree.parse(xmlFilePathToPrettyPrint, parser)
document.write(xmlFilePathToPrettyPrint, xml_declaration='<?xml version="1.0" encoding="utf-8"?>', pretty_print=True, encoding='utf-8')
#prettyPrintXml('TEST\SQUARE.GEOMETRY.exml')
#prettyPrintXml('TEST\SQUARE.SCENE.exml')
#prettyPrintXml('TEST\SQUARE\SQUARE_SQUARE.MATERIAL.exml')
| self.index_mapping = movetofront(self.index_mapping, i) # move the index it is now located at so we can construct it correctly in the scene | conditional_block |
main.py | #!/usr/bin/env python
"""Process the 3d model data and create required files for NMS.
This function will take all the data provided by the blender script and create a number of
.exml files that contain all the data required by the game to view the 3d model created.
"""
__author__ = "monkeyman192"
__credits__ = ["monkeyman192", "gregkwaste"]
from classes import *
import os
import subprocess
from LOOKUPS import *
from shutil import copy2
from array import array
from mbincompiler import mbinCompiler
BASEPATH = 'CUSTOMMODELS'
def traverse(obj):
# a custom generator to iterate over the tree of all the children on the scene (including the Model object)
# this returns objects from the branches inwards (which *shouldn't* be a problem...)
|
# simple function to take a list and move the entry at the ith index to the first place
def movetofront(lst, i):
k = lst.pop(i) # this will break if i > len(lst)...
return [k] + lst
class Create_Data():
def __init__(self, name, directory, model, anim_data = dict(), descriptor = None, **commands):
"""
name - the name of the file we want to create. Most entities within will have a name derived from this.
directory - the full relative location of where the scene file will be located.
model - The Model object that contains all the child nodes (of a number of different types)
"""
self.name = name # this is the name of the file
self.directory = directory # the path that the file is supposed to be located at
self.Model = model # this is the main model file for the entire scene.
self.anim_data = anim_data # animation data (defaults to None)
self.descriptor = descriptor
self.fix_names()
# assign each of the input streams to a variable
self.index_stream = []
self.vertex_stream = []
self.uv_stream = []
self.n_stream = []
self.t_stream = []
self.chvertex_stream = []
self.materials = set() # this will hopefully mean that there will be at most one copy of each unique TkMaterialData struct in the set
#self.Entities = [] # a list of any extra properties to go in each entity
# extract the streams from the mesh objects.
index = 0
for mesh in self.Model.ListOfMeshes:
self.index_stream.append(mesh.Indexes)
self.vertex_stream.append(mesh.Vertices)
self.uv_stream.append(mesh.UVs)
self.n_stream.append(mesh.Normals)
self.t_stream.append(mesh.Tangents)
self.chvertex_stream.append(mesh.CHVerts)
# also add in the material data to the list
if mesh.Material is not None:
self.materials.add(mesh.Material)
mesh.ID = index # assign the index location of the data to the Object so that it knows where its data is
index += 1
#for obj in self.Model.ListOfEntities:
# self.Entities.append(obj.EntityData)
self.num_mesh_objs = index # this is the total number of objects that have mesh data
self.mesh_data = [dict()]*self.num_mesh_objs # an empty list of dicts that will ber populated then each entry will
# be given back to the correct Mesh or Collision object
self.preprocess_streams()
# generate some variables relating to the paths
self.path = os.path.join(BASEPATH, self.directory, self.name) # the path location including the file name.
self.texture_path = os.path.join(self.path, 'TEXTURES')
self.anims_path = os.path.join(BASEPATH, self.directory, 'ANIMS')
self.ent_path = os.path.join(self.path, 'ENTITIES') # path location of the entity folder. Calling makedirs of this will ensure all the folders are made in one go
self.create_paths()
# This dictionary contains all the information for the geometry file
self.GeometryData = dict()
# This will just be some default entity with physics data
self.TkAttachmentData = TkAttachmentData() # this is created with the Physics Component Data by default
self.TkAttachmentData.make_elements(main=True)
self.process_data()
self.get_bounds()
self.create_vertex_layouts() # this creates the VertexLayout and SmallVertexLayout properties
# Material defaults
self.process_materials()
self.process_nodes()
self.mix_streams() # make this last to make sure flattening each stream doesn't affect other data.
# Assign each of the class objects that contain all of the data their data
self.TkGeometryData = TkGeometryData(**self.GeometryData)
self.TkGeometryData.make_elements(main=True)
self.Model.construct_data()
self.TkSceneNodeData = self.Model.get_data()
self.TkSceneNodeData.make_elements(main=True) # get the model to create all the required data and this will continue on down the tree
if len(self.descriptor) != 0:
self.descriptor = self.descriptor.to_exml()
self.descriptor.make_elements(main = True)
else:
self.descriptor = None
for material in self.materials:
if type(material) != str:
material.make_elements(main=True)
for anim_name in list(self.anim_data.keys()):
self.anim_data[anim_name].make_elements(main=True)
# write all the files
self.write()
# convert all the created exml files to mbin files
if not commands.get('dont_compile', False):
self.convert_to_mbin()
def create_paths(self):
# check whether the require paths exist and make them
if not os.path.exists(self.ent_path):
os.makedirs(self.ent_path)
if not os.path.exists(self.texture_path):
os.makedirs(self.texture_path)
if not os.path.exists(self.anims_path):
os.makedirs(self.anims_path)
def preprocess_streams(self):
# this will iterate through the Mesh objects and check that each of them has the same number of input streams. Any that don't will be flagged and a message will be raised
streams = set()
for mesh in self.Model.ListOfMeshes:
# first find all the streams over all the meshes that have been provided
streams = streams.union(mesh.provided_streams)
for mesh in self.Model.ListOfMeshes:
# next go back over the list and compare. If an entry isn't in the list of provided streams print a messge (maybe make a new error for this to be raised?)
diff = streams.difference(mesh.provided_streams)
if diff != set():
print('ERROR! Object {0} is missing the streams: {1}'.format(mesh.Name, diff))
if 'Vertices' in diff or 'Indexes' in diff:
print('CRITICAL ERROR! No vertex and/or index data provided for {} Object'.format(mesh.Name))
self.stream_list = list(SEMANTICS[x] for x in streams.difference({'Indexes'}))
self.stream_list.sort()
# secondly this will generate two lists containing the individual lengths of each stream
self.i_stream_lens = list()
self.v_stream_lens = list()
self.ch_stream_lens = list()
# to fix 1.3x mesh collisions, we need to make all the mesh collisions have their indexes first
# we require a mapping to know which is which though
self.index_mapping = list(range(len(self.Model.ListOfMeshes))) # the unchanged mapping
for i in range(len(self.Model.ListOfMeshes)):
mesh = self.Model.ListOfMeshes[i]
if mesh._Type == 'COLLISION':
if mesh.CType == 'Mesh':
self.index_mapping = movetofront(self.index_mapping, i) # move the index it is now located at so we can construct it correctly in the scene
print(self.index_mapping, 'index_mapping')
# populate the lists containing the lengths of each individual stream
for index in range(self.num_mesh_objs):
self.i_stream_lens.append(len(self.index_stream[index]))
self.v_stream_lens.append(len(self.vertex_stream[index]))
self.ch_stream_lens.append(len(self.chvertex_stream[index]))
def fix_names(self):
# just make sure that the name and path is all in uppercase
self.name = self.name.upper()
self.directory = self.directory.upper()
def process_data(self):
# This will do the main processing of the different streams.
# indexes
index_counts = list(3*x for x in self.i_stream_lens) # the total number of index points in each object
print(index_counts, 'index counts')
# now, re-order the indexes:
new_index_counts = list(index_counts[self.index_mapping[i]] for i in range(len(index_counts)))
print(new_index_counts, 'new_index_counts')
# and sort out the batches
self.batches = list((sum(new_index_counts[:i]), new_index_counts[i]) for i in range(self.num_mesh_objs))
print(self.batches, 'batches')
# vertices
self.vert_bounds = list((sum(self.v_stream_lens[:i]), sum(self.v_stream_lens[:i+1])-1) for i in range(self.num_mesh_objs))
# bounded hull data
self.hull_bounds = list((sum(self.ch_stream_lens[:i]), sum(self.ch_stream_lens[:i+1])) for i in range(self.num_mesh_objs))
print(self.hull_bounds, 'bound hulls')
# CollisionIndexCount
# go over all the meshes and add all the batches. Not sure if this can be optimised to be obtained earier... Probably...
ColIndexCount = 0
for i in range(len(self.Model.ListOfMeshes)):
mesh = self.Model.ListOfMeshes[i]
if mesh._Type == 'COLLISION':
if mesh.CType == 'Mesh':
#print(index_counts, sum(index_counts[:i]), index_counts[i])
ColIndexCount += index_counts[i]
# we need to fix up the index stream as the numbering needs to be continuous across all the streams
k = 0 # additive constant
for i in range(self.num_mesh_objs):
# first add k to every element in every tuple
curr_max = 0
for j in range(self.i_stream_lens[i]):
self.index_stream[i][j] = tuple(k + index for index in self.index_stream[i][j])
local_max = max(self.index_stream[i][j])
if local_max > curr_max:
curr_max = local_max
# now we set k to be the current max and this is added on to the next set.
k = curr_max + 1
#print(self.index_stream)
print('reshuffling indexes')
# now we need to re-shuffle the index data
new_index_data = list(range(self.num_mesh_objs)) # just fill with numbers for now, they will be overridden
for i in range(self.num_mesh_objs):
new_index_data[self.index_mapping[i]] = self.index_stream[i]
self.index_stream = new_index_data
#print(self.index_stream)
# First we need to find the length of each stream.
self.GeometryData['IndexCount'] = 3*sum(self.i_stream_lens)
self.GeometryData['VertexCount'] = sum(self.v_stream_lens)
self.GeometryData['CollisionIndexCount'] = ColIndexCount
self.GeometryData['MeshVertRStart'] = list(self.vert_bounds[i][0] for i in range(len(self.vert_bounds)))
self.GeometryData['MeshVertREnd'] = list(self.vert_bounds[i][1] for i in range(len(self.vert_bounds)))
self.GeometryData['BoundHullVertSt'] = list(self.hull_bounds[i][0] for i in range(len(self.hull_bounds)))
self.GeometryData['BoundHullVertEd'] = list(self.hull_bounds[i][1] for i in range(len(self.hull_bounds)))
if self.GeometryData['IndexCount'] > 2**16:
self.GeometryData['Indices16Bit'] = 0
else:
self.GeometryData['Indices16Bit'] = 1
# might as well also populate the hull data since we only need to union it all:
hull_data = []
for vert_list in self.chvertex_stream:
hull_data += vert_list
self.GeometryData['BoundHullVerts'] = hull_data
def process_nodes(self):
# this will iterate first over the list of mesh data and apply all the required information to the Mesh and Mesh-type Collisions objects.
# We will then iterate over the entire tree of children to the Model and give them any required information
# Go through every node
for obj in traverse(self.Model):
if obj.IsMesh:
i = obj.ID # this is the index associated with the Mesh-type object earlier to avoid having to iterate through everything twice effectively
mesh_obj = self.Model.ListOfMeshes[i]
data = dict()
data['BATCHSTART'] = self.batches[self.index_mapping[i]][0]
data['BATCHCOUNT'] = self.batches[self.index_mapping[i]][1]
data['VERTRSTART'] = self.vert_bounds[i][0]
data['VERTREND'] = self.vert_bounds[i][1]
data['BOUNDHULLST'] = self.hull_bounds[i][0]
data['BOUNDHULLED'] = self.hull_bounds[i][1]
if mesh_obj._Type == 'MESH':
# we only care about entity and material data for Mesh Objects
if type(mesh_obj.Material) != str:
if mesh_obj.Material is not None:
mat_name = str(mesh_obj.Material['Name'])
print('material name: {}'.format(mat_name))
data['MATERIAL'] = os.path.join(self.path, mat_name.upper()) + '.MATERIAL.MBIN'
else:
data['MATERIAL'] = ''
else:
data['MATERIAL'] = mesh_obj.Material
if obj.HasAttachment:
if obj.EntityData is not None:
ent_path = os.path.join(self.ent_path, str(obj.EntityPath).upper())
data['ATTACHMENT'] = '{}.ENTITY.MBIN'.format(ent_path)
# also need to generate the entity data
AttachmentData = TkAttachmentData(Components = list(obj.EntityData.values())[0]) # this is the actual entity data
AttachmentData.make_elements(main=True)
# also write the entity file now too as we don't need to do anything else to it
AttachmentData.tree.write("{}.ENTITY.exml".format(ent_path))
else:
data['ATTACHMENT'] = obj.EntityPath
else:
if obj._Type == 'LOCATOR':
if obj.HasAttachment:
if obj.EntityData is not None:
ent_path = os.path.join(self.ent_path, str(obj.EntityPath).upper())
data = {'ATTACHMENT': '{}.ENTITY.MBIN'.format(ent_path)}
# also need to generate the entity data
AttachmentData = TkAttachmentData(Components = list(obj.EntityData.values())[0]) # this is the actual entity data
AttachmentData.make_elements(main=True)
# also write the entity file now too as we don't need to do anything else to it
AttachmentData.tree.write("{}.ENTITY.exml".format(ent_path))
else:
data = {'ATTACHMENT': obj.EntityPath}
else:
data = None
elif obj._Type == 'COLLISION':
if obj.CType == 'Box':
data = {'WIDTH': obj.Width, 'HEIGHT': obj.Height, 'DEPTH': obj.Depth}
elif obj.CType == 'Sphere':
data = {'RADIUS': obj.Radius}
elif obj.CType == 'Capsule' or obj.CType == 'Cylinder':
data = {'RADIUS': obj.Radius, 'HEIGHT': obj.Height}
elif obj._Type == 'MODEL':
obj.Name = self.path
data = {'GEOMETRY': str(self.path) + ".GEOMETRY.MBIN"}
elif obj._Type == 'REFERENCE':
data = None
elif obj._Type == 'LIGHT':
data = None
obj.create_attributes(data)
def create_vertex_layouts(self):
# sort out what streams are given and create appropriate vertex layouts
VertexElements = List()
SmallVertexElements = List()
ElementCount = len(self.stream_list)
for sID in self.stream_list:
# sID is the SemanticID
# if sID in [0,1]:
Offset = 8*self.stream_list.index(sID)
VertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 5131,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
""" for the INT_2_10_10_10_REV stuff
elif sID in [2,3]:
Offset = 16 + (sID - 2)*4
VertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 36255,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
"""
for sID in [0,1]:
Offset = 8*sID
SmallVertexElements.append(TkVertexElement(SemanticID = sID,
Size = 4,
Type = 5131,
Offset = Offset,
Normalise = 0,
Instancing = "PerVertex",
PlatformData = ""))
# fow now just make the small vert and vert layouts the same
""" Vertex layout needs to be changed for the new normals/tangent format"""
self.GeometryData['VertexLayout'] = TkVertexLayout(ElementCount = ElementCount,
Stride = 8*ElementCount, # this is 6* is normals and tangents, and 5* if just normals
PlatformData = "",
VertexElements = VertexElements)
self.GeometryData['SmallVertexLayout'] = TkVertexLayout(ElementCount = 2,
Stride = 16,
PlatformData = "",
VertexElements = SmallVertexElements)
def mix_streams(self):
# this combines all the input streams into one single stream with the correct offset etc as specified by the VertexLayout
# This also flattens each stream
# Again, for now just make the SmallVertexStream the same. Later, change this.
VertexStream = array('f')
SmallVertexStream = array('f')
for i in range(self.num_mesh_objs):
for j in range(self.v_stream_lens[i]):
for sID in self.stream_list:
# get the j^th 4Vector element of i^th object of the corresponding stream as specified by the stream list.
# As self.stream_list is ordered this will be mixed in the correct way wrt. the VertexLayouts
try:
VertexStream.extend(self.Model.ListOfMeshes[i].__dict__[REV_SEMANTICS[sID]][j])
if sID in [0,1]:
SmallVertexStream.extend(self.Model.ListOfMeshes[i].__dict__[REV_SEMANTICS[sID]][j])
except:
# in the case this fails there is an index error caused by collisions. In this case just add a default value
VertexStream.extend((0,0,0,1))
self.GeometryData['VertexStream'] = VertexStream
self.GeometryData['SmallVertexStream'] = SmallVertexStream
# finally we can also flatten the index stream:
IndexBuffer = array('I')
for obj in self.index_stream:
for tri in obj:
IndexBuffer.extend(tri)
self.GeometryData['IndexBuffer'] = IndexBuffer
def get_bounds(self):
# this analyses the vertex stream and finds the smallest bounding box corners.
self.GeometryData['MeshAABBMin'] = List()
self.GeometryData['MeshAABBMax'] = List()
for obj in self.Model.ListOfMeshes:
v_stream = obj.Vertices
x_verts = [i[0] for i in v_stream]
y_verts = [i[1] for i in v_stream]
z_verts = [i[2] for i in v_stream]
x_bounds = (min(x_verts), max(x_verts))
y_bounds = (min(y_verts), max(y_verts))
z_bounds = (min(z_verts), max(z_verts))
self.GeometryData['MeshAABBMin'].append(Vector4f(x=x_bounds[0], y=y_bounds[0], z=z_bounds[0], t=1))
self.GeometryData['MeshAABBMax'].append(Vector4f(x=x_bounds[1], y=y_bounds[1], z=z_bounds[1], t=1))
def process_materials(self):
# process the material data and gives the textures the correct paths
for material in self.materials:
if type(material) != str:
# in this case we are given actual material data, not just a string path location
samplers = material['Samplers']
# this will have the order Diffuse, Masks, Normal and be a List
if samplers is not None:
for sample in samplers.subElements:
# this will be a TkMaterialSampler object
t_path = str(sample['Map']) # this should be the current absolute path to the image, we want to move it to the correct relative path
new_path = os.path.join(self.texture_path, os.path.basename(t_path).upper())
try:
copy2(t_path, new_path)
except FileNotFoundError:
# in this case the path is probably broken, just set as empty if it wasn't before
new_path = ""
f_name, ext = os.path.splitext(new_path)
if ext != '.DDS' and ext != '':
# TODO: add code here to convert the image to dds format
# in this case the file is not in the correct format. Put the correct file extension in the material file
print('The file {} needs to be converted to .DDS format (file extention to be capitalised also!)'.format(new_path))
sample['Map'] = f_name + '.DDS'
else:
# all good in this case
sample['Map'] = new_path
def write(self):
# write each of the exml files.
#self.TkGeometryData.tree.write("{}.GEOMETRY.exml".format(self.path))
mbinc = mbinCompiler(self.TkGeometryData, "{}.GEOMETRY.MBIN.PC".format(self.path))
mbinc.serialise()
self.TkSceneNodeData.tree.write("{}.SCENE.exml".format(self.path))
if self.descriptor is not None:
self.descriptor.tree.write("{}.DESCRIPTOR.exml".format(self.path))
for material in self.materials:
if type(material) != str:
material.tree.write("{0}.MATERIAL.exml".format(os.path.join(self.path, str(material['Name']).upper())))
if len(self.anim_data) != 0:
if len(self.anim_data) == 1:
list(self.anim_data.values())[0].tree.write("{}.ANIM.exml".format(self.path)) # get the value and output it
else:
for name in list(self.anim_data.keys()):
self.anim_data[name].tree.write(os.path.join(self.anims_path, "{}.ANIM.exml".format(name.upper())))
def convert_to_mbin(self):
# passes all the files produced by
print('Converting all .exml files to .mbin. Please wait while this finishes.')
for directory, folders, files in os.walk(os.path.join(BASEPATH, self.directory)):
for file in files:
location = os.path.join(directory, file)
if os.path.splitext(location)[1] == '.exml':
retcode = subprocess.call(["MBINCompiler.exe", location])
if retcode == 0:
os.remove(location)
if __name__ == '__main__':
main_obj = Model(Name = 'Square')
def_mat = TkMaterialData(Name = 'Square1mat')
Obj1 = Mesh(Name = 'Square1',
Vertices = [(-1,1,0,1), (1,1,0,1), (1,-1,0,1), (-1,-1,0,1)],
Indexes = [(0,1,2), (2,3,0)],
UVs = [(0.3,0,0,1), (0,0.2,0,1), (0,0.1,0,1), (0.1,0.2,0,1)],
Material = def_mat)
main_obj.add_child(Obj1)
Obj1_col = Collision(Name = 'Square1_col', CollisionType = 'Mesh', Vertices = [(-4,4,0,1),(4,4,0,1), (4,-4,0,1), (-4,-4,0,1)],
Indexes = [(0,1,2), (2,3,0)])
Obj1.add_child(Obj1_col)
Obj2 = Mesh(Name = 'Square2',
Vertices = [(2,1,0,1), (4,1,0,1), (4,-1,0,1), (2,-1,0,1)],
Indexes = [(0,1,2), (2,3,0)],
UVs = [(0.5,0,0,1), (0.2,0.2,0,1), (0,0.5,0,1), (0.1,0.2,0,1)])
Obj1.add_child(Obj2)
loc = Locator(Name = 'testloc')
Obj2.add_child(loc)
ref = Reference(Name = 'testref')
loc.add_child(ref)
ref2 = Reference(Name = 'testref2')
loc.add_child(ref2)
light = Light(Name = 'ls', Intensity = 200000, Colour = (0.4, 0.6, 0.2))
Obj1.add_child(light)
main = Create_Data('SQUARE', 'TEST', main_obj)
from lxml import etree
def prettyPrintXml(xmlFilePathToPrettyPrint):
assert xmlFilePathToPrettyPrint is not None
parser = etree.XMLParser(resolve_entities=False, strip_cdata=False)
document = etree.parse(xmlFilePathToPrettyPrint, parser)
document.write(xmlFilePathToPrettyPrint, xml_declaration='<?xml version="1.0" encoding="utf-8"?>', pretty_print=True, encoding='utf-8')
#prettyPrintXml('TEST\SQUARE.GEOMETRY.exml')
#prettyPrintXml('TEST\SQUARE.SCENE.exml')
#prettyPrintXml('TEST\SQUARE\SQUARE_SQUARE.MATERIAL.exml')
| for child in obj.Children:
for subvalue in traverse(child):
yield subvalue
else:
yield obj | identifier_body |
lib.rs | // The MIT License (MIT)
// Copyright (c) 2018 Matrix.Zhang <113445886@qq.com>
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! This library supports Alibaba's Dayu SMS SDK version of '2017-05-25'.
//!
//! ## Basic usage
//!
//! ```rust
//!use dayu::Dayu;
//!use serde_json::json;
//!
//!let dayu = Dayu::new()
//! .set_access_key("access_key")
//! .set_access_secret("access_secret")
//! .set_sign_name("阿里云测试短信");
//!dayu.sms_send(&["138XXXXXXXX"], "SMS_123456", Some(&json!({"customer": "Rust"}))).await.unwrap();
//! ```
use std::{
collections::BTreeMap,
convert::AsRef,
fmt::{self, Display, Formatter},
};
use chrono::{NaiveDate, Utc};
use futures_util::TryFutureExt;
use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use textnonce::TextNonce;
use thiserror::Error;
use url::Url;
static MAX_PAGE_SIZE: u8 = 50;
static REQUEST_FORMAT: &str = "JSON";
static SIGN_METHOD: &str = "HMAC-SHA1";
static SIGNATURE_VERSION: &str = "1.0";
static VERSION: &str = "2017-05-25";
#[derive(Debug, Error)]
pub enum DayuError {
#[error("config of '{0}' absence")]
ConfigAbsence(&'static str),
#[error("dayu response error: {0}")]
Dayu(DayuFailResponse),
#[error("openssl error: {0}")]
Openssl(#[from] openssl::error::ErrorStack),
#[error("page size '{0}' too large, max is 50")]
PageTooLarge(u8),
#[error("reqwest error: {0}")]
Reqwest(#[from] reqwest::Error),
#[error("serde_json error: {0}")]
SerdeJson(#[from] serde_json::error::Error),
#[error("std io error: {0}")]
Stdio(#[from] std::io::Error),
#[error("textnonce error: {0}")]
TextNonce(String),
#[error("url parse error: {0}")]
UrlParse(#[from] url::ParseError),
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuSendRespon | z_id: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuQueryDetail {
pub phone_num: String,
pub send_date: String,
pub send_status: u8,
pub receive_date: String,
pub template_code: String,
pub content: String,
pub err_code: String,
}
#[derive(Debug, Deserialize)]
pub struct DayuQueryDetails {
#[serde(rename = "SmsSendDetailDTO")]
pub inner: Vec<DayuQueryDetail>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuQueryResponse {
pub total_count: i32,
pub total_page: Option<u8>,
#[serde(rename = "SmsSendDetailDTOs")]
pub details: Option<DayuQueryDetails>,
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuFailResponse {
pub code: String,
pub message: String,
pub request_id: String,
}
impl Display for DayuFailResponse {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}", serde_json::to_string_pretty(self).unwrap())
}
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub enum DayuResponse {
Send(DayuSendResponse),
Query(DayuQueryResponse),
Fail(DayuFailResponse),
}
#[derive(Default, Clone)]
pub struct Dayu {
client: Client,
access_key: String,
access_secret: String,
sign_name: String,
}
fn make_url(dayu: &Dayu, action: &str, params: &[(&str, &str)]) -> Result<Url, DayuError> {
if dayu.access_key.is_empty() {
return Err(DayuError::ConfigAbsence("access_key"));
}
if dayu.access_secret.is_empty() {
return Err(DayuError::ConfigAbsence("access_secret"));
}
if dayu.sign_name.is_empty() {
return Err(DayuError::ConfigAbsence("sign_name"));
}
let timestamp = Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string();
TextNonce::sized(32)
.map_err(DayuError::TextNonce)
.map(|v| v.to_string())
.and_then(|text_nonce| {
let mut map = BTreeMap::new();
map.insert("Format", REQUEST_FORMAT);
map.insert("AccessKeyId", &dayu.access_key);
map.insert("SignatureMethod", SIGN_METHOD);
map.insert("SignatureNonce", &text_nonce);
map.insert("SignatureVersion", SIGNATURE_VERSION);
map.insert("Timestamp", ×tamp);
map.insert("Action", action);
map.insert("SignName", &dayu.sign_name);
map.insert("Version", VERSION);
for &(name, value) in params {
if !value.is_empty() {
map.insert(name, value);
}
}
let mut forms = map
.into_iter()
.map(|(key, value)| (key, urlencoding::encode(value).into_owned()))
.collect::<Vec<(&str, String)>>();
let mut wait_sign = String::from("GET&%2F&");
wait_sign.push_str(
&forms
.iter()
.fold(vec![], |mut wait_sign, &(key, ref value)| {
wait_sign
.push(urlencoding::encode(&format!("{}={}", key, value)).into_owned());
wait_sign
})
.join(&urlencoding::encode("&")),
);
PKey::hmac(format!("{}&", &dayu.access_secret).as_bytes())
.and_then(|pkey| {
Signer::new(MessageDigest::sha1(), &pkey).and_then(|mut signer| {
signer
.update(wait_sign.as_bytes())
.and_then(|_| signer.sign_to_vec())
})
})
.map_err(Into::into)
.map(|ref signature| {
forms.push((
"Signature",
urlencoding::encode(&base64::encode(signature)).into_owned(),
))
})
.and_then(|_| {
Url::parse("https://dysmsapi.aliyuncs.com")
.map_err(Into::into)
.map(|mut url| {
url.set_query(Some(
&forms
.into_iter()
.map(|(key, value)| format!("{}={}", key, value))
.collect::<Vec<String>>()
.join("&"),
));
url
})
})
})
}
macro_rules! do_request {
($dayu:expr, $action:expr, $params:expr, $type:tt) => {{
let url = make_url($dayu, $action, $params)?;
$dayu
.client
.get(url)
.send()
.and_then(|response| response.json::<DayuResponse>())
.await
.map_err(Into::into)
.and_then(|json_response| match json_response {
DayuResponse::$type(v) => Ok(v),
DayuResponse::Fail(fail) => Err(DayuError::Dayu(fail)),
_ => unreachable!(),
})
}};
}
impl Dayu {
/// construct new dayu sdk instance
pub fn new() -> Self {
Self::default()
}
/// set dayu sdk's access key
pub fn set_access_key(mut self, access_key: impl Into<String>) -> Self {
self.access_key = access_key.into();
self
}
/// set dayu sdk's access secret
pub fn set_access_secret(mut self, access_secret: impl Into<String>) -> Self {
self.access_secret = access_secret.into();
self
}
/// set dayu sdk's sign name
pub fn set_sign_name(mut self, sign_name: impl Into<String>) -> Self {
self.sign_name = sign_name.into();
self
}
/// start send sms
/// phones: support multi phone number
/// template_code: SMS TEMPLATE CODE
/// template_param: SMS TEMPLATE PARAMS as JSON
pub async fn sms_send<P: AsRef<str>, T: AsRef<str>>(
&self,
phones: &[P],
template_code: T,
template_param: Option<&Value>,
) -> Result<DayuSendResponse, DayuError> {
let phone_numbers = phones
.iter()
.map(AsRef::as_ref)
.collect::<Vec<&str>>()
.join(",");
let template_param = template_param
.map(|v| serde_json::to_string(v).unwrap())
.unwrap_or_else(String::new);
do_request!(
self,
"SendSms",
&[
("TemplateCode", template_code.as_ref()),
("PhoneNumbers", &phone_numbers),
("TemplateParam", &template_param),
],
Send
)
}
/// query sms send detail
pub async fn sms_query(
&self,
phone_number: &str,
biz_id: Option<&str>,
send_date: NaiveDate,
current_page: u8,
page_size: u8,
) -> Result<DayuQueryResponse, DayuError> {
if page_size > MAX_PAGE_SIZE {
return Err(DayuError::PageTooLarge(page_size));
}
let send_date = send_date.format("%Y%m%d").to_string();
let page_size = page_size.to_string();
let current_page = current_page.to_string();
do_request!(
self,
"QuerySendDetails",
&[
("PhoneNumber", phone_number),
("BizId", biz_id.unwrap_or("")),
("SendDate", &send_date),
("PageSize", &page_size),
("CurrentPage", ¤t_page),
],
Query
)
}
}
| se {
pub bi | identifier_name |
lib.rs | // The MIT License (MIT)
// Copyright (c) 2018 Matrix.Zhang <113445886@qq.com>
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! This library supports Alibaba's Dayu SMS SDK version of '2017-05-25'.
//!
//! ## Basic usage
//!
//! ```rust
//!use dayu::Dayu;
//!use serde_json::json;
//!
//!let dayu = Dayu::new()
//! .set_access_key("access_key")
//! .set_access_secret("access_secret")
//! .set_sign_name("阿里云测试短信");
//!dayu.sms_send(&["138XXXXXXXX"], "SMS_123456", Some(&json!({"customer": "Rust"}))).await.unwrap();
//! ```
use std::{
collections::BTreeMap,
convert::AsRef,
fmt::{self, Display, Formatter},
};
use chrono::{NaiveDate, Utc};
use futures_util::TryFutureExt;
use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use textnonce::TextNonce;
use thiserror::Error;
use url::Url;
static MAX_PAGE_SIZE: u8 = 50;
static REQUEST_FORMAT: &str = "JSON";
static SIGN_METHOD: &str = "HMAC-SHA1";
static SIGNATURE_VERSION: &str = "1.0";
static VERSION: &str = "2017-05-25";
#[derive(Debug, Error)]
pub enum DayuError {
#[error("config of '{0}' absence")]
ConfigAbsence(&'static str),
#[error("dayu response error: {0}")]
Dayu(DayuFailResponse),
#[error("openssl error: {0}")]
Openssl(#[from] openssl::error::ErrorStack),
#[error("page size '{0}' too large, max is 50")]
PageTooLarge(u8),
#[error("reqwest error: {0}")]
Reqwest(#[from] reqwest::Error),
#[error("serde_json error: {0}")]
SerdeJson(#[from] serde_json::error::Error),
#[error("std io error: {0}")]
Stdio(#[from] std::io::Error),
#[error("textnonce error: {0}")]
TextNonce(String),
#[error("url parse error: {0}")]
UrlParse(#[from] url::ParseError),
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuSendResponse {
pub biz_id: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuQueryDetail {
pub phone_num: String,
pub send_date: String,
pub send_status: u8,
pub receive_date: String,
pub template_code: String,
pub content: String,
pub err_code: String,
}
#[derive(Debug, Deserialize)]
pub struct DayuQueryDetails {
#[serde(rename = "SmsSendDetailDTO")]
pub inner: Vec<DayuQueryDetail>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuQueryResponse {
pub total_count: i32,
pub total_page: Option<u8>,
#[serde(rename = "SmsSendDetailDTOs")]
pub details: Option<DayuQueryDetails>,
}
| #[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuFailResponse {
pub code: String,
pub message: String,
pub request_id: String,
}
impl Display for DayuFailResponse {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}", serde_json::to_string_pretty(self).unwrap())
}
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub enum DayuResponse {
Send(DayuSendResponse),
Query(DayuQueryResponse),
Fail(DayuFailResponse),
}
#[derive(Default, Clone)]
pub struct Dayu {
client: Client,
access_key: String,
access_secret: String,
sign_name: String,
}
fn make_url(dayu: &Dayu, action: &str, params: &[(&str, &str)]) -> Result<Url, DayuError> {
if dayu.access_key.is_empty() {
return Err(DayuError::ConfigAbsence("access_key"));
}
if dayu.access_secret.is_empty() {
return Err(DayuError::ConfigAbsence("access_secret"));
}
if dayu.sign_name.is_empty() {
return Err(DayuError::ConfigAbsence("sign_name"));
}
let timestamp = Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string();
TextNonce::sized(32)
.map_err(DayuError::TextNonce)
.map(|v| v.to_string())
.and_then(|text_nonce| {
let mut map = BTreeMap::new();
map.insert("Format", REQUEST_FORMAT);
map.insert("AccessKeyId", &dayu.access_key);
map.insert("SignatureMethod", SIGN_METHOD);
map.insert("SignatureNonce", &text_nonce);
map.insert("SignatureVersion", SIGNATURE_VERSION);
map.insert("Timestamp", ×tamp);
map.insert("Action", action);
map.insert("SignName", &dayu.sign_name);
map.insert("Version", VERSION);
for &(name, value) in params {
if !value.is_empty() {
map.insert(name, value);
}
}
let mut forms = map
.into_iter()
.map(|(key, value)| (key, urlencoding::encode(value).into_owned()))
.collect::<Vec<(&str, String)>>();
let mut wait_sign = String::from("GET&%2F&");
wait_sign.push_str(
&forms
.iter()
.fold(vec![], |mut wait_sign, &(key, ref value)| {
wait_sign
.push(urlencoding::encode(&format!("{}={}", key, value)).into_owned());
wait_sign
})
.join(&urlencoding::encode("&")),
);
PKey::hmac(format!("{}&", &dayu.access_secret).as_bytes())
.and_then(|pkey| {
Signer::new(MessageDigest::sha1(), &pkey).and_then(|mut signer| {
signer
.update(wait_sign.as_bytes())
.and_then(|_| signer.sign_to_vec())
})
})
.map_err(Into::into)
.map(|ref signature| {
forms.push((
"Signature",
urlencoding::encode(&base64::encode(signature)).into_owned(),
))
})
.and_then(|_| {
Url::parse("https://dysmsapi.aliyuncs.com")
.map_err(Into::into)
.map(|mut url| {
url.set_query(Some(
&forms
.into_iter()
.map(|(key, value)| format!("{}={}", key, value))
.collect::<Vec<String>>()
.join("&"),
));
url
})
})
})
}
macro_rules! do_request {
($dayu:expr, $action:expr, $params:expr, $type:tt) => {{
let url = make_url($dayu, $action, $params)?;
$dayu
.client
.get(url)
.send()
.and_then(|response| response.json::<DayuResponse>())
.await
.map_err(Into::into)
.and_then(|json_response| match json_response {
DayuResponse::$type(v) => Ok(v),
DayuResponse::Fail(fail) => Err(DayuError::Dayu(fail)),
_ => unreachable!(),
})
}};
}
impl Dayu {
/// construct new dayu sdk instance
pub fn new() -> Self {
Self::default()
}
/// set dayu sdk's access key
pub fn set_access_key(mut self, access_key: impl Into<String>) -> Self {
self.access_key = access_key.into();
self
}
/// set dayu sdk's access secret
pub fn set_access_secret(mut self, access_secret: impl Into<String>) -> Self {
self.access_secret = access_secret.into();
self
}
/// set dayu sdk's sign name
pub fn set_sign_name(mut self, sign_name: impl Into<String>) -> Self {
self.sign_name = sign_name.into();
self
}
/// start send sms
/// phones: support multi phone number
/// template_code: SMS TEMPLATE CODE
/// template_param: SMS TEMPLATE PARAMS as JSON
pub async fn sms_send<P: AsRef<str>, T: AsRef<str>>(
&self,
phones: &[P],
template_code: T,
template_param: Option<&Value>,
) -> Result<DayuSendResponse, DayuError> {
let phone_numbers = phones
.iter()
.map(AsRef::as_ref)
.collect::<Vec<&str>>()
.join(",");
let template_param = template_param
.map(|v| serde_json::to_string(v).unwrap())
.unwrap_or_else(String::new);
do_request!(
self,
"SendSms",
&[
("TemplateCode", template_code.as_ref()),
("PhoneNumbers", &phone_numbers),
("TemplateParam", &template_param),
],
Send
)
}
/// query sms send detail
pub async fn sms_query(
&self,
phone_number: &str,
biz_id: Option<&str>,
send_date: NaiveDate,
current_page: u8,
page_size: u8,
) -> Result<DayuQueryResponse, DayuError> {
if page_size > MAX_PAGE_SIZE {
return Err(DayuError::PageTooLarge(page_size));
}
let send_date = send_date.format("%Y%m%d").to_string();
let page_size = page_size.to_string();
let current_page = current_page.to_string();
do_request!(
self,
"QuerySendDetails",
&[
("PhoneNumber", phone_number),
("BizId", biz_id.unwrap_or("")),
("SendDate", &send_date),
("PageSize", &page_size),
("CurrentPage", ¤t_page),
],
Query
)
}
} | random_line_split | |
lib.rs | // The MIT License (MIT)
// Copyright (c) 2018 Matrix.Zhang <113445886@qq.com>
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! This library supports Alibaba's Dayu SMS SDK version of '2017-05-25'.
//!
//! ## Basic usage
//!
//! ```rust
//!use dayu::Dayu;
//!use serde_json::json;
//!
//!let dayu = Dayu::new()
//! .set_access_key("access_key")
//! .set_access_secret("access_secret")
//! .set_sign_name("阿里云测试短信");
//!dayu.sms_send(&["138XXXXXXXX"], "SMS_123456", Some(&json!({"customer": "Rust"}))).await.unwrap();
//! ```
use std::{
collections::BTreeMap,
convert::AsRef,
fmt::{self, Display, Formatter},
};
use chrono::{NaiveDate, Utc};
use futures_util::TryFutureExt;
use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use textnonce::TextNonce;
use thiserror::Error;
use url::Url;
static MAX_PAGE_SIZE: u8 = 50;
static REQUEST_FORMAT: &str = "JSON";
static SIGN_METHOD: &str = "HMAC-SHA1";
static SIGNATURE_VERSION: &str = "1.0";
static VERSION: &str = "2017-05-25";
#[derive(Debug, Error)]
pub enum DayuError {
#[error("config of '{0}' absence")]
ConfigAbsence(&'static str),
#[error("dayu response error: {0}")]
Dayu(DayuFailResponse),
#[error("openssl error: {0}")]
Openssl(#[from] openssl::error::ErrorStack),
#[error("page size '{0}' too large, max is 50")]
PageTooLarge(u8),
#[error("reqwest error: {0}")]
Reqwest(#[from] reqwest::Error),
#[error("serde_json error: {0}")]
SerdeJson(#[from] serde_json::error::Error),
#[error("std io error: {0}")]
Stdio(#[from] std::io::Error),
#[error("textnonce error: {0}")]
TextNonce(String),
#[error("url parse error: {0}")]
UrlParse(#[from] url::ParseError),
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuSendResponse {
pub biz_id: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuQueryDetail {
pub phone_num: String,
pub send_date: String,
pub send_status: u8,
pub receive_date: String,
pub template_code: String,
pub content: String,
pub err_code: String,
}
#[derive(Debug, Deserialize)]
pub struct DayuQueryDetails {
#[serde(rename = "SmsSendDetailDTO")]
pub inner: Vec<DayuQueryDetail>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuQueryResponse {
pub total_count: i32,
pub total_page: Option<u8>,
#[serde(rename = "SmsSendDetailDTOs")]
pub details: Option<DayuQueryDetails>,
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuFailResponse {
pub code: String,
pub message: String,
pub request_id: String,
}
impl Display for DayuFailResponse {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}", serde_json::to_string_pretty(self).unwrap())
}
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub enum DayuResponse {
Send(DayuSendResponse),
Query(DayuQueryResponse),
Fail(DayuFailResponse),
}
#[derive(Default, Clone)]
pub struct Dayu {
client: Client,
access_key: String,
access_secret: String,
sign_name: String,
}
fn make_url(dayu: &Dayu, action: &str, params: &[(&str, &str)]) -> Result<Url, DayuError> {
if dayu.access_key.is_empty() {
return Err(DayuError::ConfigAbsence("access_key"));
}
if dayu.access_secret.is_empty() {
return Err(DayuError::ConfigAbsence("access_secret"));
}
if dayu.sign_name.is_empty() {
return Err(DayuError::ConfigAbsence("sign_name"));
}
let timestamp = Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string();
TextNonce::sized(32)
.map_err(DayuError::TextNonce)
.map(|v| v.to_string())
.and_then(|text_nonce| {
let mut map = BTreeMap::new();
map.insert("Format", REQUEST_FORMAT);
map.insert("AccessKeyId", &dayu.access_key);
map.insert("SignatureMethod", SIGN_METHOD);
map.insert("SignatureNonce", &text_nonce);
map.insert("SignatureVersion", SIGNATURE_VERSION);
map.insert("Timestamp", ×tamp);
map.insert("Action", action);
map.insert("SignName", &dayu.sign_name);
map.insert("Version", VERSION);
for &(name, value) in params {
if !value.is_empty() {
map.insert(name, value);
}
}
let mut forms = map
.into_iter()
.map(|(key, value)| (key, urlencoding::encode(value).into_owned()))
.collect::<Vec<(&str, String)>>();
let mut wait_sign = String::from("GET&%2F&");
wait_sign.push_str(
&forms
.iter()
.fold(vec![], |mut wait_sign, &(key, ref value)| {
wait_sign
.push(urlencoding::encode(&format!("{}={}", key, value)).into_owned());
wait_sign
})
.join(&urlencoding::encode("&")),
);
PKey::hmac(format!("{}&", &dayu.access_secret).as_bytes())
.and_then(|pkey| {
Signer::new(MessageDigest::sha1(), &pkey).and_then(|mut signer| {
signer
.update(wait_sign.as_bytes())
.and_then(|_| signer.sign_to_vec())
})
})
.map_err(Into::into)
.map(|ref signature| {
forms.push((
"Signature",
urlencoding::encode(&base64::encode(signature)).into_owned(),
))
})
.and_then(|_| {
Url::parse("https://dysmsapi.aliyuncs.com")
.map_err(Into::into)
.map(|mut url| {
url.set_query(Some(
&forms
.into_iter()
.map(|(key, value)| format!("{}={}", key, value))
.collect::<Vec<String>>()
.join("&"),
));
url
})
})
})
}
macro_rules! do_request {
($dayu:expr, $action:expr, $params:expr, $type:tt) => {{
let url = make_url($dayu, $action, $params)?;
$dayu
.client
.get(url)
.send()
.and_then(|response| response.json::<DayuResponse>())
.await
.map_err(Into::into)
.and_then(|json_response| match json_response {
DayuResponse::$type(v) => Ok(v),
DayuResponse::Fail(fail) => Err(DayuError::Dayu(fail)),
_ => unreachable!(),
})
}};
}
impl Dayu {
/// construct new dayu sdk instance
pub fn new() -> Self {
Self::default()
}
/// set dayu sdk's access key
pub fn set_access_key(mut self, access_key: impl Into<String>) -> Self {
self.access_key = access_key.into();
self
}
/// set dayu sdk's access secret
pub fn set_access_secret(mut self, access_secret: impl Into<String>) -> Self {
self.access_secret = access_secret.into();
self
}
/// set dayu sdk's sign name
pub fn set_sign_name(mut self, sign_name: impl Into<String>) -> Self {
self.sign_name = sign_name.into();
self
}
/// start send sms
/// phones: support multi phone number
/// template_code: SMS TEMPLATE CODE
/// template_param: SMS TEMPLATE PARAMS as JSON
pub async fn sms_send<P: AsRef<str>, T: AsRef<str>>(
&self,
phones: &[P],
template_code: T,
template_param: Option<&Value>,
) -> Result<DayuSendResponse, DayuError> {
let phone_numbers = phones
.iter()
.map(AsRef::as_ref)
.collect::<Vec<&str>>()
.join(",");
let template_param = template_param
.map(|v| serde_json::to_string(v).unwrap())
.unwrap_or_else(String::new);
do_request!(
self,
"SendSms",
&[
("TemplateCode", template_code.as_ref()),
("PhoneNumbers", &phone_numbers),
("TemplateParam", &template_param),
],
Send
)
}
/// query sms send detail
pub async fn sms_query(
&self,
phone_number: &str,
biz_id: Option<&str>,
send_date: NaiveDate,
current_page: u8,
page_size: u8,
) -> Result<DayuQueryResponse, DayuError> {
if | page_size > MAX_PAGE_SIZE {
return Err(DayuError::PageTooLarge(page_size));
}
let send_date = send_date.format("%Y%m%d").to_string();
let page_size = page_size.to_string();
let current_page = current_page.to_string();
do_request!(
self,
"QuerySendDetails",
&[
("PhoneNumber", phone_number),
("BizId", biz_id.unwrap_or("")),
("SendDate", &send_date),
("PageSize", &page_size),
("CurrentPage", ¤t_page),
],
Query
)
}
}
| identifier_body | |
lib.rs | // The MIT License (MIT)
// Copyright (c) 2018 Matrix.Zhang <113445886@qq.com>
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! This library supports Alibaba's Dayu SMS SDK version of '2017-05-25'.
//!
//! ## Basic usage
//!
//! ```rust
//!use dayu::Dayu;
//!use serde_json::json;
//!
//!let dayu = Dayu::new()
//! .set_access_key("access_key")
//! .set_access_secret("access_secret")
//! .set_sign_name("阿里云测试短信");
//!dayu.sms_send(&["138XXXXXXXX"], "SMS_123456", Some(&json!({"customer": "Rust"}))).await.unwrap();
//! ```
use std::{
collections::BTreeMap,
convert::AsRef,
fmt::{self, Display, Formatter},
};
use chrono::{NaiveDate, Utc};
use futures_util::TryFutureExt;
use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use textnonce::TextNonce;
use thiserror::Error;
use url::Url;
static MAX_PAGE_SIZE: u8 = 50;
static REQUEST_FORMAT: &str = "JSON";
static SIGN_METHOD: &str = "HMAC-SHA1";
static SIGNATURE_VERSION: &str = "1.0";
static VERSION: &str = "2017-05-25";
#[derive(Debug, Error)]
pub enum DayuError {
#[error("config of '{0}' absence")]
ConfigAbsence(&'static str),
#[error("dayu response error: {0}")]
Dayu(DayuFailResponse),
#[error("openssl error: {0}")]
Openssl(#[from] openssl::error::ErrorStack),
#[error("page size '{0}' too large, max is 50")]
PageTooLarge(u8),
#[error("reqwest error: {0}")]
Reqwest(#[from] reqwest::Error),
#[error("serde_json error: {0}")]
SerdeJson(#[from] serde_json::error::Error),
#[error("std io error: {0}")]
Stdio(#[from] std::io::Error),
#[error("textnonce error: {0}")]
TextNonce(String),
#[error("url parse error: {0}")]
UrlParse(#[from] url::ParseError),
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuSendResponse {
pub biz_id: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuQueryDetail {
pub phone_num: String,
pub send_date: String,
pub send_status: u8,
pub receive_date: String,
pub template_code: String,
pub content: String,
pub err_code: String,
}
#[derive(Debug, Deserialize)]
pub struct DayuQueryDetails {
#[serde(rename = "SmsSendDetailDTO")]
pub inner: Vec<DayuQueryDetail>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuQueryResponse {
pub total_count: i32,
pub total_page: Option<u8>,
#[serde(rename = "SmsSendDetailDTOs")]
pub details: Option<DayuQueryDetails>,
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "PascalCase")]
pub struct DayuFailResponse {
pub code: String,
pub message: String,
pub request_id: String,
}
impl Display for DayuFailResponse {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}", serde_json::to_string_pretty(self).unwrap())
}
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub enum DayuResponse {
Send(DayuSendResponse),
Query(DayuQueryResponse),
Fail(DayuFailResponse),
}
#[derive(Default, Clone)]
pub struct Dayu {
client: Client,
access_key: String,
access_secret: String,
sign_name: String,
}
fn make_url(dayu: &Dayu, action: &str, params: &[(&str, &str)]) -> Result<Url, DayuError> {
if dayu.access_key.is_empty() {
return Err(DayuError::ConfigAbsence("access_key"));
}
if dayu.access_secret.is_empty() {
ret | u.sign_name.is_empty() {
return Err(DayuError::ConfigAbsence("sign_name"));
}
let timestamp = Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string();
TextNonce::sized(32)
.map_err(DayuError::TextNonce)
.map(|v| v.to_string())
.and_then(|text_nonce| {
let mut map = BTreeMap::new();
map.insert("Format", REQUEST_FORMAT);
map.insert("AccessKeyId", &dayu.access_key);
map.insert("SignatureMethod", SIGN_METHOD);
map.insert("SignatureNonce", &text_nonce);
map.insert("SignatureVersion", SIGNATURE_VERSION);
map.insert("Timestamp", ×tamp);
map.insert("Action", action);
map.insert("SignName", &dayu.sign_name);
map.insert("Version", VERSION);
for &(name, value) in params {
if !value.is_empty() {
map.insert(name, value);
}
}
let mut forms = map
.into_iter()
.map(|(key, value)| (key, urlencoding::encode(value).into_owned()))
.collect::<Vec<(&str, String)>>();
let mut wait_sign = String::from("GET&%2F&");
wait_sign.push_str(
&forms
.iter()
.fold(vec![], |mut wait_sign, &(key, ref value)| {
wait_sign
.push(urlencoding::encode(&format!("{}={}", key, value)).into_owned());
wait_sign
})
.join(&urlencoding::encode("&")),
);
PKey::hmac(format!("{}&", &dayu.access_secret).as_bytes())
.and_then(|pkey| {
Signer::new(MessageDigest::sha1(), &pkey).and_then(|mut signer| {
signer
.update(wait_sign.as_bytes())
.and_then(|_| signer.sign_to_vec())
})
})
.map_err(Into::into)
.map(|ref signature| {
forms.push((
"Signature",
urlencoding::encode(&base64::encode(signature)).into_owned(),
))
})
.and_then(|_| {
Url::parse("https://dysmsapi.aliyuncs.com")
.map_err(Into::into)
.map(|mut url| {
url.set_query(Some(
&forms
.into_iter()
.map(|(key, value)| format!("{}={}", key, value))
.collect::<Vec<String>>()
.join("&"),
));
url
})
})
})
}
macro_rules! do_request {
($dayu:expr, $action:expr, $params:expr, $type:tt) => {{
let url = make_url($dayu, $action, $params)?;
$dayu
.client
.get(url)
.send()
.and_then(|response| response.json::<DayuResponse>())
.await
.map_err(Into::into)
.and_then(|json_response| match json_response {
DayuResponse::$type(v) => Ok(v),
DayuResponse::Fail(fail) => Err(DayuError::Dayu(fail)),
_ => unreachable!(),
})
}};
}
impl Dayu {
/// construct new dayu sdk instance
pub fn new() -> Self {
Self::default()
}
/// set dayu sdk's access key
pub fn set_access_key(mut self, access_key: impl Into<String>) -> Self {
self.access_key = access_key.into();
self
}
/// set dayu sdk's access secret
pub fn set_access_secret(mut self, access_secret: impl Into<String>) -> Self {
self.access_secret = access_secret.into();
self
}
/// set dayu sdk's sign name
pub fn set_sign_name(mut self, sign_name: impl Into<String>) -> Self {
self.sign_name = sign_name.into();
self
}
/// start send sms
/// phones: support multi phone number
/// template_code: SMS TEMPLATE CODE
/// template_param: SMS TEMPLATE PARAMS as JSON
pub async fn sms_send<P: AsRef<str>, T: AsRef<str>>(
&self,
phones: &[P],
template_code: T,
template_param: Option<&Value>,
) -> Result<DayuSendResponse, DayuError> {
let phone_numbers = phones
.iter()
.map(AsRef::as_ref)
.collect::<Vec<&str>>()
.join(",");
let template_param = template_param
.map(|v| serde_json::to_string(v).unwrap())
.unwrap_or_else(String::new);
do_request!(
self,
"SendSms",
&[
("TemplateCode", template_code.as_ref()),
("PhoneNumbers", &phone_numbers),
("TemplateParam", &template_param),
],
Send
)
}
/// query sms send detail
pub async fn sms_query(
&self,
phone_number: &str,
biz_id: Option<&str>,
send_date: NaiveDate,
current_page: u8,
page_size: u8,
) -> Result<DayuQueryResponse, DayuError> {
if page_size > MAX_PAGE_SIZE {
return Err(DayuError::PageTooLarge(page_size));
}
let send_date = send_date.format("%Y%m%d").to_string();
let page_size = page_size.to_string();
let current_page = current_page.to_string();
do_request!(
self,
"QuerySendDetails",
&[
("PhoneNumber", phone_number),
("BizId", biz_id.unwrap_or("")),
("SendDate", &send_date),
("PageSize", &page_size),
("CurrentPage", ¤t_page),
],
Query
)
}
}
| urn Err(DayuError::ConfigAbsence("access_secret"));
}
if day | conditional_block |
main.go | /**
* Ask user for search keywords
* Ask for duration
* Ask user for tweet include a template to insert usernames
* Ask user if they want to include multiple users in single tweet
* TODO:
* 1. [x] Tweet length
* 2. [ ] Tweet velocity
* [x] There is a limit on GET requests
* So we can run a subroutine will fill the temoUserList on regular interval
* [ ] Limit can not be determined for POST request in realtime :(
Expriment to get the error value when limit is crossed and when it is reset
Once the limit is crossed wait till it is reset
* 3. [x] Exit condition
* [x] Duration
* [x] Number of tweets
* 4. [x] Solve concurrent access issue
* 5. [x] Subroutine for fetching latest users
* 6. [x] Subroutine to send tweets
* 7. [x] Note down all the output into a log file so that it can be analyzed later
* 8. [+] Add some random content while replyinh to tweet
* 9. [-] Add suport for Tor Proxy
* 10. [x] Handle Non 200 error while replying
* 11. [-] Update log filename as {name}-{date}.log
* 12. [-] move log files inside log folder
* 13. [ ] Add option to filter negative keywords
* 14. [-] Multiple sender workers
* 15. [ ] Reply to new tweets first
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/garyburd/go-oauth/oauth"
//"io"
"bufio"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
var oauthClient = oauth.Client{
TemporaryCredentialRequestURI: "https://api.twitter.com/oauth/request_token",
ResourceOwnerAuthorizationURI: "https://api.twitter.com/oauth/authorize",
TokenRequestURI: "https://api.twitter.com/oauth/access_token",
}
var credPath = flag.String("config", "config.json", "Path to configuration file containing the application's credentials.")
func readCredentials() error {
b, err := ioutil.ReadFile(*credPath)
if err != nil {
return err
}
return json.Unmarshal(b, &oauthClient.Credentials)
}
var userList XUserList
var xReplyStatuses XReplyStatuses
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randSeq(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func | () {
userList.Init()
xReplyStatuses.Init()
// Inital Logging
InitLogging(true, true, true, true, false)
// Trace.Println("Tracing 123")
// Info.Println("Info 123")
// Warning.Println("Warning 123")
// Error.Println("Error 123")
// Debug.Println("Debug 123")
// os.Exit(0)
var endCriteriaValue int = 0
var tweetText, keywordSearch, endCriteria, direction string = "", "", "", ""
r := bufio.NewReader(os.Stdin)
// var resp http.Response
if err := readCredentials(); err != nil {
log.Fatal(err)
}
// tokenCred := &oauth.Credentials{Token: "2846849851-UNwMEPigXogDrdMAPfvsxxDsC8nY0wdzOHB8xVi", Secret: "YSR6OUbYqBkAPCwVq5TOH30YByd6TSniqERuUv8Ftp2sT"}
tempCred, err := oauthClient.RequestTemporaryCredentials(http.DefaultClient, "oob", nil)
if err != nil {
log.Fatal("RequestTemporaryCredentials:", err)
}
u := oauthClient.AuthorizationURL(tempCred, nil)
fmt.Printf("1. Go to %s\n2. Authorize the application\n3. Enter verification code:\n", u)
var code string
fmt.Scanln(&code)
tokenCred, _, err := oauthClient.RequestToken(http.DefaultClient, tempCred, code)
if err != nil {
log.Fatal(err)
}
// fmt.Println(tokenCred)
//contents, _ := ioutil.ReadAll()
// formTweet := url.Values{"status": {"You are simply amazing buddy"}}
// resp, err := oauthClient.Post(http.DefaultClient, tokenCred,
// "https://api.twitter.com/1.1/statuses/update.json", formTweet)
// defer resp.Body.Close()
// if err != nil {
// log.Fatal(err)
// } else {
// contents, _ := ioutil.ReadAll(resp.Body)
// //fmt.Printf("%s\n", contents)
// Debug.Printf("%s", contents)
// Error.Printf("%s", contents)
// }
// os.Exit(0)
fmt.Println(">> Enter search keywords: ")
keywordSearch, _ = r.ReadString('\n')
for !(endCriteria == "D" || endCriteria == "d" || endCriteria == "T" || endCriteria == "t") {
fmt.Println(">> End criteria ? (D: Duration / T: Number of tweets) ")
fmt.Scanln(&endCriteria)
endCriteria = strings.ToLower(endCriteria)
}
if endCriteria == "d" {
fmt.Println(">> Duration value in minutes: ")
} else if endCriteria == "t" {
fmt.Println(">> Number of tweets to reply: ")
}
fmt.Scanln(&endCriteriaValue)
fmt.Println(">> Enter tweet: ")
fmt.Println("Ex: Hey [user], check this awesome sketch http://bitly/xyz")
fmt.Println("1. [user] will be replaced by @username 2. Dont add important stuff like a link in the end, if username is long it will be truncated. 3. Keep some sapce for adding random #hashtag at the end to prevent getting blocked due to similar content")
tweetText, _ = r.ReadString('\n')
for len(tweetText) < 10 || len(tweetText) > 140 || (strings.Contains(tweetText, "[user]") && len(tweetText) >= 130) || !strings.Contains(tweetText, "[user]") {
if !strings.Contains(tweetText, "[user]") {
fmt.Println("[user] must be a part of the tweet, Please try again")
}
if len(tweetText) < 10 {
fmt.Println("Tweet too small, Please try again")
}
if len(tweetText) > 140 {
fmt.Println("Tweet too large, You entered", len(tweetText), "/140 characters. Please try again")
}
if strings.Contains(tweetText, "[user]") && len(tweetText) >= 130 {
fmt.Println("You must leave some character for including username, your current tweet length is ", len(tweetText), "/140")
}
fmt.Println(">> Enter tweet: ")
tweetText, _ = r.ReadString('\n')
}
for !(direction == "o" || direction == "O" || direction == "n" || direction == "N" || direction == "b" || direction == "B") {
fmt.Println(">> Enter direction: (O: Old Tweets / N: New Tweets / B: Both alternatively)")
fmt.Scanln(&direction)
direction = strings.ToLower(direction)
}
if endCriteria == "d" {
go func() {
time.Sleep(time.Duration(endCriteriaValue) * time.Minute)
os.Exit(0)
}()
}
// Run Goroutines
go searchTweets(keywordSearch, direction, tokenCred)
go statusUpdate(tweetText, endCriteria, endCriteriaValue, tokenCred)
// User can terminate by inputting "end" in console
endNow := "n"
for endNow != "end" {
fmt.Println(">> End Now? (end)")
fmt.Scanln(&endNow)
if endNow == "end" {
// TODO: Maybe dump some files
}
}
}
func searchTweets(keywordSearch string, direction string, tokenCred *oauth.Credentials) {
k := 0
var ramainingRequests, resetTimeStamp int64 = 0, 0
var maxId, minId int64 = 0, 0
for {
form := url.Values{}
// Find tweets. It returns only 100 whatever be the count. So sad :( Fuck you twitter
if k == 0 {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}}
//Debug.Println("No min No max")
}
if direction == "o" {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "max_id": {strconv.FormatInt(minId-1, 10)}}
//Debug.Println("OLD: MinId = ", minId)
}
if direction == "n" {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "since_id": {strconv.FormatInt(maxId+1, 10)}}
//Debug.Println("NEW: MaxId = ", maxId)
}
if direction == "b" {
if k%2 == 0 {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "max_id": {strconv.FormatInt(minId-1, 10)}}
//Debug.Println("BOTH: MinId = ", minId)
} else {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "since_id": {strconv.FormatInt(maxId+1, 10)}}
//Debug.Println("BOTH: MaxId = ", maxId)
}
}
//fmt.Println(form)
resp, err := oauthClient.Get(http.DefaultClient, tokenCred,
"https://api.twitter.com/1.1/search/tweets.json", form)
defer resp.Body.Close()
if err != nil {
log.Fatal(err)
}
ramainingRequests, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Remaining"][0], 10, 64)
//allowedRequests, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Limit"][0], 10, 64)
resetTimeStamp, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Reset"][0], 10, 64) // converted to miliseconds
resetTimeStamp *= 1000
var srobj searchResponse
searchResponseBody, _ := ioutil.ReadAll(resp.Body)
_ = json.Unmarshal(searchResponseBody, &srobj)
for i := range srobj.Statuses {
//Debug.Println(srobj.Statuses[i].Id)
if (strings.Contains(strings.ToLower(srobj.Statuses[i].Text), "trend")) != true {
userList.Set(srobj.Statuses[i].User.Id, srobj.Statuses[i].User)
// if _, ok := replyStatuses[srobj.Statuses[i].User.Id]; !ok {
// replyStatuses[srobj.Statuses[i].User.Id] = ReplyStatus{Replied: false}
// }
if !xReplyStatuses.IsSet(srobj.Statuses[i].User.Id) {
xReplyStatuses.Initiate(srobj.Statuses[i].User.Id)
}
}
if minId == 0 {
minId = srobj.Statuses[i].Id
} else if minId > srobj.Statuses[i].Id {
minId = srobj.Statuses[i].Id
}
if maxId == 0 {
maxId = srobj.Statuses[i].Id
} else if maxId < srobj.Statuses[i].Id {
maxId = srobj.Statuses[i].Id
}
}
if resp.StatusCode != 200 {
Debug.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
Error.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
} else {
Info.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
if len(srobj.Statuses) > 0 {
if direction == "b" {
if k%2 == 0 {
Debug.Printf("%d old tweets found", len(srobj.Statuses))
} else {
Debug.Printf("%d new tweets found", len(srobj.Statuses))
}
}
if direction == "o" {
Debug.Printf("%d old tweets found", len(srobj.Statuses))
}
if direction == "n" {
Debug.Printf("%d new tweets found", len(srobj.Statuses))
}
}
}
// Insert calculated delay (Useful when there will be multiple senders)
var delay int64
if ramainingRequests != 0 {
delay = (resetTimeStamp - time.Now().UnixNano()/int64(time.Millisecond)) / ramainingRequests
} else {
delay = (resetTimeStamp - time.Now().UnixNano()/int64(time.Millisecond))
}
//fmt.Printf("Pass ends sleeping for %d seconds", delay/1000)
time.Sleep(time.Duration(delay) * time.Millisecond)
// Because there is a limit to how much tweet a user can send
// time.Sleep(5 * time.Millisecond)
k++
}
}
func statusUpdate(tweetText, endCriteria string, endCriteriaValue int, tokenCred *oauth.Credentials) {
time.Sleep(5 * time.Second) // Wait for some time till userList is populated
k := 0
totalPeopleReplied := 0
for {
// Fill tempUserList outside
// While adding stuff to tempUserList check if replyStatuses has a value
useridList := xReplyStatuses.ListUseridUnsent()
peopleReplied := 0
for len(useridList) == 0 {
time.Sleep(1 * time.Second)
}
for j := range useridList {
totalPeopleReplied++
currentUserId := useridList[j]
currentUser := userList.Get(currentUserId)
processedTweetText := strings.Replace(tweetText, "[user]", "@"+currentUser.ScreenName, -1) + " #" + randSeq(3)
formTweet := url.Values{"status": {processedTweetText}}
resp, err := oauthClient.Post(http.DefaultClient, tokenCred,
"https://api.twitter.com/1.1/statuses/update.json", formTweet)
searchResponseBody, _ := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
log.Fatal(err)
// } else {
// contents, _ := ioutil.ReadAll(resp.Body)
// fmt.Printf("%s\n", contents)
}
xReplyStatuses.Sent(currentUserId)
if resp.StatusCode != 200 {
Debug.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
Error.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
} else {
Info.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
//Debug.Printf("%s", processedTweetText)
}
if resp.StatusCode == 403 {
var errobj TwitterResponseError
_ = json.Unmarshal(searchResponseBody, &errobj)
if errobj.errors[0].Code == 185 {
// TODO: Daily user limit reached
// Stop for half an hour
Debug.Printf("Tweet limit reached, Sleeping for half an hour")
time.Sleep(30 * time.Minute)
}
if errobj.errors[0].Code == 226 {
// Inform user to create another campaign and exit
Debug.Printf("Seems like twitter has detected your campaign as spam try another campign, may be another user too")
os.Exit(0)
}
}
if endCriteria == "t" && totalPeopleReplied >= endCriteriaValue {
fmt.Println("Exiting because end criteria reached. Number of tweets replied : ", totalPeopleReplied)
os.Exit(0)
}
peopleReplied++
// User can only send 50 request per half-an-hour (Post every 36 seconds)
time.Sleep(time.Duration(rand.Intn(5)+3) * time.Second) // Wait for 3 to 7 seconds
}
k++
Debug.Printf("%d people replied", peopleReplied)
}
}
| main | identifier_name |
main.go | /**
* Ask user for search keywords
* Ask for duration
* Ask user for tweet include a template to insert usernames
* Ask user if they want to include multiple users in single tweet
* TODO:
* 1. [x] Tweet length
* 2. [ ] Tweet velocity
* [x] There is a limit on GET requests
* So we can run a subroutine will fill the temoUserList on regular interval
* [ ] Limit can not be determined for POST request in realtime :(
Expriment to get the error value when limit is crossed and when it is reset
Once the limit is crossed wait till it is reset
* 3. [x] Exit condition
* [x] Duration
* [x] Number of tweets
* 4. [x] Solve concurrent access issue
* 5. [x] Subroutine for fetching latest users
* 6. [x] Subroutine to send tweets
* 7. [x] Note down all the output into a log file so that it can be analyzed later
* 8. [+] Add some random content while replyinh to tweet
* 9. [-] Add suport for Tor Proxy
* 10. [x] Handle Non 200 error while replying
* 11. [-] Update log filename as {name}-{date}.log
* 12. [-] move log files inside log folder
* 13. [ ] Add option to filter negative keywords
* 14. [-] Multiple sender workers
* 15. [ ] Reply to new tweets first
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/garyburd/go-oauth/oauth"
//"io"
"bufio"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
var oauthClient = oauth.Client{
TemporaryCredentialRequestURI: "https://api.twitter.com/oauth/request_token",
ResourceOwnerAuthorizationURI: "https://api.twitter.com/oauth/authorize",
TokenRequestURI: "https://api.twitter.com/oauth/access_token",
}
var credPath = flag.String("config", "config.json", "Path to configuration file containing the application's credentials.")
func readCredentials() error {
b, err := ioutil.ReadFile(*credPath)
if err != nil {
return err
}
return json.Unmarshal(b, &oauthClient.Credentials)
}
var userList XUserList
var xReplyStatuses XReplyStatuses
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randSeq(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func main() {
userList.Init()
xReplyStatuses.Init()
// Inital Logging
InitLogging(true, true, true, true, false)
// Trace.Println("Tracing 123")
// Info.Println("Info 123")
// Warning.Println("Warning 123")
// Error.Println("Error 123")
// Debug.Println("Debug 123")
// os.Exit(0)
var endCriteriaValue int = 0
var tweetText, keywordSearch, endCriteria, direction string = "", "", "", ""
r := bufio.NewReader(os.Stdin)
// var resp http.Response
if err := readCredentials(); err != nil {
log.Fatal(err)
}
// tokenCred := &oauth.Credentials{Token: "2846849851-UNwMEPigXogDrdMAPfvsxxDsC8nY0wdzOHB8xVi", Secret: "YSR6OUbYqBkAPCwVq5TOH30YByd6TSniqERuUv8Ftp2sT"}
tempCred, err := oauthClient.RequestTemporaryCredentials(http.DefaultClient, "oob", nil)
if err != nil {
log.Fatal("RequestTemporaryCredentials:", err)
}
u := oauthClient.AuthorizationURL(tempCred, nil)
fmt.Printf("1. Go to %s\n2. Authorize the application\n3. Enter verification code:\n", u)
var code string
fmt.Scanln(&code)
tokenCred, _, err := oauthClient.RequestToken(http.DefaultClient, tempCred, code)
if err != nil {
log.Fatal(err)
}
// fmt.Println(tokenCred)
//contents, _ := ioutil.ReadAll()
// formTweet := url.Values{"status": {"You are simply amazing buddy"}}
// resp, err := oauthClient.Post(http.DefaultClient, tokenCred,
// "https://api.twitter.com/1.1/statuses/update.json", formTweet)
// defer resp.Body.Close()
// if err != nil {
// log.Fatal(err)
// } else {
// contents, _ := ioutil.ReadAll(resp.Body)
// //fmt.Printf("%s\n", contents)
// Debug.Printf("%s", contents)
// Error.Printf("%s", contents)
// }
// os.Exit(0)
fmt.Println(">> Enter search keywords: ")
keywordSearch, _ = r.ReadString('\n')
for !(endCriteria == "D" || endCriteria == "d" || endCriteria == "T" || endCriteria == "t") {
fmt.Println(">> End criteria ? (D: Duration / T: Number of tweets) ")
fmt.Scanln(&endCriteria)
endCriteria = strings.ToLower(endCriteria)
}
if endCriteria == "d" {
fmt.Println(">> Duration value in minutes: ")
} else if endCriteria == "t" {
fmt.Println(">> Number of tweets to reply: ")
}
fmt.Scanln(&endCriteriaValue)
fmt.Println(">> Enter tweet: ")
fmt.Println("Ex: Hey [user], check this awesome sketch http://bitly/xyz")
fmt.Println("1. [user] will be replaced by @username 2. Dont add important stuff like a link in the end, if username is long it will be truncated. 3. Keep some sapce for adding random #hashtag at the end to prevent getting blocked due to similar content")
tweetText, _ = r.ReadString('\n')
for len(tweetText) < 10 || len(tweetText) > 140 || (strings.Contains(tweetText, "[user]") && len(tweetText) >= 130) || !strings.Contains(tweetText, "[user]") {
if !strings.Contains(tweetText, "[user]") {
fmt.Println("[user] must be a part of the tweet, Please try again")
}
if len(tweetText) < 10 {
fmt.Println("Tweet too small, Please try again")
}
if len(tweetText) > 140 {
fmt.Println("Tweet too large, You entered", len(tweetText), "/140 characters. Please try again")
}
if strings.Contains(tweetText, "[user]") && len(tweetText) >= 130 {
fmt.Println("You must leave some character for including username, your current tweet length is ", len(tweetText), "/140")
}
fmt.Println(">> Enter tweet: ")
tweetText, _ = r.ReadString('\n')
}
for !(direction == "o" || direction == "O" || direction == "n" || direction == "N" || direction == "b" || direction == "B") {
fmt.Println(">> Enter direction: (O: Old Tweets / N: New Tweets / B: Both alternatively)")
fmt.Scanln(&direction)
direction = strings.ToLower(direction)
}
if endCriteria == "d" {
go func() {
time.Sleep(time.Duration(endCriteriaValue) * time.Minute)
os.Exit(0)
}()
}
// Run Goroutines
go searchTweets(keywordSearch, direction, tokenCred)
go statusUpdate(tweetText, endCriteria, endCriteriaValue, tokenCred)
// User can terminate by inputting "end" in console
endNow := "n"
for endNow != "end" {
fmt.Println(">> End Now? (end)")
fmt.Scanln(&endNow)
if endNow == "end" {
// TODO: Maybe dump some files
}
}
}
func searchTweets(keywordSearch string, direction string, tokenCred *oauth.Credentials) {
k := 0
var ramainingRequests, resetTimeStamp int64 = 0, 0
var maxId, minId int64 = 0, 0
for {
form := url.Values{}
// Find tweets. It returns only 100 whatever be the count. So sad :( Fuck you twitter
if k == 0 |
if direction == "o" {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "max_id": {strconv.FormatInt(minId-1, 10)}}
//Debug.Println("OLD: MinId = ", minId)
}
if direction == "n" {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "since_id": {strconv.FormatInt(maxId+1, 10)}}
//Debug.Println("NEW: MaxId = ", maxId)
}
if direction == "b" {
if k%2 == 0 {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "max_id": {strconv.FormatInt(minId-1, 10)}}
//Debug.Println("BOTH: MinId = ", minId)
} else {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "since_id": {strconv.FormatInt(maxId+1, 10)}}
//Debug.Println("BOTH: MaxId = ", maxId)
}
}
//fmt.Println(form)
resp, err := oauthClient.Get(http.DefaultClient, tokenCred,
"https://api.twitter.com/1.1/search/tweets.json", form)
defer resp.Body.Close()
if err != nil {
log.Fatal(err)
}
ramainingRequests, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Remaining"][0], 10, 64)
//allowedRequests, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Limit"][0], 10, 64)
resetTimeStamp, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Reset"][0], 10, 64) // converted to miliseconds
resetTimeStamp *= 1000
var srobj searchResponse
searchResponseBody, _ := ioutil.ReadAll(resp.Body)
_ = json.Unmarshal(searchResponseBody, &srobj)
for i := range srobj.Statuses {
//Debug.Println(srobj.Statuses[i].Id)
if (strings.Contains(strings.ToLower(srobj.Statuses[i].Text), "trend")) != true {
userList.Set(srobj.Statuses[i].User.Id, srobj.Statuses[i].User)
// if _, ok := replyStatuses[srobj.Statuses[i].User.Id]; !ok {
// replyStatuses[srobj.Statuses[i].User.Id] = ReplyStatus{Replied: false}
// }
if !xReplyStatuses.IsSet(srobj.Statuses[i].User.Id) {
xReplyStatuses.Initiate(srobj.Statuses[i].User.Id)
}
}
if minId == 0 {
minId = srobj.Statuses[i].Id
} else if minId > srobj.Statuses[i].Id {
minId = srobj.Statuses[i].Id
}
if maxId == 0 {
maxId = srobj.Statuses[i].Id
} else if maxId < srobj.Statuses[i].Id {
maxId = srobj.Statuses[i].Id
}
}
if resp.StatusCode != 200 {
Debug.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
Error.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
} else {
Info.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
if len(srobj.Statuses) > 0 {
if direction == "b" {
if k%2 == 0 {
Debug.Printf("%d old tweets found", len(srobj.Statuses))
} else {
Debug.Printf("%d new tweets found", len(srobj.Statuses))
}
}
if direction == "o" {
Debug.Printf("%d old tweets found", len(srobj.Statuses))
}
if direction == "n" {
Debug.Printf("%d new tweets found", len(srobj.Statuses))
}
}
}
// Insert calculated delay (Useful when there will be multiple senders)
var delay int64
if ramainingRequests != 0 {
delay = (resetTimeStamp - time.Now().UnixNano()/int64(time.Millisecond)) / ramainingRequests
} else {
delay = (resetTimeStamp - time.Now().UnixNano()/int64(time.Millisecond))
}
//fmt.Printf("Pass ends sleeping for %d seconds", delay/1000)
time.Sleep(time.Duration(delay) * time.Millisecond)
// Because there is a limit to how much tweet a user can send
// time.Sleep(5 * time.Millisecond)
k++
}
}
func statusUpdate(tweetText, endCriteria string, endCriteriaValue int, tokenCred *oauth.Credentials) {
time.Sleep(5 * time.Second) // Wait for some time till userList is populated
k := 0
totalPeopleReplied := 0
for {
// Fill tempUserList outside
// While adding stuff to tempUserList check if replyStatuses has a value
useridList := xReplyStatuses.ListUseridUnsent()
peopleReplied := 0
for len(useridList) == 0 {
time.Sleep(1 * time.Second)
}
for j := range useridList {
totalPeopleReplied++
currentUserId := useridList[j]
currentUser := userList.Get(currentUserId)
processedTweetText := strings.Replace(tweetText, "[user]", "@"+currentUser.ScreenName, -1) + " #" + randSeq(3)
formTweet := url.Values{"status": {processedTweetText}}
resp, err := oauthClient.Post(http.DefaultClient, tokenCred,
"https://api.twitter.com/1.1/statuses/update.json", formTweet)
searchResponseBody, _ := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
log.Fatal(err)
// } else {
// contents, _ := ioutil.ReadAll(resp.Body)
// fmt.Printf("%s\n", contents)
}
xReplyStatuses.Sent(currentUserId)
if resp.StatusCode != 200 {
Debug.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
Error.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
} else {
Info.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
//Debug.Printf("%s", processedTweetText)
}
if resp.StatusCode == 403 {
var errobj TwitterResponseError
_ = json.Unmarshal(searchResponseBody, &errobj)
if errobj.errors[0].Code == 185 {
// TODO: Daily user limit reached
// Stop for half an hour
Debug.Printf("Tweet limit reached, Sleeping for half an hour")
time.Sleep(30 * time.Minute)
}
if errobj.errors[0].Code == 226 {
// Inform user to create another campaign and exit
Debug.Printf("Seems like twitter has detected your campaign as spam try another campign, may be another user too")
os.Exit(0)
}
}
if endCriteria == "t" && totalPeopleReplied >= endCriteriaValue {
fmt.Println("Exiting because end criteria reached. Number of tweets replied : ", totalPeopleReplied)
os.Exit(0)
}
peopleReplied++
// User can only send 50 request per half-an-hour (Post every 36 seconds)
time.Sleep(time.Duration(rand.Intn(5)+3) * time.Second) // Wait for 3 to 7 seconds
}
k++
Debug.Printf("%d people replied", peopleReplied)
}
}
| {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}}
//Debug.Println("No min No max")
} | conditional_block |
main.go | /**
* Ask user for search keywords
* Ask for duration
* Ask user for tweet include a template to insert usernames
* Ask user if they want to include multiple users in single tweet
* TODO:
* 1. [x] Tweet length
* 2. [ ] Tweet velocity
* [x] There is a limit on GET requests
* So we can run a subroutine will fill the temoUserList on regular interval
* [ ] Limit can not be determined for POST request in realtime :(
Expriment to get the error value when limit is crossed and when it is reset
Once the limit is crossed wait till it is reset
* 3. [x] Exit condition
* [x] Duration
* [x] Number of tweets
* 4. [x] Solve concurrent access issue
* 5. [x] Subroutine for fetching latest users
* 6. [x] Subroutine to send tweets
* 7. [x] Note down all the output into a log file so that it can be analyzed later
* 8. [+] Add some random content while replyinh to tweet
* 9. [-] Add suport for Tor Proxy
* 10. [x] Handle Non 200 error while replying
* 11. [-] Update log filename as {name}-{date}.log
* 12. [-] move log files inside log folder
* 13. [ ] Add option to filter negative keywords
* 14. [-] Multiple sender workers
* 15. [ ] Reply to new tweets first
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/garyburd/go-oauth/oauth"
//"io"
"bufio"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url" | "strconv"
"strings"
"time"
)
var oauthClient = oauth.Client{
TemporaryCredentialRequestURI: "https://api.twitter.com/oauth/request_token",
ResourceOwnerAuthorizationURI: "https://api.twitter.com/oauth/authorize",
TokenRequestURI: "https://api.twitter.com/oauth/access_token",
}
var credPath = flag.String("config", "config.json", "Path to configuration file containing the application's credentials.")
func readCredentials() error {
b, err := ioutil.ReadFile(*credPath)
if err != nil {
return err
}
return json.Unmarshal(b, &oauthClient.Credentials)
}
var userList XUserList
var xReplyStatuses XReplyStatuses
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randSeq(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func main() {
userList.Init()
xReplyStatuses.Init()
// Inital Logging
InitLogging(true, true, true, true, false)
// Trace.Println("Tracing 123")
// Info.Println("Info 123")
// Warning.Println("Warning 123")
// Error.Println("Error 123")
// Debug.Println("Debug 123")
// os.Exit(0)
var endCriteriaValue int = 0
var tweetText, keywordSearch, endCriteria, direction string = "", "", "", ""
r := bufio.NewReader(os.Stdin)
// var resp http.Response
if err := readCredentials(); err != nil {
log.Fatal(err)
}
// tokenCred := &oauth.Credentials{Token: "2846849851-UNwMEPigXogDrdMAPfvsxxDsC8nY0wdzOHB8xVi", Secret: "YSR6OUbYqBkAPCwVq5TOH30YByd6TSniqERuUv8Ftp2sT"}
tempCred, err := oauthClient.RequestTemporaryCredentials(http.DefaultClient, "oob", nil)
if err != nil {
log.Fatal("RequestTemporaryCredentials:", err)
}
u := oauthClient.AuthorizationURL(tempCred, nil)
fmt.Printf("1. Go to %s\n2. Authorize the application\n3. Enter verification code:\n", u)
var code string
fmt.Scanln(&code)
tokenCred, _, err := oauthClient.RequestToken(http.DefaultClient, tempCred, code)
if err != nil {
log.Fatal(err)
}
// fmt.Println(tokenCred)
//contents, _ := ioutil.ReadAll()
// formTweet := url.Values{"status": {"You are simply amazing buddy"}}
// resp, err := oauthClient.Post(http.DefaultClient, tokenCred,
// "https://api.twitter.com/1.1/statuses/update.json", formTweet)
// defer resp.Body.Close()
// if err != nil {
// log.Fatal(err)
// } else {
// contents, _ := ioutil.ReadAll(resp.Body)
// //fmt.Printf("%s\n", contents)
// Debug.Printf("%s", contents)
// Error.Printf("%s", contents)
// }
// os.Exit(0)
fmt.Println(">> Enter search keywords: ")
keywordSearch, _ = r.ReadString('\n')
for !(endCriteria == "D" || endCriteria == "d" || endCriteria == "T" || endCriteria == "t") {
fmt.Println(">> End criteria ? (D: Duration / T: Number of tweets) ")
fmt.Scanln(&endCriteria)
endCriteria = strings.ToLower(endCriteria)
}
if endCriteria == "d" {
fmt.Println(">> Duration value in minutes: ")
} else if endCriteria == "t" {
fmt.Println(">> Number of tweets to reply: ")
}
fmt.Scanln(&endCriteriaValue)
fmt.Println(">> Enter tweet: ")
fmt.Println("Ex: Hey [user], check this awesome sketch http://bitly/xyz")
fmt.Println("1. [user] will be replaced by @username 2. Dont add important stuff like a link in the end, if username is long it will be truncated. 3. Keep some sapce for adding random #hashtag at the end to prevent getting blocked due to similar content")
tweetText, _ = r.ReadString('\n')
for len(tweetText) < 10 || len(tweetText) > 140 || (strings.Contains(tweetText, "[user]") && len(tweetText) >= 130) || !strings.Contains(tweetText, "[user]") {
if !strings.Contains(tweetText, "[user]") {
fmt.Println("[user] must be a part of the tweet, Please try again")
}
if len(tweetText) < 10 {
fmt.Println("Tweet too small, Please try again")
}
if len(tweetText) > 140 {
fmt.Println("Tweet too large, You entered", len(tweetText), "/140 characters. Please try again")
}
if strings.Contains(tweetText, "[user]") && len(tweetText) >= 130 {
fmt.Println("You must leave some character for including username, your current tweet length is ", len(tweetText), "/140")
}
fmt.Println(">> Enter tweet: ")
tweetText, _ = r.ReadString('\n')
}
for !(direction == "o" || direction == "O" || direction == "n" || direction == "N" || direction == "b" || direction == "B") {
fmt.Println(">> Enter direction: (O: Old Tweets / N: New Tweets / B: Both alternatively)")
fmt.Scanln(&direction)
direction = strings.ToLower(direction)
}
if endCriteria == "d" {
go func() {
time.Sleep(time.Duration(endCriteriaValue) * time.Minute)
os.Exit(0)
}()
}
// Run Goroutines
go searchTweets(keywordSearch, direction, tokenCred)
go statusUpdate(tweetText, endCriteria, endCriteriaValue, tokenCred)
// User can terminate by inputting "end" in console
endNow := "n"
for endNow != "end" {
fmt.Println(">> End Now? (end)")
fmt.Scanln(&endNow)
if endNow == "end" {
// TODO: Maybe dump some files
}
}
}
func searchTweets(keywordSearch string, direction string, tokenCred *oauth.Credentials) {
k := 0
var ramainingRequests, resetTimeStamp int64 = 0, 0
var maxId, minId int64 = 0, 0
for {
form := url.Values{}
// Find tweets. It returns only 100 whatever be the count. So sad :( Fuck you twitter
if k == 0 {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}}
//Debug.Println("No min No max")
}
if direction == "o" {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "max_id": {strconv.FormatInt(minId-1, 10)}}
//Debug.Println("OLD: MinId = ", minId)
}
if direction == "n" {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "since_id": {strconv.FormatInt(maxId+1, 10)}}
//Debug.Println("NEW: MaxId = ", maxId)
}
if direction == "b" {
if k%2 == 0 {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "max_id": {strconv.FormatInt(minId-1, 10)}}
//Debug.Println("BOTH: MinId = ", minId)
} else {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "since_id": {strconv.FormatInt(maxId+1, 10)}}
//Debug.Println("BOTH: MaxId = ", maxId)
}
}
//fmt.Println(form)
resp, err := oauthClient.Get(http.DefaultClient, tokenCred,
"https://api.twitter.com/1.1/search/tweets.json", form)
defer resp.Body.Close()
if err != nil {
log.Fatal(err)
}
ramainingRequests, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Remaining"][0], 10, 64)
//allowedRequests, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Limit"][0], 10, 64)
resetTimeStamp, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Reset"][0], 10, 64) // converted to miliseconds
resetTimeStamp *= 1000
var srobj searchResponse
searchResponseBody, _ := ioutil.ReadAll(resp.Body)
_ = json.Unmarshal(searchResponseBody, &srobj)
for i := range srobj.Statuses {
//Debug.Println(srobj.Statuses[i].Id)
if (strings.Contains(strings.ToLower(srobj.Statuses[i].Text), "trend")) != true {
userList.Set(srobj.Statuses[i].User.Id, srobj.Statuses[i].User)
// if _, ok := replyStatuses[srobj.Statuses[i].User.Id]; !ok {
// replyStatuses[srobj.Statuses[i].User.Id] = ReplyStatus{Replied: false}
// }
if !xReplyStatuses.IsSet(srobj.Statuses[i].User.Id) {
xReplyStatuses.Initiate(srobj.Statuses[i].User.Id)
}
}
if minId == 0 {
minId = srobj.Statuses[i].Id
} else if minId > srobj.Statuses[i].Id {
minId = srobj.Statuses[i].Id
}
if maxId == 0 {
maxId = srobj.Statuses[i].Id
} else if maxId < srobj.Statuses[i].Id {
maxId = srobj.Statuses[i].Id
}
}
if resp.StatusCode != 200 {
Debug.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
Error.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
} else {
Info.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
if len(srobj.Statuses) > 0 {
if direction == "b" {
if k%2 == 0 {
Debug.Printf("%d old tweets found", len(srobj.Statuses))
} else {
Debug.Printf("%d new tweets found", len(srobj.Statuses))
}
}
if direction == "o" {
Debug.Printf("%d old tweets found", len(srobj.Statuses))
}
if direction == "n" {
Debug.Printf("%d new tweets found", len(srobj.Statuses))
}
}
}
// Insert calculated delay (Useful when there will be multiple senders)
var delay int64
if ramainingRequests != 0 {
delay = (resetTimeStamp - time.Now().UnixNano()/int64(time.Millisecond)) / ramainingRequests
} else {
delay = (resetTimeStamp - time.Now().UnixNano()/int64(time.Millisecond))
}
//fmt.Printf("Pass ends sleeping for %d seconds", delay/1000)
time.Sleep(time.Duration(delay) * time.Millisecond)
// Because there is a limit to how much tweet a user can send
// time.Sleep(5 * time.Millisecond)
k++
}
}
func statusUpdate(tweetText, endCriteria string, endCriteriaValue int, tokenCred *oauth.Credentials) {
time.Sleep(5 * time.Second) // Wait for some time till userList is populated
k := 0
totalPeopleReplied := 0
for {
// Fill tempUserList outside
// While adding stuff to tempUserList check if replyStatuses has a value
useridList := xReplyStatuses.ListUseridUnsent()
peopleReplied := 0
for len(useridList) == 0 {
time.Sleep(1 * time.Second)
}
for j := range useridList {
totalPeopleReplied++
currentUserId := useridList[j]
currentUser := userList.Get(currentUserId)
processedTweetText := strings.Replace(tweetText, "[user]", "@"+currentUser.ScreenName, -1) + " #" + randSeq(3)
formTweet := url.Values{"status": {processedTweetText}}
resp, err := oauthClient.Post(http.DefaultClient, tokenCred,
"https://api.twitter.com/1.1/statuses/update.json", formTweet)
searchResponseBody, _ := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
log.Fatal(err)
// } else {
// contents, _ := ioutil.ReadAll(resp.Body)
// fmt.Printf("%s\n", contents)
}
xReplyStatuses.Sent(currentUserId)
if resp.StatusCode != 200 {
Debug.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
Error.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
} else {
Info.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
//Debug.Printf("%s", processedTweetText)
}
if resp.StatusCode == 403 {
var errobj TwitterResponseError
_ = json.Unmarshal(searchResponseBody, &errobj)
if errobj.errors[0].Code == 185 {
// TODO: Daily user limit reached
// Stop for half an hour
Debug.Printf("Tweet limit reached, Sleeping for half an hour")
time.Sleep(30 * time.Minute)
}
if errobj.errors[0].Code == 226 {
// Inform user to create another campaign and exit
Debug.Printf("Seems like twitter has detected your campaign as spam try another campign, may be another user too")
os.Exit(0)
}
}
if endCriteria == "t" && totalPeopleReplied >= endCriteriaValue {
fmt.Println("Exiting because end criteria reached. Number of tweets replied : ", totalPeopleReplied)
os.Exit(0)
}
peopleReplied++
// User can only send 50 request per half-an-hour (Post every 36 seconds)
time.Sleep(time.Duration(rand.Intn(5)+3) * time.Second) // Wait for 3 to 7 seconds
}
k++
Debug.Printf("%d people replied", peopleReplied)
}
} | "os" | random_line_split |
main.go | /**
* Ask user for search keywords
* Ask for duration
* Ask user for tweet include a template to insert usernames
* Ask user if they want to include multiple users in single tweet
* TODO:
* 1. [x] Tweet length
* 2. [ ] Tweet velocity
* [x] There is a limit on GET requests
* So we can run a subroutine will fill the temoUserList on regular interval
* [ ] Limit can not be determined for POST request in realtime :(
Expriment to get the error value when limit is crossed and when it is reset
Once the limit is crossed wait till it is reset
* 3. [x] Exit condition
* [x] Duration
* [x] Number of tweets
* 4. [x] Solve concurrent access issue
* 5. [x] Subroutine for fetching latest users
* 6. [x] Subroutine to send tweets
* 7. [x] Note down all the output into a log file so that it can be analyzed later
* 8. [+] Add some random content while replyinh to tweet
* 9. [-] Add suport for Tor Proxy
* 10. [x] Handle Non 200 error while replying
* 11. [-] Update log filename as {name}-{date}.log
* 12. [-] move log files inside log folder
* 13. [ ] Add option to filter negative keywords
* 14. [-] Multiple sender workers
* 15. [ ] Reply to new tweets first
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/garyburd/go-oauth/oauth"
//"io"
"bufio"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
var oauthClient = oauth.Client{
TemporaryCredentialRequestURI: "https://api.twitter.com/oauth/request_token",
ResourceOwnerAuthorizationURI: "https://api.twitter.com/oauth/authorize",
TokenRequestURI: "https://api.twitter.com/oauth/access_token",
}
var credPath = flag.String("config", "config.json", "Path to configuration file containing the application's credentials.")
func readCredentials() error |
var userList XUserList
var xReplyStatuses XReplyStatuses
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randSeq(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func main() {
userList.Init()
xReplyStatuses.Init()
// Inital Logging
InitLogging(true, true, true, true, false)
// Trace.Println("Tracing 123")
// Info.Println("Info 123")
// Warning.Println("Warning 123")
// Error.Println("Error 123")
// Debug.Println("Debug 123")
// os.Exit(0)
var endCriteriaValue int = 0
var tweetText, keywordSearch, endCriteria, direction string = "", "", "", ""
r := bufio.NewReader(os.Stdin)
// var resp http.Response
if err := readCredentials(); err != nil {
log.Fatal(err)
}
// tokenCred := &oauth.Credentials{Token: "2846849851-UNwMEPigXogDrdMAPfvsxxDsC8nY0wdzOHB8xVi", Secret: "YSR6OUbYqBkAPCwVq5TOH30YByd6TSniqERuUv8Ftp2sT"}
tempCred, err := oauthClient.RequestTemporaryCredentials(http.DefaultClient, "oob", nil)
if err != nil {
log.Fatal("RequestTemporaryCredentials:", err)
}
u := oauthClient.AuthorizationURL(tempCred, nil)
fmt.Printf("1. Go to %s\n2. Authorize the application\n3. Enter verification code:\n", u)
var code string
fmt.Scanln(&code)
tokenCred, _, err := oauthClient.RequestToken(http.DefaultClient, tempCred, code)
if err != nil {
log.Fatal(err)
}
// fmt.Println(tokenCred)
//contents, _ := ioutil.ReadAll()
// formTweet := url.Values{"status": {"You are simply amazing buddy"}}
// resp, err := oauthClient.Post(http.DefaultClient, tokenCred,
// "https://api.twitter.com/1.1/statuses/update.json", formTweet)
// defer resp.Body.Close()
// if err != nil {
// log.Fatal(err)
// } else {
// contents, _ := ioutil.ReadAll(resp.Body)
// //fmt.Printf("%s\n", contents)
// Debug.Printf("%s", contents)
// Error.Printf("%s", contents)
// }
// os.Exit(0)
fmt.Println(">> Enter search keywords: ")
keywordSearch, _ = r.ReadString('\n')
for !(endCriteria == "D" || endCriteria == "d" || endCriteria == "T" || endCriteria == "t") {
fmt.Println(">> End criteria ? (D: Duration / T: Number of tweets) ")
fmt.Scanln(&endCriteria)
endCriteria = strings.ToLower(endCriteria)
}
if endCriteria == "d" {
fmt.Println(">> Duration value in minutes: ")
} else if endCriteria == "t" {
fmt.Println(">> Number of tweets to reply: ")
}
fmt.Scanln(&endCriteriaValue)
fmt.Println(">> Enter tweet: ")
fmt.Println("Ex: Hey [user], check this awesome sketch http://bitly/xyz")
fmt.Println("1. [user] will be replaced by @username 2. Dont add important stuff like a link in the end, if username is long it will be truncated. 3. Keep some sapce for adding random #hashtag at the end to prevent getting blocked due to similar content")
tweetText, _ = r.ReadString('\n')
for len(tweetText) < 10 || len(tweetText) > 140 || (strings.Contains(tweetText, "[user]") && len(tweetText) >= 130) || !strings.Contains(tweetText, "[user]") {
if !strings.Contains(tweetText, "[user]") {
fmt.Println("[user] must be a part of the tweet, Please try again")
}
if len(tweetText) < 10 {
fmt.Println("Tweet too small, Please try again")
}
if len(tweetText) > 140 {
fmt.Println("Tweet too large, You entered", len(tweetText), "/140 characters. Please try again")
}
if strings.Contains(tweetText, "[user]") && len(tweetText) >= 130 {
fmt.Println("You must leave some character for including username, your current tweet length is ", len(tweetText), "/140")
}
fmt.Println(">> Enter tweet: ")
tweetText, _ = r.ReadString('\n')
}
for !(direction == "o" || direction == "O" || direction == "n" || direction == "N" || direction == "b" || direction == "B") {
fmt.Println(">> Enter direction: (O: Old Tweets / N: New Tweets / B: Both alternatively)")
fmt.Scanln(&direction)
direction = strings.ToLower(direction)
}
if endCriteria == "d" {
go func() {
time.Sleep(time.Duration(endCriteriaValue) * time.Minute)
os.Exit(0)
}()
}
// Run Goroutines
go searchTweets(keywordSearch, direction, tokenCred)
go statusUpdate(tweetText, endCriteria, endCriteriaValue, tokenCred)
// User can terminate by inputting "end" in console
endNow := "n"
for endNow != "end" {
fmt.Println(">> End Now? (end)")
fmt.Scanln(&endNow)
if endNow == "end" {
// TODO: Maybe dump some files
}
}
}
func searchTweets(keywordSearch string, direction string, tokenCred *oauth.Credentials) {
k := 0
var ramainingRequests, resetTimeStamp int64 = 0, 0
var maxId, minId int64 = 0, 0
for {
form := url.Values{}
// Find tweets. It returns only 100 whatever be the count. So sad :( Fuck you twitter
if k == 0 {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}}
//Debug.Println("No min No max")
}
if direction == "o" {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "max_id": {strconv.FormatInt(minId-1, 10)}}
//Debug.Println("OLD: MinId = ", minId)
}
if direction == "n" {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "since_id": {strconv.FormatInt(maxId+1, 10)}}
//Debug.Println("NEW: MaxId = ", maxId)
}
if direction == "b" {
if k%2 == 0 {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "max_id": {strconv.FormatInt(minId-1, 10)}}
//Debug.Println("BOTH: MinId = ", minId)
} else {
form = url.Values{"q": {keywordSearch}, "count": {"2"}, "result_type": {"recent"}, "since_id": {strconv.FormatInt(maxId+1, 10)}}
//Debug.Println("BOTH: MaxId = ", maxId)
}
}
//fmt.Println(form)
resp, err := oauthClient.Get(http.DefaultClient, tokenCred,
"https://api.twitter.com/1.1/search/tweets.json", form)
defer resp.Body.Close()
if err != nil {
log.Fatal(err)
}
ramainingRequests, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Remaining"][0], 10, 64)
//allowedRequests, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Limit"][0], 10, 64)
resetTimeStamp, _ = strconv.ParseInt(resp.Header["X-Rate-Limit-Reset"][0], 10, 64) // converted to miliseconds
resetTimeStamp *= 1000
var srobj searchResponse
searchResponseBody, _ := ioutil.ReadAll(resp.Body)
_ = json.Unmarshal(searchResponseBody, &srobj)
for i := range srobj.Statuses {
//Debug.Println(srobj.Statuses[i].Id)
if (strings.Contains(strings.ToLower(srobj.Statuses[i].Text), "trend")) != true {
userList.Set(srobj.Statuses[i].User.Id, srobj.Statuses[i].User)
// if _, ok := replyStatuses[srobj.Statuses[i].User.Id]; !ok {
// replyStatuses[srobj.Statuses[i].User.Id] = ReplyStatus{Replied: false}
// }
if !xReplyStatuses.IsSet(srobj.Statuses[i].User.Id) {
xReplyStatuses.Initiate(srobj.Statuses[i].User.Id)
}
}
if minId == 0 {
minId = srobj.Statuses[i].Id
} else if minId > srobj.Statuses[i].Id {
minId = srobj.Statuses[i].Id
}
if maxId == 0 {
maxId = srobj.Statuses[i].Id
} else if maxId < srobj.Statuses[i].Id {
maxId = srobj.Statuses[i].Id
}
}
if resp.StatusCode != 200 {
Debug.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
Error.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
} else {
Info.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
if len(srobj.Statuses) > 0 {
if direction == "b" {
if k%2 == 0 {
Debug.Printf("%d old tweets found", len(srobj.Statuses))
} else {
Debug.Printf("%d new tweets found", len(srobj.Statuses))
}
}
if direction == "o" {
Debug.Printf("%d old tweets found", len(srobj.Statuses))
}
if direction == "n" {
Debug.Printf("%d new tweets found", len(srobj.Statuses))
}
}
}
// Insert calculated delay (Useful when there will be multiple senders)
var delay int64
if ramainingRequests != 0 {
delay = (resetTimeStamp - time.Now().UnixNano()/int64(time.Millisecond)) / ramainingRequests
} else {
delay = (resetTimeStamp - time.Now().UnixNano()/int64(time.Millisecond))
}
//fmt.Printf("Pass ends sleeping for %d seconds", delay/1000)
time.Sleep(time.Duration(delay) * time.Millisecond)
// Because there is a limit to how much tweet a user can send
// time.Sleep(5 * time.Millisecond)
k++
}
}
func statusUpdate(tweetText, endCriteria string, endCriteriaValue int, tokenCred *oauth.Credentials) {
time.Sleep(5 * time.Second) // Wait for some time till userList is populated
k := 0
totalPeopleReplied := 0
for {
// Fill tempUserList outside
// While adding stuff to tempUserList check if replyStatuses has a value
useridList := xReplyStatuses.ListUseridUnsent()
peopleReplied := 0
for len(useridList) == 0 {
time.Sleep(1 * time.Second)
}
for j := range useridList {
totalPeopleReplied++
currentUserId := useridList[j]
currentUser := userList.Get(currentUserId)
processedTweetText := strings.Replace(tweetText, "[user]", "@"+currentUser.ScreenName, -1) + " #" + randSeq(3)
formTweet := url.Values{"status": {processedTweetText}}
resp, err := oauthClient.Post(http.DefaultClient, tokenCred,
"https://api.twitter.com/1.1/statuses/update.json", formTweet)
searchResponseBody, _ := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
log.Fatal(err)
// } else {
// contents, _ := ioutil.ReadAll(resp.Body)
// fmt.Printf("%s\n", contents)
}
xReplyStatuses.Sent(currentUserId)
if resp.StatusCode != 200 {
Debug.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
Error.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
} else {
Info.Printf("%s %s %s", searchResponseBody, resp.Header, resp.Status)
//Debug.Printf("%s", processedTweetText)
}
if resp.StatusCode == 403 {
var errobj TwitterResponseError
_ = json.Unmarshal(searchResponseBody, &errobj)
if errobj.errors[0].Code == 185 {
// TODO: Daily user limit reached
// Stop for half an hour
Debug.Printf("Tweet limit reached, Sleeping for half an hour")
time.Sleep(30 * time.Minute)
}
if errobj.errors[0].Code == 226 {
// Inform user to create another campaign and exit
Debug.Printf("Seems like twitter has detected your campaign as spam try another campign, may be another user too")
os.Exit(0)
}
}
if endCriteria == "t" && totalPeopleReplied >= endCriteriaValue {
fmt.Println("Exiting because end criteria reached. Number of tweets replied : ", totalPeopleReplied)
os.Exit(0)
}
peopleReplied++
// User can only send 50 request per half-an-hour (Post every 36 seconds)
time.Sleep(time.Duration(rand.Intn(5)+3) * time.Second) // Wait for 3 to 7 seconds
}
k++
Debug.Printf("%d people replied", peopleReplied)
}
}
| {
b, err := ioutil.ReadFile(*credPath)
if err != nil {
return err
}
return json.Unmarshal(b, &oauthClient.Credentials)
} | identifier_body |
openapi.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package builder3
import (
"encoding/json"
"fmt"
"net/http"
"strings"
restful "github.com/emicklei/go-restful/v3"
builderutil "k8s.io/kube-openapi/pkg/builder3/util"
"k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/common/restfuladapter"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/util"
"k8s.io/kube-openapi/pkg/validation/spec"
)
const (
OpenAPIVersion = "3.0"
)
type openAPI struct {
config *common.OpenAPIV3Config
spec *spec3.OpenAPI
definitions map[string]common.OpenAPIDefinition
}
func groupRoutesByPath(routes []common.Route) map[string][]common.Route {
pathToRoutes := make(map[string][]common.Route)
for _, r := range routes {
pathToRoutes[r.Path()] = append(pathToRoutes[r.Path()], r)
}
return pathToRoutes
}
func (o *openAPI) buildResponse(model interface{}, description string, content []string) (*spec3.Response, error) {
response := &spec3.Response{
ResponseProps: spec3.ResponseProps{
Description: description,
Content: make(map[string]*spec3.MediaType),
},
}
s, err := o.toSchema(util.GetCanonicalTypeName(model))
if err != nil {
return nil, err
}
for _, contentType := range content {
response.ResponseProps.Content[contentType] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: s,
},
}
}
return response, nil
}
func (o *openAPI) buildOperations(route common.Route, inPathCommonParamsMap map[interface{}]*spec3.Parameter) (*spec3.Operation, error) |
func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []string, bodySample interface{}) (*spec3.RequestBody, error) {
for _, param := range parameters {
if param.Kind() == common.BodyParameterKind && bodySample != nil {
schema, err := o.toSchema(util.GetCanonicalTypeName(bodySample))
if err != nil {
return nil, err
}
r := &spec3.RequestBody{
RequestBodyProps: spec3.RequestBodyProps{
Content: map[string]*spec3.MediaType{},
},
}
for _, consume := range consumes {
r.Content[consume] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: schema,
},
}
}
return r, nil
}
}
return nil, nil
}
func newOpenAPI(config *common.Config) openAPI {
o := openAPI{
config: common.ConvertConfigToV3(config),
spec: &spec3.OpenAPI{
Version: "3.0.0",
Info: config.Info,
Paths: &spec3.Paths{
Paths: map[string]*spec3.Path{},
},
Components: &spec3.Components{
Schemas: map[string]*spec.Schema{},
},
},
}
if len(o.config.ResponseDefinitions) > 0 {
o.spec.Components.Responses = make(map[string]*spec3.Response)
}
for k, response := range o.config.ResponseDefinitions {
o.spec.Components.Responses[k] = response
}
if len(o.config.SecuritySchemes) > 0 {
o.spec.Components.SecuritySchemes = make(spec3.SecuritySchemes)
}
for k, securityScheme := range o.config.SecuritySchemes {
o.spec.Components.SecuritySchemes[k] = securityScheme
}
if o.config.GetOperationIDAndTagsFromRoute == nil {
// Map the deprecated handler to the common interface, if provided.
if o.config.GetOperationIDAndTags != nil {
o.config.GetOperationIDAndTagsFromRoute = func(r common.Route) (string, []string, error) {
restfulRouteAdapter, ok := r.(*restfuladapter.RouteAdapter)
if !ok {
return "", nil, fmt.Errorf("config.GetOperationIDAndTags specified but route is not a restful v1 Route")
}
return o.config.GetOperationIDAndTags(restfulRouteAdapter.Route)
}
} else {
o.config.GetOperationIDAndTagsFromRoute = func(r common.Route) (string, []string, error) {
return r.OperationName(), nil, nil
}
}
}
if o.config.GetDefinitionName == nil {
o.config.GetDefinitionName = func(name string) (string, spec.Extensions) {
return name[strings.LastIndex(name, "/")+1:], nil
}
}
if o.config.Definitions != nil {
o.definitions = o.config.Definitions
} else {
o.definitions = o.config.GetDefinitions(func(name string) spec.Ref {
defName, _ := o.config.GetDefinitionName(name)
return spec.MustCreateRef("#/components/schemas/" + common.EscapeJsonPointer(defName))
})
}
return o
}
func (o *openAPI) buildOpenAPISpec(webServices []common.RouteContainer) error {
pathsToIgnore := util.NewTrie(o.config.IgnorePrefixes)
for _, w := range webServices {
rootPath := w.RootPath()
if pathsToIgnore.HasPrefix(rootPath) {
continue
}
commonParams, err := o.buildParameters(w.PathParameters())
if err != nil {
return err
}
for path, routes := range groupRoutesByPath(w.Routes()) {
// go-swagger has special variable definition {$NAME:*} that can only be
// used at the end of the path and it is not recognized by OpenAPI.
if strings.HasSuffix(path, ":*}") {
path = path[:len(path)-3] + "}"
}
if pathsToIgnore.HasPrefix(path) {
continue
}
// Aggregating common parameters make API spec (and generated clients) simpler
inPathCommonParamsMap, err := o.findCommonParameters(routes)
if err != nil {
return err
}
pathItem, exists := o.spec.Paths.Paths[path]
if exists {
return fmt.Errorf("duplicate webservice route has been found for path: %v", path)
}
pathItem = &spec3.Path{
PathProps: spec3.PathProps{},
}
// add web services's parameters as well as any parameters appears in all ops, as common parameters
pathItem.Parameters = append(pathItem.Parameters, commonParams...)
for _, p := range inPathCommonParamsMap {
pathItem.Parameters = append(pathItem.Parameters, p)
}
sortParameters(pathItem.Parameters)
for _, route := range routes {
op, _ := o.buildOperations(route, inPathCommonParamsMap)
sortParameters(op.Parameters)
switch strings.ToUpper(route.Method()) {
case "GET":
pathItem.Get = op
case "POST":
pathItem.Post = op
case "HEAD":
pathItem.Head = op
case "PUT":
pathItem.Put = op
case "DELETE":
pathItem.Delete = op
case "OPTIONS":
pathItem.Options = op
case "PATCH":
pathItem.Patch = op
}
}
o.spec.Paths.Paths[path] = pathItem
}
}
return nil
}
// BuildOpenAPISpec builds OpenAPI v3 spec given a list of route containers and common.Config to customize it.
//
// Deprecated: BuildOpenAPISpecFromRoutes should be used instead.
func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec3.OpenAPI, error) {
return BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(webServices), config)
}
// BuildOpenAPISpecFromRoutes builds OpenAPI v3 spec given a list of route containers and common.Config to customize it.
func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.Config) (*spec3.OpenAPI, error) {
a := newOpenAPI(config)
err := a.buildOpenAPISpec(webServices)
if err != nil {
return nil, err
}
return a.spec, nil
}
// BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it.
// BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the
// passed type names.
func BuildOpenAPIDefinitionsForResources(config *common.Config, names ...string) (map[string]*spec.Schema, error) {
o := newOpenAPI(config)
// We can discard the return value of toSchema because all we care about is the side effect of calling it.
// All the models created for this resource get added to o.swagger.Definitions
for _, name := range names {
_, err := o.toSchema(name)
if err != nil {
return nil, err
}
}
return o.spec.Components.Schemas, nil
}
func (o *openAPI) findCommonParameters(routes []common.Route) (map[interface{}]*spec3.Parameter, error) {
commonParamsMap := make(map[interface{}]*spec3.Parameter, 0)
paramOpsCountByName := make(map[interface{}]int, 0)
paramNameKindToDataMap := make(map[interface{}]common.Parameter, 0)
for _, route := range routes {
routeParamDuplicateMap := make(map[interface{}]bool)
s := ""
params := route.Parameters()
for _, param := range params {
m, _ := json.Marshal(param)
s += string(m) + "\n"
key := mapKeyFromParam(param)
if routeParamDuplicateMap[key] {
msg, _ := json.Marshal(params)
return commonParamsMap, fmt.Errorf("duplicate parameter %v for route %v, %v", param.Name(), string(msg), s)
}
routeParamDuplicateMap[key] = true
paramOpsCountByName[key]++
paramNameKindToDataMap[key] = param
}
}
for key, count := range paramOpsCountByName {
paramData := paramNameKindToDataMap[key]
if count == len(routes) && paramData.Kind() != common.BodyParameterKind {
openAPIParam, err := o.buildParameter(paramData)
if err != nil {
return commonParamsMap, err
}
commonParamsMap[key] = openAPIParam
}
}
return commonParamsMap, nil
}
func (o *openAPI) buildParameters(restParam []common.Parameter) (ret []*spec3.Parameter, err error) {
ret = make([]*spec3.Parameter, len(restParam))
for i, v := range restParam {
ret[i], err = o.buildParameter(v)
if err != nil {
return ret, err
}
}
return ret, nil
}
func (o *openAPI) buildParameter(restParam common.Parameter) (ret *spec3.Parameter, err error) {
ret = &spec3.Parameter{
ParameterProps: spec3.ParameterProps{
Name: restParam.Name(),
Description: restParam.Description(),
Required: restParam.Required(),
},
}
switch restParam.Kind() {
case common.BodyParameterKind:
return nil, nil
case common.PathParameterKind:
ret.In = "path"
if !restParam.Required() {
return ret, fmt.Errorf("path parameters should be marked as required for parameter %v", restParam)
}
case common.QueryParameterKind:
ret.In = "query"
case common.HeaderParameterKind:
ret.In = "header"
/* TODO: add support for the cookie param */
default:
return ret, fmt.Errorf("unsupported restful parameter kind : %v", restParam.Kind())
}
openAPIType, openAPIFormat := common.OpenAPITypeFormat(restParam.DataType())
if openAPIType == "" {
return ret, fmt.Errorf("non-body Restful parameter type should be a simple type, but got : %v", restParam.DataType())
}
ret.Schema = &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{openAPIType},
Format: openAPIFormat,
UniqueItems: !restParam.AllowMultiple(),
},
}
return ret, nil
}
func (o *openAPI) buildDefinitionRecursively(name string) error {
uniqueName, extensions := o.config.GetDefinitionName(name)
if _, ok := o.spec.Components.Schemas[uniqueName]; ok {
return nil
}
if item, ok := o.definitions[name]; ok {
schema := &spec.Schema{
VendorExtensible: item.Schema.VendorExtensible,
SchemaProps: item.Schema.SchemaProps,
SwaggerSchemaProps: item.Schema.SwaggerSchemaProps,
}
if extensions != nil {
if schema.Extensions == nil {
schema.Extensions = spec.Extensions{}
}
for k, v := range extensions {
schema.Extensions[k] = v
}
}
// delete the embedded v2 schema if exists, otherwise no-op
delete(schema.VendorExtensible.Extensions, common.ExtensionV2Schema)
schema = builderutil.WrapRefs(schema)
o.spec.Components.Schemas[uniqueName] = schema
for _, v := range item.Dependencies {
if err := o.buildDefinitionRecursively(v); err != nil {
return err
}
}
} else {
return fmt.Errorf("cannot find model definition for %v. If you added a new type, you may need to add +k8s:openapi-gen=true to the package or type and run code-gen again", name)
}
return nil
}
func (o *openAPI) buildDefinitionForType(name string) (string, error) {
if err := o.buildDefinitionRecursively(name); err != nil {
return "", err
}
defName, _ := o.config.GetDefinitionName(name)
return "#/components/schemas/" + common.EscapeJsonPointer(defName), nil
}
func (o *openAPI) toSchema(name string) (_ *spec.Schema, err error) {
if openAPIType, openAPIFormat := common.OpenAPITypeFormat(name); openAPIType != "" {
return &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{openAPIType},
Format: openAPIFormat,
},
}, nil
} else {
ref, err := o.buildDefinitionForType(name)
if err != nil {
return nil, err
}
return &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: spec.MustCreateRef(ref),
},
}, nil
}
}
| {
ret := &spec3.Operation{
OperationProps: spec3.OperationProps{
Description: route.Description(),
Responses: &spec3.Responses{
ResponsesProps: spec3.ResponsesProps{
StatusCodeResponses: make(map[int]*spec3.Response),
},
},
},
}
for k, v := range route.Metadata() {
if strings.HasPrefix(k, common.ExtensionPrefix) {
if ret.Extensions == nil {
ret.Extensions = spec.Extensions{}
}
ret.Extensions.Add(k, v)
}
}
var err error
if ret.OperationId, ret.Tags, err = o.config.GetOperationIDAndTagsFromRoute(route); err != nil {
return ret, err
}
// Build responses
for _, resp := range route.StatusCodeResponses() {
ret.Responses.StatusCodeResponses[resp.Code()], err = o.buildResponse(resp.Model(), resp.Message(), route.Produces())
if err != nil {
return ret, err
}
}
// If there is no response but a write sample, assume that write sample is an http.StatusOK response.
if len(ret.Responses.StatusCodeResponses) == 0 && route.ResponsePayloadSample() != nil {
ret.Responses.StatusCodeResponses[http.StatusOK], err = o.buildResponse(route.ResponsePayloadSample(), "OK", route.Produces())
if err != nil {
return ret, err
}
}
for code, resp := range o.config.CommonResponses {
if _, exists := ret.Responses.StatusCodeResponses[code]; !exists {
ret.Responses.StatusCodeResponses[code] = resp
}
}
if len(ret.Responses.StatusCodeResponses) == 0 {
ret.Responses.Default = o.config.DefaultResponse
}
params := route.Parameters()
for _, param := range params {
_, isCommon := inPathCommonParamsMap[mapKeyFromParam(param)]
if !isCommon && param.Kind() != common.BodyParameterKind {
openAPIParam, err := o.buildParameter(param)
if err != nil {
return ret, err
}
ret.Parameters = append(ret.Parameters, openAPIParam)
}
}
body, err := o.buildRequestBody(params, route.Consumes(), route.RequestPayloadSample())
if err != nil {
return nil, err
}
if body != nil {
ret.RequestBody = body
}
return ret, nil
} | identifier_body |
openapi.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package builder3
import (
"encoding/json"
"fmt"
"net/http"
"strings"
restful "github.com/emicklei/go-restful/v3"
builderutil "k8s.io/kube-openapi/pkg/builder3/util"
"k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/common/restfuladapter"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/util"
"k8s.io/kube-openapi/pkg/validation/spec"
)
const (
OpenAPIVersion = "3.0"
)
type openAPI struct {
config *common.OpenAPIV3Config
spec *spec3.OpenAPI
definitions map[string]common.OpenAPIDefinition
}
func groupRoutesByPath(routes []common.Route) map[string][]common.Route {
pathToRoutes := make(map[string][]common.Route)
for _, r := range routes {
pathToRoutes[r.Path()] = append(pathToRoutes[r.Path()], r)
}
return pathToRoutes
}
func (o *openAPI) buildResponse(model interface{}, description string, content []string) (*spec3.Response, error) {
response := &spec3.Response{
ResponseProps: spec3.ResponseProps{
Description: description,
Content: make(map[string]*spec3.MediaType),
},
}
s, err := o.toSchema(util.GetCanonicalTypeName(model))
if err != nil {
return nil, err
}
for _, contentType := range content {
response.ResponseProps.Content[contentType] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: s,
},
}
}
return response, nil
}
func (o *openAPI) buildOperations(route common.Route, inPathCommonParamsMap map[interface{}]*spec3.Parameter) (*spec3.Operation, error) {
ret := &spec3.Operation{
OperationProps: spec3.OperationProps{
Description: route.Description(),
Responses: &spec3.Responses{
ResponsesProps: spec3.ResponsesProps{
StatusCodeResponses: make(map[int]*spec3.Response),
},
},
},
}
for k, v := range route.Metadata() {
if strings.HasPrefix(k, common.ExtensionPrefix) {
if ret.Extensions == nil {
ret.Extensions = spec.Extensions{}
}
ret.Extensions.Add(k, v)
}
}
var err error
if ret.OperationId, ret.Tags, err = o.config.GetOperationIDAndTagsFromRoute(route); err != nil {
return ret, err
}
// Build responses
for _, resp := range route.StatusCodeResponses() {
ret.Responses.StatusCodeResponses[resp.Code()], err = o.buildResponse(resp.Model(), resp.Message(), route.Produces())
if err != nil {
return ret, err
}
}
// If there is no response but a write sample, assume that write sample is an http.StatusOK response.
if len(ret.Responses.StatusCodeResponses) == 0 && route.ResponsePayloadSample() != nil {
ret.Responses.StatusCodeResponses[http.StatusOK], err = o.buildResponse(route.ResponsePayloadSample(), "OK", route.Produces())
if err != nil {
return ret, err
}
}
for code, resp := range o.config.CommonResponses {
if _, exists := ret.Responses.StatusCodeResponses[code]; !exists {
ret.Responses.StatusCodeResponses[code] = resp
}
}
if len(ret.Responses.StatusCodeResponses) == 0 {
ret.Responses.Default = o.config.DefaultResponse
}
params := route.Parameters()
for _, param := range params {
_, isCommon := inPathCommonParamsMap[mapKeyFromParam(param)]
if !isCommon && param.Kind() != common.BodyParameterKind {
openAPIParam, err := o.buildParameter(param)
if err != nil {
return ret, err
}
ret.Parameters = append(ret.Parameters, openAPIParam)
}
}
body, err := o.buildRequestBody(params, route.Consumes(), route.RequestPayloadSample())
if err != nil {
return nil, err
}
if body != nil {
ret.RequestBody = body
}
return ret, nil
}
func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []string, bodySample interface{}) (*spec3.RequestBody, error) {
for _, param := range parameters {
if param.Kind() == common.BodyParameterKind && bodySample != nil {
schema, err := o.toSchema(util.GetCanonicalTypeName(bodySample))
if err != nil {
return nil, err
}
r := &spec3.RequestBody{
RequestBodyProps: spec3.RequestBodyProps{
Content: map[string]*spec3.MediaType{},
},
}
for _, consume := range consumes {
r.Content[consume] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: schema,
},
}
}
return r, nil
}
}
return nil, nil
}
func newOpenAPI(config *common.Config) openAPI {
o := openAPI{
config: common.ConvertConfigToV3(config),
spec: &spec3.OpenAPI{
Version: "3.0.0",
Info: config.Info,
Paths: &spec3.Paths{
Paths: map[string]*spec3.Path{},
},
Components: &spec3.Components{
Schemas: map[string]*spec.Schema{},
},
},
}
if len(o.config.ResponseDefinitions) > 0 {
o.spec.Components.Responses = make(map[string]*spec3.Response)
}
for k, response := range o.config.ResponseDefinitions {
o.spec.Components.Responses[k] = response
}
if len(o.config.SecuritySchemes) > 0 {
o.spec.Components.SecuritySchemes = make(spec3.SecuritySchemes)
}
for k, securityScheme := range o.config.SecuritySchemes {
o.spec.Components.SecuritySchemes[k] = securityScheme
}
if o.config.GetOperationIDAndTagsFromRoute == nil {
// Map the deprecated handler to the common interface, if provided.
if o.config.GetOperationIDAndTags != nil {
o.config.GetOperationIDAndTagsFromRoute = func(r common.Route) (string, []string, error) {
restfulRouteAdapter, ok := r.(*restfuladapter.RouteAdapter)
if !ok {
return "", nil, fmt.Errorf("config.GetOperationIDAndTags specified but route is not a restful v1 Route")
}
return o.config.GetOperationIDAndTags(restfulRouteAdapter.Route)
}
} else {
o.config.GetOperationIDAndTagsFromRoute = func(r common.Route) (string, []string, error) {
return r.OperationName(), nil, nil
}
}
}
if o.config.GetDefinitionName == nil {
o.config.GetDefinitionName = func(name string) (string, spec.Extensions) {
return name[strings.LastIndex(name, "/")+1:], nil
}
}
if o.config.Definitions != nil {
o.definitions = o.config.Definitions
} else {
o.definitions = o.config.GetDefinitions(func(name string) spec.Ref {
defName, _ := o.config.GetDefinitionName(name)
return spec.MustCreateRef("#/components/schemas/" + common.EscapeJsonPointer(defName))
})
}
return o
}
func (o *openAPI) buildOpenAPISpec(webServices []common.RouteContainer) error {
pathsToIgnore := util.NewTrie(o.config.IgnorePrefixes)
for _, w := range webServices {
rootPath := w.RootPath()
if pathsToIgnore.HasPrefix(rootPath) {
continue
}
commonParams, err := o.buildParameters(w.PathParameters())
if err != nil {
return err
}
for path, routes := range groupRoutesByPath(w.Routes()) {
// go-swagger has special variable definition {$NAME:*} that can only be
// used at the end of the path and it is not recognized by OpenAPI.
if strings.HasSuffix(path, ":*}") {
path = path[:len(path)-3] + "}"
}
if pathsToIgnore.HasPrefix(path) {
continue
}
// Aggregating common parameters make API spec (and generated clients) simpler
inPathCommonParamsMap, err := o.findCommonParameters(routes)
if err != nil {
return err
}
pathItem, exists := o.spec.Paths.Paths[path]
if exists {
return fmt.Errorf("duplicate webservice route has been found for path: %v", path)
}
pathItem = &spec3.Path{
PathProps: spec3.PathProps{},
}
// add web services's parameters as well as any parameters appears in all ops, as common parameters
pathItem.Parameters = append(pathItem.Parameters, commonParams...)
for _, p := range inPathCommonParamsMap {
pathItem.Parameters = append(pathItem.Parameters, p)
}
sortParameters(pathItem.Parameters)
for _, route := range routes {
op, _ := o.buildOperations(route, inPathCommonParamsMap)
sortParameters(op.Parameters)
switch strings.ToUpper(route.Method()) {
case "GET":
pathItem.Get = op
case "POST":
pathItem.Post = op
case "HEAD":
pathItem.Head = op
case "PUT":
pathItem.Put = op
case "DELETE":
pathItem.Delete = op
case "OPTIONS":
pathItem.Options = op
case "PATCH":
pathItem.Patch = op
}
}
o.spec.Paths.Paths[path] = pathItem
}
}
return nil
}
// BuildOpenAPISpec builds OpenAPI v3 spec given a list of route containers and common.Config to customize it.
//
// Deprecated: BuildOpenAPISpecFromRoutes should be used instead.
func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec3.OpenAPI, error) {
return BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(webServices), config)
}
// BuildOpenAPISpecFromRoutes builds OpenAPI v3 spec given a list of route containers and common.Config to customize it.
func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.Config) (*spec3.OpenAPI, error) {
a := newOpenAPI(config)
err := a.buildOpenAPISpec(webServices)
if err != nil {
return nil, err
}
return a.spec, nil
}
// BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it.
// BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the
// passed type names.
func | (config *common.Config, names ...string) (map[string]*spec.Schema, error) {
o := newOpenAPI(config)
// We can discard the return value of toSchema because all we care about is the side effect of calling it.
// All the models created for this resource get added to o.swagger.Definitions
for _, name := range names {
_, err := o.toSchema(name)
if err != nil {
return nil, err
}
}
return o.spec.Components.Schemas, nil
}
func (o *openAPI) findCommonParameters(routes []common.Route) (map[interface{}]*spec3.Parameter, error) {
commonParamsMap := make(map[interface{}]*spec3.Parameter, 0)
paramOpsCountByName := make(map[interface{}]int, 0)
paramNameKindToDataMap := make(map[interface{}]common.Parameter, 0)
for _, route := range routes {
routeParamDuplicateMap := make(map[interface{}]bool)
s := ""
params := route.Parameters()
for _, param := range params {
m, _ := json.Marshal(param)
s += string(m) + "\n"
key := mapKeyFromParam(param)
if routeParamDuplicateMap[key] {
msg, _ := json.Marshal(params)
return commonParamsMap, fmt.Errorf("duplicate parameter %v for route %v, %v", param.Name(), string(msg), s)
}
routeParamDuplicateMap[key] = true
paramOpsCountByName[key]++
paramNameKindToDataMap[key] = param
}
}
for key, count := range paramOpsCountByName {
paramData := paramNameKindToDataMap[key]
if count == len(routes) && paramData.Kind() != common.BodyParameterKind {
openAPIParam, err := o.buildParameter(paramData)
if err != nil {
return commonParamsMap, err
}
commonParamsMap[key] = openAPIParam
}
}
return commonParamsMap, nil
}
func (o *openAPI) buildParameters(restParam []common.Parameter) (ret []*spec3.Parameter, err error) {
ret = make([]*spec3.Parameter, len(restParam))
for i, v := range restParam {
ret[i], err = o.buildParameter(v)
if err != nil {
return ret, err
}
}
return ret, nil
}
func (o *openAPI) buildParameter(restParam common.Parameter) (ret *spec3.Parameter, err error) {
ret = &spec3.Parameter{
ParameterProps: spec3.ParameterProps{
Name: restParam.Name(),
Description: restParam.Description(),
Required: restParam.Required(),
},
}
switch restParam.Kind() {
case common.BodyParameterKind:
return nil, nil
case common.PathParameterKind:
ret.In = "path"
if !restParam.Required() {
return ret, fmt.Errorf("path parameters should be marked as required for parameter %v", restParam)
}
case common.QueryParameterKind:
ret.In = "query"
case common.HeaderParameterKind:
ret.In = "header"
/* TODO: add support for the cookie param */
default:
return ret, fmt.Errorf("unsupported restful parameter kind : %v", restParam.Kind())
}
openAPIType, openAPIFormat := common.OpenAPITypeFormat(restParam.DataType())
if openAPIType == "" {
return ret, fmt.Errorf("non-body Restful parameter type should be a simple type, but got : %v", restParam.DataType())
}
ret.Schema = &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{openAPIType},
Format: openAPIFormat,
UniqueItems: !restParam.AllowMultiple(),
},
}
return ret, nil
}
func (o *openAPI) buildDefinitionRecursively(name string) error {
uniqueName, extensions := o.config.GetDefinitionName(name)
if _, ok := o.spec.Components.Schemas[uniqueName]; ok {
return nil
}
if item, ok := o.definitions[name]; ok {
schema := &spec.Schema{
VendorExtensible: item.Schema.VendorExtensible,
SchemaProps: item.Schema.SchemaProps,
SwaggerSchemaProps: item.Schema.SwaggerSchemaProps,
}
if extensions != nil {
if schema.Extensions == nil {
schema.Extensions = spec.Extensions{}
}
for k, v := range extensions {
schema.Extensions[k] = v
}
}
// delete the embedded v2 schema if exists, otherwise no-op
delete(schema.VendorExtensible.Extensions, common.ExtensionV2Schema)
schema = builderutil.WrapRefs(schema)
o.spec.Components.Schemas[uniqueName] = schema
for _, v := range item.Dependencies {
if err := o.buildDefinitionRecursively(v); err != nil {
return err
}
}
} else {
return fmt.Errorf("cannot find model definition for %v. If you added a new type, you may need to add +k8s:openapi-gen=true to the package or type and run code-gen again", name)
}
return nil
}
func (o *openAPI) buildDefinitionForType(name string) (string, error) {
if err := o.buildDefinitionRecursively(name); err != nil {
return "", err
}
defName, _ := o.config.GetDefinitionName(name)
return "#/components/schemas/" + common.EscapeJsonPointer(defName), nil
}
func (o *openAPI) toSchema(name string) (_ *spec.Schema, err error) {
if openAPIType, openAPIFormat := common.OpenAPITypeFormat(name); openAPIType != "" {
return &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{openAPIType},
Format: openAPIFormat,
},
}, nil
} else {
ref, err := o.buildDefinitionForType(name)
if err != nil {
return nil, err
}
return &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: spec.MustCreateRef(ref),
},
}, nil
}
}
| BuildOpenAPIDefinitionsForResources | identifier_name |
openapi.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package builder3
import (
"encoding/json"
"fmt"
"net/http"
"strings"
restful "github.com/emicklei/go-restful/v3"
builderutil "k8s.io/kube-openapi/pkg/builder3/util"
"k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/common/restfuladapter"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/util"
"k8s.io/kube-openapi/pkg/validation/spec"
)
const (
OpenAPIVersion = "3.0"
)
type openAPI struct {
config *common.OpenAPIV3Config
spec *spec3.OpenAPI
definitions map[string]common.OpenAPIDefinition
}
func groupRoutesByPath(routes []common.Route) map[string][]common.Route {
pathToRoutes := make(map[string][]common.Route)
for _, r := range routes {
pathToRoutes[r.Path()] = append(pathToRoutes[r.Path()], r)
}
return pathToRoutes
}
func (o *openAPI) buildResponse(model interface{}, description string, content []string) (*spec3.Response, error) {
response := &spec3.Response{
ResponseProps: spec3.ResponseProps{
Description: description,
Content: make(map[string]*spec3.MediaType),
},
}
s, err := o.toSchema(util.GetCanonicalTypeName(model))
if err != nil {
return nil, err
}
for _, contentType := range content {
response.ResponseProps.Content[contentType] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: s,
},
}
}
return response, nil
}
func (o *openAPI) buildOperations(route common.Route, inPathCommonParamsMap map[interface{}]*spec3.Parameter) (*spec3.Operation, error) {
ret := &spec3.Operation{
OperationProps: spec3.OperationProps{
Description: route.Description(),
Responses: &spec3.Responses{
ResponsesProps: spec3.ResponsesProps{
StatusCodeResponses: make(map[int]*spec3.Response),
},
},
},
}
for k, v := range route.Metadata() {
if strings.HasPrefix(k, common.ExtensionPrefix) {
if ret.Extensions == nil {
ret.Extensions = spec.Extensions{}
}
ret.Extensions.Add(k, v)
}
}
var err error
if ret.OperationId, ret.Tags, err = o.config.GetOperationIDAndTagsFromRoute(route); err != nil {
return ret, err
}
// Build responses
for _, resp := range route.StatusCodeResponses() {
ret.Responses.StatusCodeResponses[resp.Code()], err = o.buildResponse(resp.Model(), resp.Message(), route.Produces())
if err != nil {
return ret, err
}
}
// If there is no response but a write sample, assume that write sample is an http.StatusOK response.
if len(ret.Responses.StatusCodeResponses) == 0 && route.ResponsePayloadSample() != nil {
ret.Responses.StatusCodeResponses[http.StatusOK], err = o.buildResponse(route.ResponsePayloadSample(), "OK", route.Produces())
if err != nil {
return ret, err
}
}
for code, resp := range o.config.CommonResponses {
if _, exists := ret.Responses.StatusCodeResponses[code]; !exists {
ret.Responses.StatusCodeResponses[code] = resp
}
}
if len(ret.Responses.StatusCodeResponses) == 0 {
ret.Responses.Default = o.config.DefaultResponse
}
params := route.Parameters()
for _, param := range params {
_, isCommon := inPathCommonParamsMap[mapKeyFromParam(param)]
if !isCommon && param.Kind() != common.BodyParameterKind {
openAPIParam, err := o.buildParameter(param)
if err != nil {
return ret, err
}
ret.Parameters = append(ret.Parameters, openAPIParam)
}
}
body, err := o.buildRequestBody(params, route.Consumes(), route.RequestPayloadSample())
if err != nil {
return nil, err
}
if body != nil {
ret.RequestBody = body
}
return ret, nil
}
func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []string, bodySample interface{}) (*spec3.RequestBody, error) {
for _, param := range parameters {
if param.Kind() == common.BodyParameterKind && bodySample != nil {
schema, err := o.toSchema(util.GetCanonicalTypeName(bodySample))
if err != nil {
return nil, err
}
r := &spec3.RequestBody{
RequestBodyProps: spec3.RequestBodyProps{
Content: map[string]*spec3.MediaType{},
},
}
for _, consume := range consumes {
r.Content[consume] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: schema,
},
}
}
return r, nil
}
}
return nil, nil
}
func newOpenAPI(config *common.Config) openAPI {
o := openAPI{
config: common.ConvertConfigToV3(config),
spec: &spec3.OpenAPI{
Version: "3.0.0",
Info: config.Info,
Paths: &spec3.Paths{
Paths: map[string]*spec3.Path{},
},
Components: &spec3.Components{
Schemas: map[string]*spec.Schema{},
},
},
}
if len(o.config.ResponseDefinitions) > 0 {
o.spec.Components.Responses = make(map[string]*spec3.Response)
}
for k, response := range o.config.ResponseDefinitions {
o.spec.Components.Responses[k] = response
}
if len(o.config.SecuritySchemes) > 0 {
o.spec.Components.SecuritySchemes = make(spec3.SecuritySchemes)
}
for k, securityScheme := range o.config.SecuritySchemes {
o.spec.Components.SecuritySchemes[k] = securityScheme
}
if o.config.GetOperationIDAndTagsFromRoute == nil {
// Map the deprecated handler to the common interface, if provided.
if o.config.GetOperationIDAndTags != nil {
o.config.GetOperationIDAndTagsFromRoute = func(r common.Route) (string, []string, error) {
restfulRouteAdapter, ok := r.(*restfuladapter.RouteAdapter)
if !ok {
return "", nil, fmt.Errorf("config.GetOperationIDAndTags specified but route is not a restful v1 Route")
}
return o.config.GetOperationIDAndTags(restfulRouteAdapter.Route)
}
} else {
o.config.GetOperationIDAndTagsFromRoute = func(r common.Route) (string, []string, error) {
return r.OperationName(), nil, nil
}
}
}
if o.config.GetDefinitionName == nil {
o.config.GetDefinitionName = func(name string) (string, spec.Extensions) {
return name[strings.LastIndex(name, "/")+1:], nil
}
}
if o.config.Definitions != nil {
o.definitions = o.config.Definitions
} else {
o.definitions = o.config.GetDefinitions(func(name string) spec.Ref {
defName, _ := o.config.GetDefinitionName(name)
return spec.MustCreateRef("#/components/schemas/" + common.EscapeJsonPointer(defName))
})
}
return o
}
func (o *openAPI) buildOpenAPISpec(webServices []common.RouteContainer) error {
pathsToIgnore := util.NewTrie(o.config.IgnorePrefixes)
for _, w := range webServices {
rootPath := w.RootPath()
if pathsToIgnore.HasPrefix(rootPath) {
continue
}
commonParams, err := o.buildParameters(w.PathParameters())
if err != nil {
return err
}
for path, routes := range groupRoutesByPath(w.Routes()) {
// go-swagger has special variable definition {$NAME:*} that can only be
// used at the end of the path and it is not recognized by OpenAPI.
if strings.HasSuffix(path, ":*}") {
path = path[:len(path)-3] + "}"
}
if pathsToIgnore.HasPrefix(path) {
continue
}
// Aggregating common parameters make API spec (and generated clients) simpler
inPathCommonParamsMap, err := o.findCommonParameters(routes)
if err != nil {
return err
}
pathItem, exists := o.spec.Paths.Paths[path]
if exists {
return fmt.Errorf("duplicate webservice route has been found for path: %v", path)
}
pathItem = &spec3.Path{
PathProps: spec3.PathProps{},
}
// add web services's parameters as well as any parameters appears in all ops, as common parameters
pathItem.Parameters = append(pathItem.Parameters, commonParams...)
for _, p := range inPathCommonParamsMap {
pathItem.Parameters = append(pathItem.Parameters, p)
}
sortParameters(pathItem.Parameters)
for _, route := range routes {
op, _ := o.buildOperations(route, inPathCommonParamsMap)
sortParameters(op.Parameters)
switch strings.ToUpper(route.Method()) {
case "GET": | pathItem.Head = op
case "PUT":
pathItem.Put = op
case "DELETE":
pathItem.Delete = op
case "OPTIONS":
pathItem.Options = op
case "PATCH":
pathItem.Patch = op
}
}
o.spec.Paths.Paths[path] = pathItem
}
}
return nil
}
// BuildOpenAPISpec builds OpenAPI v3 spec given a list of route containers and common.Config to customize it.
//
// Deprecated: BuildOpenAPISpecFromRoutes should be used instead.
func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec3.OpenAPI, error) {
return BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(webServices), config)
}
// BuildOpenAPISpecFromRoutes builds OpenAPI v3 spec given a list of route containers and common.Config to customize it.
func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.Config) (*spec3.OpenAPI, error) {
a := newOpenAPI(config)
err := a.buildOpenAPISpec(webServices)
if err != nil {
return nil, err
}
return a.spec, nil
}
// BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it.
// BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the
// passed type names.
func BuildOpenAPIDefinitionsForResources(config *common.Config, names ...string) (map[string]*spec.Schema, error) {
o := newOpenAPI(config)
// We can discard the return value of toSchema because all we care about is the side effect of calling it.
// All the models created for this resource get added to o.swagger.Definitions
for _, name := range names {
_, err := o.toSchema(name)
if err != nil {
return nil, err
}
}
return o.spec.Components.Schemas, nil
}
func (o *openAPI) findCommonParameters(routes []common.Route) (map[interface{}]*spec3.Parameter, error) {
commonParamsMap := make(map[interface{}]*spec3.Parameter, 0)
paramOpsCountByName := make(map[interface{}]int, 0)
paramNameKindToDataMap := make(map[interface{}]common.Parameter, 0)
for _, route := range routes {
routeParamDuplicateMap := make(map[interface{}]bool)
s := ""
params := route.Parameters()
for _, param := range params {
m, _ := json.Marshal(param)
s += string(m) + "\n"
key := mapKeyFromParam(param)
if routeParamDuplicateMap[key] {
msg, _ := json.Marshal(params)
return commonParamsMap, fmt.Errorf("duplicate parameter %v for route %v, %v", param.Name(), string(msg), s)
}
routeParamDuplicateMap[key] = true
paramOpsCountByName[key]++
paramNameKindToDataMap[key] = param
}
}
for key, count := range paramOpsCountByName {
paramData := paramNameKindToDataMap[key]
if count == len(routes) && paramData.Kind() != common.BodyParameterKind {
openAPIParam, err := o.buildParameter(paramData)
if err != nil {
return commonParamsMap, err
}
commonParamsMap[key] = openAPIParam
}
}
return commonParamsMap, nil
}
func (o *openAPI) buildParameters(restParam []common.Parameter) (ret []*spec3.Parameter, err error) {
ret = make([]*spec3.Parameter, len(restParam))
for i, v := range restParam {
ret[i], err = o.buildParameter(v)
if err != nil {
return ret, err
}
}
return ret, nil
}
func (o *openAPI) buildParameter(restParam common.Parameter) (ret *spec3.Parameter, err error) {
ret = &spec3.Parameter{
ParameterProps: spec3.ParameterProps{
Name: restParam.Name(),
Description: restParam.Description(),
Required: restParam.Required(),
},
}
switch restParam.Kind() {
case common.BodyParameterKind:
return nil, nil
case common.PathParameterKind:
ret.In = "path"
if !restParam.Required() {
return ret, fmt.Errorf("path parameters should be marked as required for parameter %v", restParam)
}
case common.QueryParameterKind:
ret.In = "query"
case common.HeaderParameterKind:
ret.In = "header"
/* TODO: add support for the cookie param */
default:
return ret, fmt.Errorf("unsupported restful parameter kind : %v", restParam.Kind())
}
openAPIType, openAPIFormat := common.OpenAPITypeFormat(restParam.DataType())
if openAPIType == "" {
return ret, fmt.Errorf("non-body Restful parameter type should be a simple type, but got : %v", restParam.DataType())
}
ret.Schema = &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{openAPIType},
Format: openAPIFormat,
UniqueItems: !restParam.AllowMultiple(),
},
}
return ret, nil
}
func (o *openAPI) buildDefinitionRecursively(name string) error {
uniqueName, extensions := o.config.GetDefinitionName(name)
if _, ok := o.spec.Components.Schemas[uniqueName]; ok {
return nil
}
if item, ok := o.definitions[name]; ok {
schema := &spec.Schema{
VendorExtensible: item.Schema.VendorExtensible,
SchemaProps: item.Schema.SchemaProps,
SwaggerSchemaProps: item.Schema.SwaggerSchemaProps,
}
if extensions != nil {
if schema.Extensions == nil {
schema.Extensions = spec.Extensions{}
}
for k, v := range extensions {
schema.Extensions[k] = v
}
}
// delete the embedded v2 schema if exists, otherwise no-op
delete(schema.VendorExtensible.Extensions, common.ExtensionV2Schema)
schema = builderutil.WrapRefs(schema)
o.spec.Components.Schemas[uniqueName] = schema
for _, v := range item.Dependencies {
if err := o.buildDefinitionRecursively(v); err != nil {
return err
}
}
} else {
return fmt.Errorf("cannot find model definition for %v. If you added a new type, you may need to add +k8s:openapi-gen=true to the package or type and run code-gen again", name)
}
return nil
}
func (o *openAPI) buildDefinitionForType(name string) (string, error) {
if err := o.buildDefinitionRecursively(name); err != nil {
return "", err
}
defName, _ := o.config.GetDefinitionName(name)
return "#/components/schemas/" + common.EscapeJsonPointer(defName), nil
}
func (o *openAPI) toSchema(name string) (_ *spec.Schema, err error) {
if openAPIType, openAPIFormat := common.OpenAPITypeFormat(name); openAPIType != "" {
return &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{openAPIType},
Format: openAPIFormat,
},
}, nil
} else {
ref, err := o.buildDefinitionForType(name)
if err != nil {
return nil, err
}
return &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: spec.MustCreateRef(ref),
},
}, nil
}
} | pathItem.Get = op
case "POST":
pathItem.Post = op
case "HEAD": | random_line_split |
openapi.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package builder3
import (
"encoding/json"
"fmt"
"net/http"
"strings"
restful "github.com/emicklei/go-restful/v3"
builderutil "k8s.io/kube-openapi/pkg/builder3/util"
"k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/common/restfuladapter"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/util"
"k8s.io/kube-openapi/pkg/validation/spec"
)
const (
OpenAPIVersion = "3.0"
)
type openAPI struct {
config *common.OpenAPIV3Config
spec *spec3.OpenAPI
definitions map[string]common.OpenAPIDefinition
}
func groupRoutesByPath(routes []common.Route) map[string][]common.Route {
pathToRoutes := make(map[string][]common.Route)
for _, r := range routes {
pathToRoutes[r.Path()] = append(pathToRoutes[r.Path()], r)
}
return pathToRoutes
}
func (o *openAPI) buildResponse(model interface{}, description string, content []string) (*spec3.Response, error) {
response := &spec3.Response{
ResponseProps: spec3.ResponseProps{
Description: description,
Content: make(map[string]*spec3.MediaType),
},
}
s, err := o.toSchema(util.GetCanonicalTypeName(model))
if err != nil {
return nil, err
}
for _, contentType := range content {
response.ResponseProps.Content[contentType] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: s,
},
}
}
return response, nil
}
func (o *openAPI) buildOperations(route common.Route, inPathCommonParamsMap map[interface{}]*spec3.Parameter) (*spec3.Operation, error) {
ret := &spec3.Operation{
OperationProps: spec3.OperationProps{
Description: route.Description(),
Responses: &spec3.Responses{
ResponsesProps: spec3.ResponsesProps{
StatusCodeResponses: make(map[int]*spec3.Response),
},
},
},
}
for k, v := range route.Metadata() {
if strings.HasPrefix(k, common.ExtensionPrefix) {
if ret.Extensions == nil {
ret.Extensions = spec.Extensions{}
}
ret.Extensions.Add(k, v)
}
}
var err error
if ret.OperationId, ret.Tags, err = o.config.GetOperationIDAndTagsFromRoute(route); err != nil {
return ret, err
}
// Build responses
for _, resp := range route.StatusCodeResponses() {
ret.Responses.StatusCodeResponses[resp.Code()], err = o.buildResponse(resp.Model(), resp.Message(), route.Produces())
if err != nil {
return ret, err
}
}
// If there is no response but a write sample, assume that write sample is an http.StatusOK response.
if len(ret.Responses.StatusCodeResponses) == 0 && route.ResponsePayloadSample() != nil {
ret.Responses.StatusCodeResponses[http.StatusOK], err = o.buildResponse(route.ResponsePayloadSample(), "OK", route.Produces())
if err != nil {
return ret, err
}
}
for code, resp := range o.config.CommonResponses {
if _, exists := ret.Responses.StatusCodeResponses[code]; !exists {
ret.Responses.StatusCodeResponses[code] = resp
}
}
if len(ret.Responses.StatusCodeResponses) == 0 {
ret.Responses.Default = o.config.DefaultResponse
}
params := route.Parameters()
for _, param := range params {
_, isCommon := inPathCommonParamsMap[mapKeyFromParam(param)]
if !isCommon && param.Kind() != common.BodyParameterKind {
openAPIParam, err := o.buildParameter(param)
if err != nil {
return ret, err
}
ret.Parameters = append(ret.Parameters, openAPIParam)
}
}
body, err := o.buildRequestBody(params, route.Consumes(), route.RequestPayloadSample())
if err != nil {
return nil, err
}
if body != nil {
ret.RequestBody = body
}
return ret, nil
}
func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []string, bodySample interface{}) (*spec3.RequestBody, error) {
for _, param := range parameters {
if param.Kind() == common.BodyParameterKind && bodySample != nil {
schema, err := o.toSchema(util.GetCanonicalTypeName(bodySample))
if err != nil {
return nil, err
}
r := &spec3.RequestBody{
RequestBodyProps: spec3.RequestBodyProps{
Content: map[string]*spec3.MediaType{},
},
}
for _, consume := range consumes {
r.Content[consume] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: schema,
},
}
}
return r, nil
}
}
return nil, nil
}
func newOpenAPI(config *common.Config) openAPI {
o := openAPI{
config: common.ConvertConfigToV3(config),
spec: &spec3.OpenAPI{
Version: "3.0.0",
Info: config.Info,
Paths: &spec3.Paths{
Paths: map[string]*spec3.Path{},
},
Components: &spec3.Components{
Schemas: map[string]*spec.Schema{},
},
},
}
if len(o.config.ResponseDefinitions) > 0 {
o.spec.Components.Responses = make(map[string]*spec3.Response)
}
for k, response := range o.config.ResponseDefinitions {
o.spec.Components.Responses[k] = response
}
if len(o.config.SecuritySchemes) > 0 {
o.spec.Components.SecuritySchemes = make(spec3.SecuritySchemes)
}
for k, securityScheme := range o.config.SecuritySchemes |
if o.config.GetOperationIDAndTagsFromRoute == nil {
// Map the deprecated handler to the common interface, if provided.
if o.config.GetOperationIDAndTags != nil {
o.config.GetOperationIDAndTagsFromRoute = func(r common.Route) (string, []string, error) {
restfulRouteAdapter, ok := r.(*restfuladapter.RouteAdapter)
if !ok {
return "", nil, fmt.Errorf("config.GetOperationIDAndTags specified but route is not a restful v1 Route")
}
return o.config.GetOperationIDAndTags(restfulRouteAdapter.Route)
}
} else {
o.config.GetOperationIDAndTagsFromRoute = func(r common.Route) (string, []string, error) {
return r.OperationName(), nil, nil
}
}
}
if o.config.GetDefinitionName == nil {
o.config.GetDefinitionName = func(name string) (string, spec.Extensions) {
return name[strings.LastIndex(name, "/")+1:], nil
}
}
if o.config.Definitions != nil {
o.definitions = o.config.Definitions
} else {
o.definitions = o.config.GetDefinitions(func(name string) spec.Ref {
defName, _ := o.config.GetDefinitionName(name)
return spec.MustCreateRef("#/components/schemas/" + common.EscapeJsonPointer(defName))
})
}
return o
}
func (o *openAPI) buildOpenAPISpec(webServices []common.RouteContainer) error {
pathsToIgnore := util.NewTrie(o.config.IgnorePrefixes)
for _, w := range webServices {
rootPath := w.RootPath()
if pathsToIgnore.HasPrefix(rootPath) {
continue
}
commonParams, err := o.buildParameters(w.PathParameters())
if err != nil {
return err
}
for path, routes := range groupRoutesByPath(w.Routes()) {
// go-swagger has special variable definition {$NAME:*} that can only be
// used at the end of the path and it is not recognized by OpenAPI.
if strings.HasSuffix(path, ":*}") {
path = path[:len(path)-3] + "}"
}
if pathsToIgnore.HasPrefix(path) {
continue
}
// Aggregating common parameters make API spec (and generated clients) simpler
inPathCommonParamsMap, err := o.findCommonParameters(routes)
if err != nil {
return err
}
pathItem, exists := o.spec.Paths.Paths[path]
if exists {
return fmt.Errorf("duplicate webservice route has been found for path: %v", path)
}
pathItem = &spec3.Path{
PathProps: spec3.PathProps{},
}
// add web services's parameters as well as any parameters appears in all ops, as common parameters
pathItem.Parameters = append(pathItem.Parameters, commonParams...)
for _, p := range inPathCommonParamsMap {
pathItem.Parameters = append(pathItem.Parameters, p)
}
sortParameters(pathItem.Parameters)
for _, route := range routes {
op, _ := o.buildOperations(route, inPathCommonParamsMap)
sortParameters(op.Parameters)
switch strings.ToUpper(route.Method()) {
case "GET":
pathItem.Get = op
case "POST":
pathItem.Post = op
case "HEAD":
pathItem.Head = op
case "PUT":
pathItem.Put = op
case "DELETE":
pathItem.Delete = op
case "OPTIONS":
pathItem.Options = op
case "PATCH":
pathItem.Patch = op
}
}
o.spec.Paths.Paths[path] = pathItem
}
}
return nil
}
// BuildOpenAPISpec builds OpenAPI v3 spec given a list of route containers and common.Config to customize it.
//
// Deprecated: BuildOpenAPISpecFromRoutes should be used instead.
func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec3.OpenAPI, error) {
return BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(webServices), config)
}
// BuildOpenAPISpecFromRoutes builds OpenAPI v3 spec given a list of route containers and common.Config to customize it.
func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.Config) (*spec3.OpenAPI, error) {
a := newOpenAPI(config)
err := a.buildOpenAPISpec(webServices)
if err != nil {
return nil, err
}
return a.spec, nil
}
// BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it.
// BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the
// passed type names.
func BuildOpenAPIDefinitionsForResources(config *common.Config, names ...string) (map[string]*spec.Schema, error) {
o := newOpenAPI(config)
// We can discard the return value of toSchema because all we care about is the side effect of calling it.
// All the models created for this resource get added to o.swagger.Definitions
for _, name := range names {
_, err := o.toSchema(name)
if err != nil {
return nil, err
}
}
return o.spec.Components.Schemas, nil
}
func (o *openAPI) findCommonParameters(routes []common.Route) (map[interface{}]*spec3.Parameter, error) {
commonParamsMap := make(map[interface{}]*spec3.Parameter, 0)
paramOpsCountByName := make(map[interface{}]int, 0)
paramNameKindToDataMap := make(map[interface{}]common.Parameter, 0)
for _, route := range routes {
routeParamDuplicateMap := make(map[interface{}]bool)
s := ""
params := route.Parameters()
for _, param := range params {
m, _ := json.Marshal(param)
s += string(m) + "\n"
key := mapKeyFromParam(param)
if routeParamDuplicateMap[key] {
msg, _ := json.Marshal(params)
return commonParamsMap, fmt.Errorf("duplicate parameter %v for route %v, %v", param.Name(), string(msg), s)
}
routeParamDuplicateMap[key] = true
paramOpsCountByName[key]++
paramNameKindToDataMap[key] = param
}
}
for key, count := range paramOpsCountByName {
paramData := paramNameKindToDataMap[key]
if count == len(routes) && paramData.Kind() != common.BodyParameterKind {
openAPIParam, err := o.buildParameter(paramData)
if err != nil {
return commonParamsMap, err
}
commonParamsMap[key] = openAPIParam
}
}
return commonParamsMap, nil
}
func (o *openAPI) buildParameters(restParam []common.Parameter) (ret []*spec3.Parameter, err error) {
ret = make([]*spec3.Parameter, len(restParam))
for i, v := range restParam {
ret[i], err = o.buildParameter(v)
if err != nil {
return ret, err
}
}
return ret, nil
}
func (o *openAPI) buildParameter(restParam common.Parameter) (ret *spec3.Parameter, err error) {
ret = &spec3.Parameter{
ParameterProps: spec3.ParameterProps{
Name: restParam.Name(),
Description: restParam.Description(),
Required: restParam.Required(),
},
}
switch restParam.Kind() {
case common.BodyParameterKind:
return nil, nil
case common.PathParameterKind:
ret.In = "path"
if !restParam.Required() {
return ret, fmt.Errorf("path parameters should be marked as required for parameter %v", restParam)
}
case common.QueryParameterKind:
ret.In = "query"
case common.HeaderParameterKind:
ret.In = "header"
/* TODO: add support for the cookie param */
default:
return ret, fmt.Errorf("unsupported restful parameter kind : %v", restParam.Kind())
}
openAPIType, openAPIFormat := common.OpenAPITypeFormat(restParam.DataType())
if openAPIType == "" {
return ret, fmt.Errorf("non-body Restful parameter type should be a simple type, but got : %v", restParam.DataType())
}
ret.Schema = &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{openAPIType},
Format: openAPIFormat,
UniqueItems: !restParam.AllowMultiple(),
},
}
return ret, nil
}
func (o *openAPI) buildDefinitionRecursively(name string) error {
uniqueName, extensions := o.config.GetDefinitionName(name)
if _, ok := o.spec.Components.Schemas[uniqueName]; ok {
return nil
}
if item, ok := o.definitions[name]; ok {
schema := &spec.Schema{
VendorExtensible: item.Schema.VendorExtensible,
SchemaProps: item.Schema.SchemaProps,
SwaggerSchemaProps: item.Schema.SwaggerSchemaProps,
}
if extensions != nil {
if schema.Extensions == nil {
schema.Extensions = spec.Extensions{}
}
for k, v := range extensions {
schema.Extensions[k] = v
}
}
// delete the embedded v2 schema if exists, otherwise no-op
delete(schema.VendorExtensible.Extensions, common.ExtensionV2Schema)
schema = builderutil.WrapRefs(schema)
o.spec.Components.Schemas[uniqueName] = schema
for _, v := range item.Dependencies {
if err := o.buildDefinitionRecursively(v); err != nil {
return err
}
}
} else {
return fmt.Errorf("cannot find model definition for %v. If you added a new type, you may need to add +k8s:openapi-gen=true to the package or type and run code-gen again", name)
}
return nil
}
func (o *openAPI) buildDefinitionForType(name string) (string, error) {
if err := o.buildDefinitionRecursively(name); err != nil {
return "", err
}
defName, _ := o.config.GetDefinitionName(name)
return "#/components/schemas/" + common.EscapeJsonPointer(defName), nil
}
func (o *openAPI) toSchema(name string) (_ *spec.Schema, err error) {
if openAPIType, openAPIFormat := common.OpenAPITypeFormat(name); openAPIType != "" {
return &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{openAPIType},
Format: openAPIFormat,
},
}, nil
} else {
ref, err := o.buildDefinitionForType(name)
if err != nil {
return nil, err
}
return &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: spec.MustCreateRef(ref),
},
}, nil
}
}
| {
o.spec.Components.SecuritySchemes[k] = securityScheme
} | conditional_block |
repository_analytics.py | # Copyright (c) 2015 Faculty of Engineering of the University of Porto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Module for declaring classes for the analytics/metrics.
This module is the most important to understand the Schwa API.
Here the analytics structure is declared and the defect probability
is computed. Science is being done here! We use Decimal from the standard library
since results were accumulating errors.
"""
import re
from decimal import Decimal
class Metrics:
|
def strip_path(path):
""" Extracts only the file name of a path """
name_re = re.compile("[^/]*\.([a-z]+)$")
return name_re.search(path).group(0)
class RepositoryAnalytics(Metrics):
""" Represents the Analytics of a Repository.
It stores the files analytics using a dict.
Attributes:
files_analytics: A dict that maps files paths to FileAnalytics instances.
"""
def __init__(self):
super().__init__()
self.files_analytics = {}
def is_empty(self):
return len(self.files_analytics) == 0
def compute_defect_probability(self):
""" Computes the defect probability for every child """
self.defect_prob = self.defect_probability()
for file_analytics in self.files_analytics.values():
file_analytics.compute_defect_probability()
def to_dict(self):
""" Converts repository analytics to a dict.
It traverses child analytics to convert and adds some information useful
for the Sunburst chart.
Returns:
A dict of all the analytics collected from the repository.
"""
children = [f_metrics.to_dict(f_path) for f_path, f_metrics in self.files_analytics.items()]
metrics = {
"name": "root",
"children": children
}
return metrics
class FileAnalytics(Metrics):
""" A class to represent File Analytics.
It stores child classes with a dict.
Attributes:
classes_analytics: A dict that maps classes names to ClassAnalytics instances.
"""
def __init__(self):
super().__init__()
self.classes_analytics = {}
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
for class_analytics in self.classes_analytics.values():
class_analytics.compute_defect_probability()
def to_dict(self, path):
metrics_dict = super().to_dict()
metrics_dict["type"] = "file"
metrics_dict["path"] = path
metrics_dict["name"] = strip_path(path)
metrics_dict["children"] = [c_metrics.to_dict(c_name) for c_name, c_metrics in self.classes_analytics.items()]
return metrics_dict
class ClassAnalytics(Metrics):
""" A class to represent Class Analytics.
It stores child methods and classes with a dict.
Attributes:
methods_analytics: A dict that maps methods names to MethodAnalytics instances.
classes_analytics: A dict that maps classes names to ClassAnalytics instances.
"""
def __init__(self):
super().__init__()
self.methods_analytics = {}
self.classes_analytics = {}
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
for method_analytics in self.methods_analytics.values():
method_analytics.compute_defect_probability()
def to_dict(self, name):
metrics_dict = super().to_dict()
metrics_dict["type"] = "class"
metrics_dict["name"] = name
metrics_dict["children"] = [m_metrics.to_dict(m_name) for m_name, m_metrics in self.methods_analytics.items()]
metrics_dict["children"].extend([c_metrics.to_dict(c_name) for c_name,
c_metrics in self.classes_analytics.items()])
return metrics_dict
class MethodAnalytics(Metrics):
""" A class to represent Method Analytics
It the leaf of analytics.
"""
def __init__(self):
super().__init__()
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
def to_dict(self, name):
metrics_dict = super().to_dict()
metrics_dict["type"] = "method"
metrics_dict["name"] = name
return metrics_dict | """ A class for representing a set of Metrics.
In analysis, each component have their analytics represented by a Metric instance.
Attributes:
fixes_dataset: A set of (revisions_twr, fixes_twr, authors_twr) that had a bug.
FIXES_WEIGHT: A Decimal having the fixes weight for the defect probability computation.
AUTHORS_WEIGHT: A Decimal having the authors weight for the defect probability computation.
REVISIONS_WEIGHT: A Decimal having the revisions weight for the defect probability computation.
TIME_RANGE: A Decimal from 0 to 1 that changes the time range of the TWR function.
revisions_timestamps: A list that stores the timestamps of every revision.
fixes_timestamps: A list that stores the timestamps of every bug fixing.
authors_timestamps: A list that stores the timestamps of when a component had a new author.
revisions_twr: A Decimal that is an accumulator of revisions TWR (see TWR formula).
fixes_twr: A Decimal that is an accumulator of fixes TWR (see TWR formula).
authors_twr: A Decimal that is an accumulator of authors TWR (see TWR formula).
authors: A set that have all email of authors that contributed (see TWR formula).
fixes: An int that is a counter of bug fixes.
revisions: An int that is a counter of revisions.
defect_prob: A Decimal representing the defect probability.
"""
fixes_dataset = set()
FIXES_WEIGHT = Decimal(0.5)
AUTHORS_WEIGHT = Decimal(0.25)
REVISIONS_WEIGHT = Decimal(0.25)
TIME_RANGE = Decimal(0.4)
def __init__(self):
self.revisions_timestamps = []
self.fixes_timestamps = []
self.authors_timestamps = []
self.revisions_twr = 0
self.fixes_twr = 0
self.authors_twr = 0
self.authors = set()
self.fixes = 0
self.revisions = 0
self.defect_prob = 0
self.last_twr = None
@staticmethod
def twr(begin_ts, ts, current_ts):
""" Computes a Time Weighted Risk parcel.
Normalizes the timestamps and returns the TWR parcel.
Args:
begin_ts: An int representing the beginning timestamp.
ts: An int representing a specific timestamp.
current_ts: An int representing the most recent timestamp.
Returns:
A Decimal from 0 to 0.5.
"""
begin_diff = ts - begin_ts
diff = current_ts - begin_ts
if diff == 0:
normalized = 1
else:
normalized = Decimal(begin_diff) / Decimal(diff)
twr = 1 / (1 + Decimal.exp(Decimal(-12) * normalized + Decimal(2) + ((1 - Metrics.TIME_RANGE) * 10)))
return twr
@staticmethod
def list_twr(seq, begin_ts, current_ts):
""" Computes the TWR sum from a list.
By receiving a list, computes the TWR sum, by giving the begin timestamp
and the most current timestamp.
Args:
seq: A list of timestamps ints.
begin_ts: An int representing the beginning timestamp.
current_ts: An int representing the most recent timestamp.
Returns:
A float representing the TWR sum.
"""
twr_sum = 0
for ts in seq:
twr_sum += Metrics.twr(begin_ts, ts, current_ts)
return twr_sum
def update(self, begin_ts, ts, current_ts, author, is_bug_fixing):
""" Updates metrics.
By receiving the commit information, updates the existing metrics.
Args:
begin_ts: An int representing the beginning timestamp.
ts: An int representing a specific timestamp.
current_ts: An int representing the most recent timestamp.
author: A string representing the author email.
is_bug_fixing: A boolean that indicates if is a bug fixing commit
"""
# Updates fixes
if is_bug_fixing:
self.add_to_dataset(begin_ts)
self.fixes += 1
self.fixes_timestamps.append(ts)
self.fixes_twr += Metrics.twr(begin_ts, ts, current_ts)
# Updates revisions
self.revisions += 1
self.revisions_timestamps.append(ts)
self.revisions_twr += Metrics.twr(begin_ts, ts, current_ts)
# Updates authors
if author not in self.authors:
self.authors.add(author)
self.authors_timestamps.append(ts)
self.authors_twr += Metrics.twr(begin_ts, ts, current_ts)
def add_to_dataset(self, begin_ts):
""" Adds a bug case to a dataset.
Adds (revisions_twr, fixes_twr, authors_twr) when a bug fix happened,
since this metrics indicate a presence of a bug. The reasoning is that
in the last revision, the component had a bug.
Args:
begin_ts: An int that is timestamp of the first commit.
"""
if self.revisions_timestamps:
last_revision_timestamp = self.revisions_timestamps[-1]
revisions_twr = Metrics.list_twr(self.revisions_timestamps, begin_ts, last_revision_timestamp)
fixes_twr = Metrics.list_twr(self.fixes_timestamps, begin_ts, last_revision_timestamp)
authors_twr = Metrics.list_twr(self.authors_timestamps, begin_ts, last_revision_timestamp)
self.last_twr = (revisions_twr, fixes_twr, authors_twr)
Metrics.fixes_dataset.add((revisions_twr, fixes_twr, authors_twr))
def defect_probability(self):
probability = Metrics.compute_defect_probability(self.revisions_twr, self.fixes_twr, self.authors_twr,
Metrics.REVISIONS_WEIGHT, Metrics.FIXES_WEIGHT,
Metrics.AUTHORS_WEIGHT)
return probability
@staticmethod
def compute_defect_probability(r_twr, f_twr, a_twr, r_weight, f_weight, a_weight):
twr = r_twr * r_weight + f_twr * f_weight + a_twr * a_weight
probability = 1 - Decimal.exp(- twr)
return probability
def to_dict(self):
metrics_dict = {
"size": str(self.defect_prob),
"prob": str(self.defect_prob),
"revisions": self.revisions,
"fixes": self.fixes,
"authors": len(self.authors)
}
return metrics_dict | identifier_body |
repository_analytics.py | # Copyright (c) 2015 Faculty of Engineering of the University of Porto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Module for declaring classes for the analytics/metrics.
This module is the most important to understand the Schwa API.
Here the analytics structure is declared and the defect probability
is computed. Science is being done here! We use Decimal from the standard library
since results were accumulating errors.
"""
import re
from decimal import Decimal
class Metrics:
""" A class for representing a set of Metrics.
In analysis, each component have their analytics represented by a Metric instance.
Attributes:
fixes_dataset: A set of (revisions_twr, fixes_twr, authors_twr) that had a bug.
FIXES_WEIGHT: A Decimal having the fixes weight for the defect probability computation.
AUTHORS_WEIGHT: A Decimal having the authors weight for the defect probability computation.
REVISIONS_WEIGHT: A Decimal having the revisions weight for the defect probability computation.
TIME_RANGE: A Decimal from 0 to 1 that changes the time range of the TWR function.
revisions_timestamps: A list that stores the timestamps of every revision.
fixes_timestamps: A list that stores the timestamps of every bug fixing.
authors_timestamps: A list that stores the timestamps of when a component had a new author.
revisions_twr: A Decimal that is an accumulator of revisions TWR (see TWR formula).
fixes_twr: A Decimal that is an accumulator of fixes TWR (see TWR formula).
authors_twr: A Decimal that is an accumulator of authors TWR (see TWR formula).
authors: A set that have all email of authors that contributed (see TWR formula).
fixes: An int that is a counter of bug fixes.
revisions: An int that is a counter of revisions.
defect_prob: A Decimal representing the defect probability.
"""
fixes_dataset = set()
FIXES_WEIGHT = Decimal(0.5)
AUTHORS_WEIGHT = Decimal(0.25)
REVISIONS_WEIGHT = Decimal(0.25)
TIME_RANGE = Decimal(0.4)
def __init__(self):
self.revisions_timestamps = []
self.fixes_timestamps = []
self.authors_timestamps = []
self.revisions_twr = 0
self.fixes_twr = 0
self.authors_twr = 0
self.authors = set()
self.fixes = 0
self.revisions = 0
self.defect_prob = 0
self.last_twr = None
@staticmethod
def twr(begin_ts, ts, current_ts):
""" Computes a Time Weighted Risk parcel.
Normalizes the timestamps and returns the TWR parcel.
Args:
begin_ts: An int representing the beginning timestamp.
ts: An int representing a specific timestamp.
current_ts: An int representing the most recent timestamp.
Returns:
A Decimal from 0 to 0.5.
"""
begin_diff = ts - begin_ts
diff = current_ts - begin_ts
if diff == 0:
normalized = 1
else:
normalized = Decimal(begin_diff) / Decimal(diff)
twr = 1 / (1 + Decimal.exp(Decimal(-12) * normalized + Decimal(2) + ((1 - Metrics.TIME_RANGE) * 10)))
return twr
@staticmethod
def list_twr(seq, begin_ts, current_ts):
""" Computes the TWR sum from a list.
By receiving a list, computes the TWR sum, by giving the begin timestamp
and the most current timestamp.
Args:
seq: A list of timestamps ints.
begin_ts: An int representing the beginning timestamp.
current_ts: An int representing the most recent timestamp.
Returns:
A float representing the TWR sum.
"""
twr_sum = 0
for ts in seq:
twr_sum += Metrics.twr(begin_ts, ts, current_ts)
return twr_sum
def update(self, begin_ts, ts, current_ts, author, is_bug_fixing):
""" Updates metrics.
By receiving the commit information, updates the existing metrics.
Args:
begin_ts: An int representing the beginning timestamp.
ts: An int representing a specific timestamp.
current_ts: An int representing the most recent timestamp.
author: A string representing the author email.
is_bug_fixing: A boolean that indicates if is a bug fixing commit
"""
# Updates fixes
if is_bug_fixing:
self.add_to_dataset(begin_ts)
self.fixes += 1
self.fixes_timestamps.append(ts)
self.fixes_twr += Metrics.twr(begin_ts, ts, current_ts)
# Updates revisions
self.revisions += 1
self.revisions_timestamps.append(ts)
self.revisions_twr += Metrics.twr(begin_ts, ts, current_ts)
# Updates authors
if author not in self.authors:
self.authors.add(author)
self.authors_timestamps.append(ts)
self.authors_twr += Metrics.twr(begin_ts, ts, current_ts)
def add_to_dataset(self, begin_ts):
""" Adds a bug case to a dataset.
Adds (revisions_twr, fixes_twr, authors_twr) when a bug fix happened,
since this metrics indicate a presence of a bug. The reasoning is that
in the last revision, the component had a bug.
Args:
begin_ts: An int that is timestamp of the first commit.
"""
if self.revisions_timestamps:
last_revision_timestamp = self.revisions_timestamps[-1]
revisions_twr = Metrics.list_twr(self.revisions_timestamps, begin_ts, last_revision_timestamp)
fixes_twr = Metrics.list_twr(self.fixes_timestamps, begin_ts, last_revision_timestamp)
authors_twr = Metrics.list_twr(self.authors_timestamps, begin_ts, last_revision_timestamp)
self.last_twr = (revisions_twr, fixes_twr, authors_twr)
Metrics.fixes_dataset.add((revisions_twr, fixes_twr, authors_twr))
def defect_probability(self):
probability = Metrics.compute_defect_probability(self.revisions_twr, self.fixes_twr, self.authors_twr,
Metrics.REVISIONS_WEIGHT, Metrics.FIXES_WEIGHT,
Metrics.AUTHORS_WEIGHT)
return probability
@staticmethod
def compute_defect_probability(r_twr, f_twr, a_twr, r_weight, f_weight, a_weight):
twr = r_twr * r_weight + f_twr * f_weight + a_twr * a_weight
probability = 1 - Decimal.exp(- twr)
return probability
def to_dict(self):
metrics_dict = {
"size": str(self.defect_prob),
"prob": str(self.defect_prob),
"revisions": self.revisions,
"fixes": self.fixes,
"authors": len(self.authors)
}
return metrics_dict
def strip_path(path):
""" Extracts only the file name of a path """
name_re = re.compile("[^/]*\.([a-z]+)$")
return name_re.search(path).group(0)
class RepositoryAnalytics(Metrics):
""" Represents the Analytics of a Repository.
It stores the files analytics using a dict.
Attributes:
files_analytics: A dict that maps files paths to FileAnalytics instances.
"""
def __init__(self):
super().__init__()
self.files_analytics = {}
def is_empty(self):
return len(self.files_analytics) == 0
def compute_defect_probability(self):
""" Computes the defect probability for every child """
self.defect_prob = self.defect_probability()
for file_analytics in self.files_analytics.values():
file_analytics.compute_defect_probability()
def to_dict(self):
""" Converts repository analytics to a dict.
It traverses child analytics to convert and adds some information useful
for the Sunburst chart.
Returns:
A dict of all the analytics collected from the repository.
"""
children = [f_metrics.to_dict(f_path) for f_path, f_metrics in self.files_analytics.items()]
metrics = {
"name": "root",
"children": children
}
return metrics
class FileAnalytics(Metrics):
""" A class to represent File Analytics.
It stores child classes with a dict.
Attributes:
classes_analytics: A dict that maps classes names to ClassAnalytics instances.
"""
def | (self):
super().__init__()
self.classes_analytics = {}
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
for class_analytics in self.classes_analytics.values():
class_analytics.compute_defect_probability()
def to_dict(self, path):
metrics_dict = super().to_dict()
metrics_dict["type"] = "file"
metrics_dict["path"] = path
metrics_dict["name"] = strip_path(path)
metrics_dict["children"] = [c_metrics.to_dict(c_name) for c_name, c_metrics in self.classes_analytics.items()]
return metrics_dict
class ClassAnalytics(Metrics):
""" A class to represent Class Analytics.
It stores child methods and classes with a dict.
Attributes:
methods_analytics: A dict that maps methods names to MethodAnalytics instances.
classes_analytics: A dict that maps classes names to ClassAnalytics instances.
"""
def __init__(self):
super().__init__()
self.methods_analytics = {}
self.classes_analytics = {}
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
for method_analytics in self.methods_analytics.values():
method_analytics.compute_defect_probability()
def to_dict(self, name):
metrics_dict = super().to_dict()
metrics_dict["type"] = "class"
metrics_dict["name"] = name
metrics_dict["children"] = [m_metrics.to_dict(m_name) for m_name, m_metrics in self.methods_analytics.items()]
metrics_dict["children"].extend([c_metrics.to_dict(c_name) for c_name,
c_metrics in self.classes_analytics.items()])
return metrics_dict
class MethodAnalytics(Metrics):
""" A class to represent Method Analytics
It the leaf of analytics.
"""
def __init__(self):
super().__init__()
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
def to_dict(self, name):
metrics_dict = super().to_dict()
metrics_dict["type"] = "method"
metrics_dict["name"] = name
return metrics_dict | __init__ | identifier_name |
repository_analytics.py | # Copyright (c) 2015 Faculty of Engineering of the University of Porto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| since results were accumulating errors.
"""
import re
from decimal import Decimal
class Metrics:
""" A class for representing a set of Metrics.
In analysis, each component have their analytics represented by a Metric instance.
Attributes:
fixes_dataset: A set of (revisions_twr, fixes_twr, authors_twr) that had a bug.
FIXES_WEIGHT: A Decimal having the fixes weight for the defect probability computation.
AUTHORS_WEIGHT: A Decimal having the authors weight for the defect probability computation.
REVISIONS_WEIGHT: A Decimal having the revisions weight for the defect probability computation.
TIME_RANGE: A Decimal from 0 to 1 that changes the time range of the TWR function.
revisions_timestamps: A list that stores the timestamps of every revision.
fixes_timestamps: A list that stores the timestamps of every bug fixing.
authors_timestamps: A list that stores the timestamps of when a component had a new author.
revisions_twr: A Decimal that is an accumulator of revisions TWR (see TWR formula).
fixes_twr: A Decimal that is an accumulator of fixes TWR (see TWR formula).
authors_twr: A Decimal that is an accumulator of authors TWR (see TWR formula).
authors: A set that have all email of authors that contributed (see TWR formula).
fixes: An int that is a counter of bug fixes.
revisions: An int that is a counter of revisions.
defect_prob: A Decimal representing the defect probability.
"""
fixes_dataset = set()
FIXES_WEIGHT = Decimal(0.5)
AUTHORS_WEIGHT = Decimal(0.25)
REVISIONS_WEIGHT = Decimal(0.25)
TIME_RANGE = Decimal(0.4)
def __init__(self):
self.revisions_timestamps = []
self.fixes_timestamps = []
self.authors_timestamps = []
self.revisions_twr = 0
self.fixes_twr = 0
self.authors_twr = 0
self.authors = set()
self.fixes = 0
self.revisions = 0
self.defect_prob = 0
self.last_twr = None
@staticmethod
def twr(begin_ts, ts, current_ts):
""" Computes a Time Weighted Risk parcel.
Normalizes the timestamps and returns the TWR parcel.
Args:
begin_ts: An int representing the beginning timestamp.
ts: An int representing a specific timestamp.
current_ts: An int representing the most recent timestamp.
Returns:
A Decimal from 0 to 0.5.
"""
begin_diff = ts - begin_ts
diff = current_ts - begin_ts
if diff == 0:
normalized = 1
else:
normalized = Decimal(begin_diff) / Decimal(diff)
twr = 1 / (1 + Decimal.exp(Decimal(-12) * normalized + Decimal(2) + ((1 - Metrics.TIME_RANGE) * 10)))
return twr
@staticmethod
def list_twr(seq, begin_ts, current_ts):
""" Computes the TWR sum from a list.
By receiving a list, computes the TWR sum, by giving the begin timestamp
and the most current timestamp.
Args:
seq: A list of timestamps ints.
begin_ts: An int representing the beginning timestamp.
current_ts: An int representing the most recent timestamp.
Returns:
A float representing the TWR sum.
"""
twr_sum = 0
for ts in seq:
twr_sum += Metrics.twr(begin_ts, ts, current_ts)
return twr_sum
def update(self, begin_ts, ts, current_ts, author, is_bug_fixing):
""" Updates metrics.
By receiving the commit information, updates the existing metrics.
Args:
begin_ts: An int representing the beginning timestamp.
ts: An int representing a specific timestamp.
current_ts: An int representing the most recent timestamp.
author: A string representing the author email.
is_bug_fixing: A boolean that indicates if is a bug fixing commit
"""
# Updates fixes
if is_bug_fixing:
self.add_to_dataset(begin_ts)
self.fixes += 1
self.fixes_timestamps.append(ts)
self.fixes_twr += Metrics.twr(begin_ts, ts, current_ts)
# Updates revisions
self.revisions += 1
self.revisions_timestamps.append(ts)
self.revisions_twr += Metrics.twr(begin_ts, ts, current_ts)
# Updates authors
if author not in self.authors:
self.authors.add(author)
self.authors_timestamps.append(ts)
self.authors_twr += Metrics.twr(begin_ts, ts, current_ts)
def add_to_dataset(self, begin_ts):
""" Adds a bug case to a dataset.
Adds (revisions_twr, fixes_twr, authors_twr) when a bug fix happened,
since this metrics indicate a presence of a bug. The reasoning is that
in the last revision, the component had a bug.
Args:
begin_ts: An int that is timestamp of the first commit.
"""
if self.revisions_timestamps:
last_revision_timestamp = self.revisions_timestamps[-1]
revisions_twr = Metrics.list_twr(self.revisions_timestamps, begin_ts, last_revision_timestamp)
fixes_twr = Metrics.list_twr(self.fixes_timestamps, begin_ts, last_revision_timestamp)
authors_twr = Metrics.list_twr(self.authors_timestamps, begin_ts, last_revision_timestamp)
self.last_twr = (revisions_twr, fixes_twr, authors_twr)
Metrics.fixes_dataset.add((revisions_twr, fixes_twr, authors_twr))
def defect_probability(self):
probability = Metrics.compute_defect_probability(self.revisions_twr, self.fixes_twr, self.authors_twr,
Metrics.REVISIONS_WEIGHT, Metrics.FIXES_WEIGHT,
Metrics.AUTHORS_WEIGHT)
return probability
@staticmethod
def compute_defect_probability(r_twr, f_twr, a_twr, r_weight, f_weight, a_weight):
twr = r_twr * r_weight + f_twr * f_weight + a_twr * a_weight
probability = 1 - Decimal.exp(- twr)
return probability
def to_dict(self):
metrics_dict = {
"size": str(self.defect_prob),
"prob": str(self.defect_prob),
"revisions": self.revisions,
"fixes": self.fixes,
"authors": len(self.authors)
}
return metrics_dict
def strip_path(path):
""" Extracts only the file name of a path """
name_re = re.compile("[^/]*\.([a-z]+)$")
return name_re.search(path).group(0)
class RepositoryAnalytics(Metrics):
""" Represents the Analytics of a Repository.
It stores the files analytics using a dict.
Attributes:
files_analytics: A dict that maps files paths to FileAnalytics instances.
"""
def __init__(self):
super().__init__()
self.files_analytics = {}
def is_empty(self):
return len(self.files_analytics) == 0
def compute_defect_probability(self):
""" Computes the defect probability for every child """
self.defect_prob = self.defect_probability()
for file_analytics in self.files_analytics.values():
file_analytics.compute_defect_probability()
def to_dict(self):
""" Converts repository analytics to a dict.
It traverses child analytics to convert and adds some information useful
for the Sunburst chart.
Returns:
A dict of all the analytics collected from the repository.
"""
children = [f_metrics.to_dict(f_path) for f_path, f_metrics in self.files_analytics.items()]
metrics = {
"name": "root",
"children": children
}
return metrics
class FileAnalytics(Metrics):
""" A class to represent File Analytics.
It stores child classes with a dict.
Attributes:
classes_analytics: A dict that maps classes names to ClassAnalytics instances.
"""
def __init__(self):
super().__init__()
self.classes_analytics = {}
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
for class_analytics in self.classes_analytics.values():
class_analytics.compute_defect_probability()
def to_dict(self, path):
metrics_dict = super().to_dict()
metrics_dict["type"] = "file"
metrics_dict["path"] = path
metrics_dict["name"] = strip_path(path)
metrics_dict["children"] = [c_metrics.to_dict(c_name) for c_name, c_metrics in self.classes_analytics.items()]
return metrics_dict
class ClassAnalytics(Metrics):
""" A class to represent Class Analytics.
It stores child methods and classes with a dict.
Attributes:
methods_analytics: A dict that maps methods names to MethodAnalytics instances.
classes_analytics: A dict that maps classes names to ClassAnalytics instances.
"""
def __init__(self):
super().__init__()
self.methods_analytics = {}
self.classes_analytics = {}
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
for method_analytics in self.methods_analytics.values():
method_analytics.compute_defect_probability()
def to_dict(self, name):
metrics_dict = super().to_dict()
metrics_dict["type"] = "class"
metrics_dict["name"] = name
metrics_dict["children"] = [m_metrics.to_dict(m_name) for m_name, m_metrics in self.methods_analytics.items()]
metrics_dict["children"].extend([c_metrics.to_dict(c_name) for c_name,
c_metrics in self.classes_analytics.items()])
return metrics_dict
class MethodAnalytics(Metrics):
""" A class to represent Method Analytics
It the leaf of analytics.
"""
def __init__(self):
super().__init__()
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
def to_dict(self, name):
metrics_dict = super().to_dict()
metrics_dict["type"] = "method"
metrics_dict["name"] = name
return metrics_dict | """ Module for declaring classes for the analytics/metrics.
This module is the most important to understand the Schwa API.
Here the analytics structure is declared and the defect probability
is computed. Science is being done here! We use Decimal from the standard library | random_line_split |
repository_analytics.py | # Copyright (c) 2015 Faculty of Engineering of the University of Porto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Module for declaring classes for the analytics/metrics.
This module is the most important to understand the Schwa API.
Here the analytics structure is declared and the defect probability
is computed. Science is being done here! We use Decimal from the standard library
since results were accumulating errors.
"""
import re
from decimal import Decimal
class Metrics:
""" A class for representing a set of Metrics.
In analysis, each component have their analytics represented by a Metric instance.
Attributes:
fixes_dataset: A set of (revisions_twr, fixes_twr, authors_twr) that had a bug.
FIXES_WEIGHT: A Decimal having the fixes weight for the defect probability computation.
AUTHORS_WEIGHT: A Decimal having the authors weight for the defect probability computation.
REVISIONS_WEIGHT: A Decimal having the revisions weight for the defect probability computation.
TIME_RANGE: A Decimal from 0 to 1 that changes the time range of the TWR function.
revisions_timestamps: A list that stores the timestamps of every revision.
fixes_timestamps: A list that stores the timestamps of every bug fixing.
authors_timestamps: A list that stores the timestamps of when a component had a new author.
revisions_twr: A Decimal that is an accumulator of revisions TWR (see TWR formula).
fixes_twr: A Decimal that is an accumulator of fixes TWR (see TWR formula).
authors_twr: A Decimal that is an accumulator of authors TWR (see TWR formula).
authors: A set that have all email of authors that contributed (see TWR formula).
fixes: An int that is a counter of bug fixes.
revisions: An int that is a counter of revisions.
defect_prob: A Decimal representing the defect probability.
"""
fixes_dataset = set()
FIXES_WEIGHT = Decimal(0.5)
AUTHORS_WEIGHT = Decimal(0.25)
REVISIONS_WEIGHT = Decimal(0.25)
TIME_RANGE = Decimal(0.4)
def __init__(self):
self.revisions_timestamps = []
self.fixes_timestamps = []
self.authors_timestamps = []
self.revisions_twr = 0
self.fixes_twr = 0
self.authors_twr = 0
self.authors = set()
self.fixes = 0
self.revisions = 0
self.defect_prob = 0
self.last_twr = None
@staticmethod
def twr(begin_ts, ts, current_ts):
""" Computes a Time Weighted Risk parcel.
Normalizes the timestamps and returns the TWR parcel.
Args:
begin_ts: An int representing the beginning timestamp.
ts: An int representing a specific timestamp.
current_ts: An int representing the most recent timestamp.
Returns:
A Decimal from 0 to 0.5.
"""
begin_diff = ts - begin_ts
diff = current_ts - begin_ts
if diff == 0:
normalized = 1
else:
normalized = Decimal(begin_diff) / Decimal(diff)
twr = 1 / (1 + Decimal.exp(Decimal(-12) * normalized + Decimal(2) + ((1 - Metrics.TIME_RANGE) * 10)))
return twr
@staticmethod
def list_twr(seq, begin_ts, current_ts):
""" Computes the TWR sum from a list.
By receiving a list, computes the TWR sum, by giving the begin timestamp
and the most current timestamp.
Args:
seq: A list of timestamps ints.
begin_ts: An int representing the beginning timestamp.
current_ts: An int representing the most recent timestamp.
Returns:
A float representing the TWR sum.
"""
twr_sum = 0
for ts in seq:
twr_sum += Metrics.twr(begin_ts, ts, current_ts)
return twr_sum
def update(self, begin_ts, ts, current_ts, author, is_bug_fixing):
""" Updates metrics.
By receiving the commit information, updates the existing metrics.
Args:
begin_ts: An int representing the beginning timestamp.
ts: An int representing a specific timestamp.
current_ts: An int representing the most recent timestamp.
author: A string representing the author email.
is_bug_fixing: A boolean that indicates if is a bug fixing commit
"""
# Updates fixes
if is_bug_fixing:
|
# Updates revisions
self.revisions += 1
self.revisions_timestamps.append(ts)
self.revisions_twr += Metrics.twr(begin_ts, ts, current_ts)
# Updates authors
if author not in self.authors:
self.authors.add(author)
self.authors_timestamps.append(ts)
self.authors_twr += Metrics.twr(begin_ts, ts, current_ts)
def add_to_dataset(self, begin_ts):
""" Adds a bug case to a dataset.
Adds (revisions_twr, fixes_twr, authors_twr) when a bug fix happened,
since this metrics indicate a presence of a bug. The reasoning is that
in the last revision, the component had a bug.
Args:
begin_ts: An int that is timestamp of the first commit.
"""
if self.revisions_timestamps:
last_revision_timestamp = self.revisions_timestamps[-1]
revisions_twr = Metrics.list_twr(self.revisions_timestamps, begin_ts, last_revision_timestamp)
fixes_twr = Metrics.list_twr(self.fixes_timestamps, begin_ts, last_revision_timestamp)
authors_twr = Metrics.list_twr(self.authors_timestamps, begin_ts, last_revision_timestamp)
self.last_twr = (revisions_twr, fixes_twr, authors_twr)
Metrics.fixes_dataset.add((revisions_twr, fixes_twr, authors_twr))
def defect_probability(self):
probability = Metrics.compute_defect_probability(self.revisions_twr, self.fixes_twr, self.authors_twr,
Metrics.REVISIONS_WEIGHT, Metrics.FIXES_WEIGHT,
Metrics.AUTHORS_WEIGHT)
return probability
@staticmethod
def compute_defect_probability(r_twr, f_twr, a_twr, r_weight, f_weight, a_weight):
twr = r_twr * r_weight + f_twr * f_weight + a_twr * a_weight
probability = 1 - Decimal.exp(- twr)
return probability
def to_dict(self):
metrics_dict = {
"size": str(self.defect_prob),
"prob": str(self.defect_prob),
"revisions": self.revisions,
"fixes": self.fixes,
"authors": len(self.authors)
}
return metrics_dict
def strip_path(path):
""" Extracts only the file name of a path """
name_re = re.compile("[^/]*\.([a-z]+)$")
return name_re.search(path).group(0)
class RepositoryAnalytics(Metrics):
""" Represents the Analytics of a Repository.
It stores the files analytics using a dict.
Attributes:
files_analytics: A dict that maps files paths to FileAnalytics instances.
"""
def __init__(self):
super().__init__()
self.files_analytics = {}
def is_empty(self):
return len(self.files_analytics) == 0
def compute_defect_probability(self):
""" Computes the defect probability for every child """
self.defect_prob = self.defect_probability()
for file_analytics in self.files_analytics.values():
file_analytics.compute_defect_probability()
def to_dict(self):
""" Converts repository analytics to a dict.
It traverses child analytics to convert and adds some information useful
for the Sunburst chart.
Returns:
A dict of all the analytics collected from the repository.
"""
children = [f_metrics.to_dict(f_path) for f_path, f_metrics in self.files_analytics.items()]
metrics = {
"name": "root",
"children": children
}
return metrics
class FileAnalytics(Metrics):
""" A class to represent File Analytics.
It stores child classes with a dict.
Attributes:
classes_analytics: A dict that maps classes names to ClassAnalytics instances.
"""
def __init__(self):
super().__init__()
self.classes_analytics = {}
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
for class_analytics in self.classes_analytics.values():
class_analytics.compute_defect_probability()
def to_dict(self, path):
metrics_dict = super().to_dict()
metrics_dict["type"] = "file"
metrics_dict["path"] = path
metrics_dict["name"] = strip_path(path)
metrics_dict["children"] = [c_metrics.to_dict(c_name) for c_name, c_metrics in self.classes_analytics.items()]
return metrics_dict
class ClassAnalytics(Metrics):
""" A class to represent Class Analytics.
It stores child methods and classes with a dict.
Attributes:
methods_analytics: A dict that maps methods names to MethodAnalytics instances.
classes_analytics: A dict that maps classes names to ClassAnalytics instances.
"""
def __init__(self):
super().__init__()
self.methods_analytics = {}
self.classes_analytics = {}
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
for method_analytics in self.methods_analytics.values():
method_analytics.compute_defect_probability()
def to_dict(self, name):
metrics_dict = super().to_dict()
metrics_dict["type"] = "class"
metrics_dict["name"] = name
metrics_dict["children"] = [m_metrics.to_dict(m_name) for m_name, m_metrics in self.methods_analytics.items()]
metrics_dict["children"].extend([c_metrics.to_dict(c_name) for c_name,
c_metrics in self.classes_analytics.items()])
return metrics_dict
class MethodAnalytics(Metrics):
""" A class to represent Method Analytics
It the leaf of analytics.
"""
def __init__(self):
super().__init__()
def compute_defect_probability(self):
self.defect_prob = self.defect_probability()
def to_dict(self, name):
metrics_dict = super().to_dict()
metrics_dict["type"] = "method"
metrics_dict["name"] = name
return metrics_dict | self.add_to_dataset(begin_ts)
self.fixes += 1
self.fixes_timestamps.append(ts)
self.fixes_twr += Metrics.twr(begin_ts, ts, current_ts) | conditional_block |
main.rs | #![allow(dead_code)]
#![allow(unused_variables)]
use std::collections::{HashMap, HashSet};
use std::io::stdin;
use std::mem;
mod pm;
// const MEANING_OF_LIFE: u16 = 456; // no fixed address
fn main() {
// primitive_types ();
// operators();
// scope_and_shadowing();
// println!("const MEANING_OF_LIFE = {}", MEANING_OF_LIFE)
// if_statement();
// while_and_loop();
// match_statecment();
// for_loop();
// combination_lock();
// structures();
// enums();
// unions();
// process_value();
// option_T()
// array();
// slices();
// tuples();
// pm::pattern_matching();
// generics();
// vectors();
// hashmaps();
// hashsets();
// functions();
// methods();
closures();
// h_o_functions();
}
fn h_o_functions() {
}
fn closures() {
let sh = say_hello;
sh();
let plus_one = |x:i32| -> i32 {x+1};
let a = 6;
println!("{} +1 = {}", a, plus_one(a));
let plus_two = |x:isize| {
let mut z = x;
z+=2;
z
};
println!("{} +2 = {}", 3, plus_two(3));
}
fn say_hello() {println!("Hello")}
fn methods() {
struct Point {
x: f64,
y: f64
}
struct Line {
start: Point,
end: Point,
}
impl Line {
fn len(&self) -> f64 {
let dx = self.start.x - self.end.x;
let dy = self.start.y - self.end.y;
(dx*dx + dy*dy).sqrt()
}
}
let p = Point {x: 3.0, y: 4.0};
let p2 = Point {x: 5.0, y: 10.0};
let myline = Line { start: p, end: p2};
println!("lengh = {}", myline.len())
}
fn functions() {
print_value(33);
let mut z = 1;
increase1(&mut z);
println!("z is {}", z);
let a = 3;
let b = 5;
let p = product(a, b);
}
fn product(x: i32, y: i32) -> i32 {
// return x*y;
x * y
}
fn increase1(x: &mut i32) {
*x += 1;
}
fn print_value(x: i32) {
println!("x is {}", x)
}
fn hashsets() {
let mut greeks = HashSet::new();
greeks.insert("alfa");
greeks.insert("delta");
greeks.insert("hamma");
greeks.insert("delta");
println!("{:?}", greeks);
let added_delta = greeks.insert("delta");
if added_delta {
println!("We added delta! hooray!")
}
let added_vega = greeks.insert("vega");
if added_vega {
println!("We added vega! hooray!")
}
if !greeks.contains("kappa") {
println!("We don't have kappa")
}
}
fn hashmaps() {
let mut shapes = HashMap::new();
shapes.insert(String::from("triangle"), 3);
shapes.insert(String::from("square"), 4);
println!("hashmaps: {:?}", shapes);
println!("a square has {} sides", shapes["square"]);
shapes.insert("square".into(), 5);
println!("{:?}", shapes);
for (key, value) in &shapes {
println!("key: {}, value: {}", key, value);
}
shapes.entry("circle".into()).or_insert(1);
{
let actual = shapes.entry("circle".into()).or_insert(2);
*actual = 0;
}
println!("{:?}", shapes);
let _1_5: HashSet<_> = (1..=5).collect();
let _6_10: HashSet<_> = (6..=10).collect();
let _1_10: HashSet<_> = (1..=10).collect();
let _2_8: HashSet<_> = (2..=8).collect();
//subset
}
fn vectors() {
let mut a = Vec::new();
a.push(1);
a.push(2);
a.push(3);
println!("a = {:?}", a);
a.push(44);
println!("a = {:?}", a);
//usize isize
let idx: usize = 2;
println!("a[2] = {}", a[idx]);
match a.get(6) {
Some(x) => println!("a[6] = {}", x),
None => println!("error, no such element")
}
for x in &a { println!("{}", x) }
a.push(77);
println!("{:?}", a);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
while let Some(x) = a.pop() {
println!("{}", x)
}
}
fn generics() {
struct Point<T, V> {
x: T,
y: V,
}
struct Point1<T> {
x: T,
y: T,
}
struct Line<T> {
start: Point1<T>,
end: Point1<T>,
}
let a: Point<u16, i32> = Point { x: 0, y: 0 };
let b: Point<f64, f32> = Point { x: 1.2, y: 3.4 };
let c: Point<i32, f64> = Point { x: 3, y: 5.0 };
let d: Point<i32, f64> = Point { x: 1, y: 4.5 };
let x: Point1<f64> = Point1 { x: 1f64, y: 2f64 };
let y: Point1<f64> = Point1 { x: 3f64, y: 4f64 };
let myline = Line { start: x, end: y };
}
fn tuples() {
let x = 3;
let y = 4;
let sp = sum_and_product(x, y);
let (sum, product) = sp;
println!("sp = {:?}", (sum, product));
println!("{0} + {1} = {2}", x, y, sum);
println!("{0} + {1} = {2}", x, y, product);
let sp2 = sum_and_product(4, 7);
let combined = (sp, sp2);
println!("{1:?} , {2:?}, {0:?} ", combined.0, combined.1, combined);
println!("{1:?} , {2:?}, {0:?} ", combined.0, (combined.0).0, (combined
.0).1);
let ((c, d), (e, f)) = combined;
println!("{},{},{},{}", c, d, e, f);
let foo = (true, 42.0, -1i8);
println!("{:?}", foo);
let meaning = 42;
println!("{:?}", meaning);
}
fn sum_and_product(x: i32, y: i32) -> (i32, i32) {
(x + y, x * y)
}
fn use_slices(slice: &mut [i32]) {
println!("first elem = {}, len = {}", slice[0], slice.len());
slice[0] = 4444;
}
fn slices() {
let mut data = [1, 2, 3, 4, 5];
use_slices(&mut data[1..4]);
use_slices(&mut data);
println!("{:?}", data)
}
fn array() {
let mut a: [i32; 5] = [1, 2, 3, 4, 5, ];
println!("a has {} elements, first is {}", a.len(), a[0]);
a[0] = 321;
println!("a has {} elements, first is {}", a.len(), a[0]);
println!("{:?}", a);
if a == [321, 2, 3, 4, 5] {
println!("match");
}
let b = [1u64; 10];
for i in 0..b.len() {
println!("{}", b[i])
};
println!("b took up {} bytes", mem::size_of_val(&b));
println!("b {:?}", b);
let mtx: [[f64; 3]; 2] = [
[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6]
];
println!("mtx = {:?}", mtx);
for i in 0..mtx.len() {
for j in 0..mtx[i].len() {
if i == j {
print!("diagonal: {} ", mtx[i][j]);
}
}
}
println!();
}
union IntOrFloat {
i: i32,
f: f32,
}
fn option_t() {
let x = 3.0;
let y = 1.0;
//Option
let result =
if y != 0.0 { Some(x / y) } else { None };
match result {
Some(z) => {
println!("{}/{} ={}", x, y, z)
}
None => println!("cannot divide by zero")
}
if let Some(z) = result {
println!("result = {}", z)
}
}
fn process_value(iof: IntOrFloat) {
unsafe {
match iof {
IntOrFloat { i: 42 } => {
println!("meaning of life value 42", );
}
IntOrFloat { f } => {
println!("value = {}", f)
}
}
}
}
fn unions() {
let mut iof = IntOrFloat { i: 123 };
iof.i = 234;
let value = unsafe { iof.i };
println!("iof.i = {}", value);
process_value(IntOrFloat { i: 5 })
}
fn enums() {
enum Color {
Red,
Green,
Blue,
RgbColor(u8, u8, u8),
//tuple
Cmyk { cyan: u8, magenta: u8, yellow: u8, black: u8 }, //struct
}
let c: Color = Color::Cmyk { cyan: 0, magenta: 128, yellow: 0, black: 0 };
match c {
Color::Red => println!("r"),
Color::Green => println!("g"),
Color::Blue => println!("b"),
Color::RgbColor(0, 0, 0) => println!("color: black"),
Color::RgbColor(r, g, b) => println!("rgb({},{},{})", r, g, b),
Color::Cmyk { cyan: _, magenta: _, yellow: _, black: 255 } =>
println!("black"),
Color::Cmyk { cyan: a, magenta: b, yellow: c, black: d } => println!("cmyk({},{},{},{})", a, b,
c, d),
}
}
fn structures() {
struct Point {
x: f64,
y: f64,
}
let p = Point { x: 34.5, y: 4.0 };
println!("point p is at ({}, {})", p.x, p.y);
let p2 = Point { x: 3.0, y: 4.0 };
struct Line {
start: Point,
end: Point,
}
let myline = Line { start: p, end: p2 };
}
enum State {
Locked,
Failed,
Unlocked,
}
fn combination_lock() {
let code = String::from("1234");
let mut state = State::Locked;
let mut entry = String::new();
println!(" string = {}, code = {}", entry, code);
loop {
match state {
State::Locked => {
let mut input = String::new();
match stdin().read_line(&mut input) {
Ok(_) => entry.push_str(&input.trim_end()),
Err(_) => continue,
}
if entry == code {
state = State::Unlocked;
continue;
}
if !code.starts_with(&entry) {
state = State::Failed
}
}
State::Failed => {
println!("Failed");
entry.clear();
state = State::Locked;
continue;
}
State::Unlocked => {
println!("Unlocked");
return;
}
}
}
}
fn match_statement() {
let country_code = 44;
let country = match country_code {
44 => "UK",
46 => "Sweden",
7 => "Russia",
1..=999 => "unknown",
_ => "invalid",
};
println!("the country code {} is {}", country_code, country)
}
fn for_loop() {
for x in 1..11 {
if x == 3 {
continue;
}
if x == 8 {
break;
}
println!("x = {}", x)
}
for (pos, y) in (30..42).enumerate() {
println!("{} : {}", pos, y)
}
}
fn while_and_loop() {
let mut x = 1;
while x < 1000 {
x *= 2;
if x == 64 {
continue;
}
println!("x = {}", x)
}
let mut y = 1;
loop {
y *= 2;
println!("y = {}", y);
if y == 1 << 10 {
break;
}
}
}
fn if_statement() {
let temp = 25;
if temp > 30 {
println!("really hot outside")
} else if temp < 10 {
println!("really cold!")
} else {
println!("temperature is OK")
}
let day = if temp > 20 { "sunny" } else { "cloudy" };
println!("today is {}", day);
println!(
"is it {}",
if temp > 20 {
"hot"
} else if temp < 10 {
"cold"
} else {
"OK"
}
);
println!(
"it is {}",
if temp > 20 {
if temp > 30 {
"very hot"
} else {
"hot"
}
} else if temp < 10 {
"cold"
} else {
"OK"
}
)
}
fn | () {
let a = 123;
println!("a = {}", a);
let a = 777;
println!("a = {}", a);
{
let a = 888;
let b = 456;
println!("a = {}, b = {}", a, b);
}
}
fn operators() {
//arithmetic operators
let mut a = 2 + 3 * 4;
println!("{}", a);
a += 1;
a -= 2;
println!("remainder of {}/{} = {}", a, 3, (a % 3));
// let mut a_cubed = i16::pow(a, 3);
// let mut a_cubed = i32::pow( 4);
let b = 2.5;
let b_cubed = f64::powi(b, 3);
println!("b = {}", b);
let b_to_pi = f64::powf(b, std::f64::consts::PI);
println!("{} cubed = {}", b, b_cubed);
println!("{} pied = {}", b, b_to_pi);
//bitwise rotate
let c = 1 | 2;
println!("1 | 2 = {}", c);
let two_to_10 = 1 << 10;
println!("2^10 = {}", two_to_10);
//logical
let pi_less_4 = std::f64::consts::PI < 4.0;
let x = 5;
let x_is_5 = x == 5;
}
fn primitive_types() {
let a: u8 = 123;
let b: i8 = -123;
// println!("a = {}, b ={}", a, b);
// a = 432;
// b = 567;
// b = 122;
let mut c = 123456789; // 32-bit signed integer
println!("c = {}, size = {} bytes", c, mem::size_of_val(&c));
c = -1;
println!("c = {} after modification", c);
let z: isize = 123456789;
let size_of_z = mem::size_of_val(&z);
println!("z = {}, size = {}, {}-bit os", z, size_of_z, size_of_z * 8);
let d: char = 'x';
println!("d = {}, size = {}", d, mem::size_of_val(&d));
let e = 2.5; // double precision value, 8 bytes or 64 bits, f 64
println!("e = {}, size = {}", e, mem::size_of_val(&e));
let g = false;
println!("g = {}, size = {}", g, mem::size_of_val(&g));
}
| scope_and_shadowing | identifier_name |
main.rs | #![allow(dead_code)]
#![allow(unused_variables)]
use std::collections::{HashMap, HashSet};
use std::io::stdin;
use std::mem;
mod pm;
// const MEANING_OF_LIFE: u16 = 456; // no fixed address
fn main() {
// primitive_types ();
// operators();
// scope_and_shadowing();
// println!("const MEANING_OF_LIFE = {}", MEANING_OF_LIFE)
// if_statement();
// while_and_loop();
// match_statecment();
// for_loop();
// combination_lock();
// structures();
// enums();
// unions();
// process_value();
// option_T()
// array();
// slices();
// tuples();
// pm::pattern_matching();
// generics();
// vectors();
// hashmaps();
// hashsets();
// functions();
// methods();
closures();
// h_o_functions();
}
fn h_o_functions() {
}
fn closures() {
let sh = say_hello;
sh();
let plus_one = |x:i32| -> i32 {x+1};
let a = 6;
println!("{} +1 = {}", a, plus_one(a));
let plus_two = |x:isize| {
let mut z = x;
z+=2;
z
};
println!("{} +2 = {}", 3, plus_two(3));
}
fn say_hello() {println!("Hello")}
fn methods() {
struct Point {
x: f64,
y: f64
}
struct Line {
start: Point,
end: Point,
}
impl Line {
fn len(&self) -> f64 {
let dx = self.start.x - self.end.x;
let dy = self.start.y - self.end.y;
(dx*dx + dy*dy).sqrt()
}
}
let p = Point {x: 3.0, y: 4.0};
let p2 = Point {x: 5.0, y: 10.0};
let myline = Line { start: p, end: p2};
println!("lengh = {}", myline.len())
}
fn functions() {
print_value(33);
let mut z = 1;
increase1(&mut z);
println!("z is {}", z);
let a = 3;
let b = 5;
let p = product(a, b);
}
fn product(x: i32, y: i32) -> i32 {
// return x*y;
x * y
}
fn increase1(x: &mut i32) {
*x += 1;
}
fn print_value(x: i32) {
println!("x is {}", x)
}
fn hashsets() {
let mut greeks = HashSet::new();
greeks.insert("alfa");
greeks.insert("delta");
greeks.insert("hamma");
greeks.insert("delta");
println!("{:?}", greeks);
let added_delta = greeks.insert("delta");
if added_delta {
println!("We added delta! hooray!")
}
let added_vega = greeks.insert("vega");
if added_vega {
println!("We added vega! hooray!")
}
if !greeks.contains("kappa") {
println!("We don't have kappa")
}
}
fn hashmaps() {
let mut shapes = HashMap::new();
shapes.insert(String::from("triangle"), 3);
shapes.insert(String::from("square"), 4);
println!("hashmaps: {:?}", shapes);
println!("a square has {} sides", shapes["square"]);
shapes.insert("square".into(), 5);
println!("{:?}", shapes);
for (key, value) in &shapes {
println!("key: {}, value: {}", key, value);
}
shapes.entry("circle".into()).or_insert(1);
{
let actual = shapes.entry("circle".into()).or_insert(2);
*actual = 0;
}
println!("{:?}", shapes);
let _1_5: HashSet<_> = (1..=5).collect();
let _6_10: HashSet<_> = (6..=10).collect();
let _1_10: HashSet<_> = (1..=10).collect();
let _2_8: HashSet<_> = (2..=8).collect();
//subset
}
fn vectors() {
let mut a = Vec::new();
a.push(1);
a.push(2);
a.push(3);
println!("a = {:?}", a);
a.push(44);
println!("a = {:?}", a);
//usize isize
let idx: usize = 2;
println!("a[2] = {}", a[idx]);
match a.get(6) {
Some(x) => println!("a[6] = {}", x),
None => println!("error, no such element")
}
for x in &a { println!("{}", x) }
a.push(77);
println!("{:?}", a);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
while let Some(x) = a.pop() {
println!("{}", x)
}
}
fn generics() {
struct Point<T, V> {
x: T,
y: V,
}
struct Point1<T> {
x: T,
y: T,
}
struct Line<T> {
start: Point1<T>,
end: Point1<T>,
}
let a: Point<u16, i32> = Point { x: 0, y: 0 };
let b: Point<f64, f32> = Point { x: 1.2, y: 3.4 };
let c: Point<i32, f64> = Point { x: 3, y: 5.0 };
let d: Point<i32, f64> = Point { x: 1, y: 4.5 };
let x: Point1<f64> = Point1 { x: 1f64, y: 2f64 };
let y: Point1<f64> = Point1 { x: 3f64, y: 4f64 };
let myline = Line { start: x, end: y };
}
fn tuples() {
let x = 3;
let y = 4;
let sp = sum_and_product(x, y);
let (sum, product) = sp;
println!("sp = {:?}", (sum, product));
println!("{0} + {1} = {2}", x, y, sum);
println!("{0} + {1} = {2}", x, y, product);
let sp2 = sum_and_product(4, 7);
let combined = (sp, sp2);
println!("{1:?} , {2:?}, {0:?} ", combined.0, combined.1, combined);
println!("{1:?} , {2:?}, {0:?} ", combined.0, (combined.0).0, (combined
.0).1);
let ((c, d), (e, f)) = combined;
println!("{},{},{},{}", c, d, e, f);
let foo = (true, 42.0, -1i8);
println!("{:?}", foo);
let meaning = 42;
println!("{:?}", meaning);
}
fn sum_and_product(x: i32, y: i32) -> (i32, i32) {
(x + y, x * y)
}
fn use_slices(slice: &mut [i32]) {
println!("first elem = {}, len = {}", slice[0], slice.len());
slice[0] = 4444;
}
fn slices() {
let mut data = [1, 2, 3, 4, 5];
use_slices(&mut data[1..4]);
use_slices(&mut data);
println!("{:?}", data)
}
fn array() {
let mut a: [i32; 5] = [1, 2, 3, 4, 5, ];
println!("a has {} elements, first is {}", a.len(), a[0]);
a[0] = 321;
println!("a has {} elements, first is {}", a.len(), a[0]);
println!("{:?}", a);
if a == [321, 2, 3, 4, 5] {
println!("match");
}
let b = [1u64; 10];
for i in 0..b.len() {
println!("{}", b[i])
};
println!("b took up {} bytes", mem::size_of_val(&b));
println!("b {:?}", b);
let mtx: [[f64; 3]; 2] = [
[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6]
];
println!("mtx = {:?}", mtx);
for i in 0..mtx.len() {
for j in 0..mtx[i].len() {
if i == j {
print!("diagonal: {} ", mtx[i][j]);
}
}
}
println!();
}
union IntOrFloat {
i: i32,
f: f32,
}
fn option_t() {
let x = 3.0;
let y = 1.0;
//Option
let result =
if y != 0.0 { Some(x / y) } else { None };
match result {
Some(z) => {
println!("{}/{} ={}", x, y, z)
}
None => println!("cannot divide by zero")
}
if let Some(z) = result {
println!("result = {}", z)
}
}
fn process_value(iof: IntOrFloat) {
unsafe {
match iof {
IntOrFloat { i: 42 } => {
println!("meaning of life value 42", );
}
IntOrFloat { f } => {
println!("value = {}", f)
}
}
}
}
fn unions() {
let mut iof = IntOrFloat { i: 123 };
iof.i = 234;
let value = unsafe { iof.i };
println!("iof.i = {}", value);
process_value(IntOrFloat { i: 5 })
}
fn enums() |
fn structures() {
struct Point {
x: f64,
y: f64,
}
let p = Point { x: 34.5, y: 4.0 };
println!("point p is at ({}, {})", p.x, p.y);
let p2 = Point { x: 3.0, y: 4.0 };
struct Line {
start: Point,
end: Point,
}
let myline = Line { start: p, end: p2 };
}
enum State {
Locked,
Failed,
Unlocked,
}
fn combination_lock() {
let code = String::from("1234");
let mut state = State::Locked;
let mut entry = String::new();
println!(" string = {}, code = {}", entry, code);
loop {
match state {
State::Locked => {
let mut input = String::new();
match stdin().read_line(&mut input) {
Ok(_) => entry.push_str(&input.trim_end()),
Err(_) => continue,
}
if entry == code {
state = State::Unlocked;
continue;
}
if !code.starts_with(&entry) {
state = State::Failed
}
}
State::Failed => {
println!("Failed");
entry.clear();
state = State::Locked;
continue;
}
State::Unlocked => {
println!("Unlocked");
return;
}
}
}
}
fn match_statement() {
let country_code = 44;
let country = match country_code {
44 => "UK",
46 => "Sweden",
7 => "Russia",
1..=999 => "unknown",
_ => "invalid",
};
println!("the country code {} is {}", country_code, country)
}
fn for_loop() {
for x in 1..11 {
if x == 3 {
continue;
}
if x == 8 {
break;
}
println!("x = {}", x)
}
for (pos, y) in (30..42).enumerate() {
println!("{} : {}", pos, y)
}
}
fn while_and_loop() {
let mut x = 1;
while x < 1000 {
x *= 2;
if x == 64 {
continue;
}
println!("x = {}", x)
}
let mut y = 1;
loop {
y *= 2;
println!("y = {}", y);
if y == 1 << 10 {
break;
}
}
}
fn if_statement() {
let temp = 25;
if temp > 30 {
println!("really hot outside")
} else if temp < 10 {
println!("really cold!")
} else {
println!("temperature is OK")
}
let day = if temp > 20 { "sunny" } else { "cloudy" };
println!("today is {}", day);
println!(
"is it {}",
if temp > 20 {
"hot"
} else if temp < 10 {
"cold"
} else {
"OK"
}
);
println!(
"it is {}",
if temp > 20 {
if temp > 30 {
"very hot"
} else {
"hot"
}
} else if temp < 10 {
"cold"
} else {
"OK"
}
)
}
fn scope_and_shadowing() {
let a = 123;
println!("a = {}", a);
let a = 777;
println!("a = {}", a);
{
let a = 888;
let b = 456;
println!("a = {}, b = {}", a, b);
}
}
fn operators() {
//arithmetic operators
let mut a = 2 + 3 * 4;
println!("{}", a);
a += 1;
a -= 2;
println!("remainder of {}/{} = {}", a, 3, (a % 3));
// let mut a_cubed = i16::pow(a, 3);
// let mut a_cubed = i32::pow( 4);
let b = 2.5;
let b_cubed = f64::powi(b, 3);
println!("b = {}", b);
let b_to_pi = f64::powf(b, std::f64::consts::PI);
println!("{} cubed = {}", b, b_cubed);
println!("{} pied = {}", b, b_to_pi);
//bitwise rotate
let c = 1 | 2;
println!("1 | 2 = {}", c);
let two_to_10 = 1 << 10;
println!("2^10 = {}", two_to_10);
//logical
let pi_less_4 = std::f64::consts::PI < 4.0;
let x = 5;
let x_is_5 = x == 5;
}
fn primitive_types() {
let a: u8 = 123;
let b: i8 = -123;
// println!("a = {}, b ={}", a, b);
// a = 432;
// b = 567;
// b = 122;
let mut c = 123456789; // 32-bit signed integer
println!("c = {}, size = {} bytes", c, mem::size_of_val(&c));
c = -1;
println!("c = {} after modification", c);
let z: isize = 123456789;
let size_of_z = mem::size_of_val(&z);
println!("z = {}, size = {}, {}-bit os", z, size_of_z, size_of_z * 8);
let d: char = 'x';
println!("d = {}, size = {}", d, mem::size_of_val(&d));
let e = 2.5; // double precision value, 8 bytes or 64 bits, f 64
println!("e = {}, size = {}", e, mem::size_of_val(&e));
let g = false;
println!("g = {}, size = {}", g, mem::size_of_val(&g));
}
| {
enum Color {
Red,
Green,
Blue,
RgbColor(u8, u8, u8),
//tuple
Cmyk { cyan: u8, magenta: u8, yellow: u8, black: u8 }, //struct
}
let c: Color = Color::Cmyk { cyan: 0, magenta: 128, yellow: 0, black: 0 };
match c {
Color::Red => println!("r"),
Color::Green => println!("g"),
Color::Blue => println!("b"),
Color::RgbColor(0, 0, 0) => println!("color: black"),
Color::RgbColor(r, g, b) => println!("rgb({},{},{})", r, g, b),
Color::Cmyk { cyan: _, magenta: _, yellow: _, black: 255 } =>
println!("black"),
Color::Cmyk { cyan: a, magenta: b, yellow: c, black: d } => println!("cmyk({},{},{},{})", a, b,
c, d),
}
} | identifier_body |
main.rs | #![allow(dead_code)]
#![allow(unused_variables)]
use std::collections::{HashMap, HashSet};
use std::io::stdin;
use std::mem;
mod pm;
// const MEANING_OF_LIFE: u16 = 456; // no fixed address
fn main() {
// primitive_types ();
// operators();
// scope_and_shadowing();
// println!("const MEANING_OF_LIFE = {}", MEANING_OF_LIFE)
// if_statement();
// while_and_loop();
// match_statecment();
// for_loop();
// combination_lock();
// structures();
// enums();
// unions();
// process_value();
// option_T()
// array();
// slices();
// tuples();
// pm::pattern_matching();
// generics();
// vectors();
// hashmaps();
// hashsets();
// functions();
// methods();
closures();
// h_o_functions();
}
fn h_o_functions() {
}
fn closures() {
let sh = say_hello;
sh();
let plus_one = |x:i32| -> i32 {x+1};
let a = 6;
println!("{} +1 = {}", a, plus_one(a));
let plus_two = |x:isize| {
let mut z = x;
z+=2;
z
};
println!("{} +2 = {}", 3, plus_two(3));
}
fn say_hello() {println!("Hello")}
fn methods() {
struct Point {
x: f64,
y: f64
}
struct Line {
start: Point,
end: Point,
}
impl Line {
fn len(&self) -> f64 {
let dx = self.start.x - self.end.x;
let dy = self.start.y - self.end.y;
(dx*dx + dy*dy).sqrt()
}
}
let p = Point {x: 3.0, y: 4.0};
let p2 = Point {x: 5.0, y: 10.0};
let myline = Line { start: p, end: p2};
println!("lengh = {}", myline.len())
}
fn functions() {
print_value(33);
let mut z = 1;
increase1(&mut z);
println!("z is {}", z);
let a = 3;
let b = 5;
let p = product(a, b);
}
fn product(x: i32, y: i32) -> i32 {
// return x*y;
x * y
}
fn increase1(x: &mut i32) {
*x += 1;
}
fn print_value(x: i32) {
println!("x is {}", x)
}
fn hashsets() {
let mut greeks = HashSet::new();
greeks.insert("alfa");
greeks.insert("delta");
greeks.insert("hamma");
greeks.insert("delta");
println!("{:?}", greeks);
let added_delta = greeks.insert("delta");
if added_delta {
println!("We added delta! hooray!")
}
let added_vega = greeks.insert("vega");
if added_vega {
println!("We added vega! hooray!")
}
if !greeks.contains("kappa") {
println!("We don't have kappa")
}
}
fn hashmaps() {
let mut shapes = HashMap::new();
shapes.insert(String::from("triangle"), 3);
shapes.insert(String::from("square"), 4);
println!("hashmaps: {:?}", shapes); | for (key, value) in &shapes {
println!("key: {}, value: {}", key, value);
}
shapes.entry("circle".into()).or_insert(1);
{
let actual = shapes.entry("circle".into()).or_insert(2);
*actual = 0;
}
println!("{:?}", shapes);
let _1_5: HashSet<_> = (1..=5).collect();
let _6_10: HashSet<_> = (6..=10).collect();
let _1_10: HashSet<_> = (1..=10).collect();
let _2_8: HashSet<_> = (2..=8).collect();
//subset
}
fn vectors() {
let mut a = Vec::new();
a.push(1);
a.push(2);
a.push(3);
println!("a = {:?}", a);
a.push(44);
println!("a = {:?}", a);
//usize isize
let idx: usize = 2;
println!("a[2] = {}", a[idx]);
match a.get(6) {
Some(x) => println!("a[6] = {}", x),
None => println!("error, no such element")
}
for x in &a { println!("{}", x) }
a.push(77);
println!("{:?}", a);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
while let Some(x) = a.pop() {
println!("{}", x)
}
}
fn generics() {
struct Point<T, V> {
x: T,
y: V,
}
struct Point1<T> {
x: T,
y: T,
}
struct Line<T> {
start: Point1<T>,
end: Point1<T>,
}
let a: Point<u16, i32> = Point { x: 0, y: 0 };
let b: Point<f64, f32> = Point { x: 1.2, y: 3.4 };
let c: Point<i32, f64> = Point { x: 3, y: 5.0 };
let d: Point<i32, f64> = Point { x: 1, y: 4.5 };
let x: Point1<f64> = Point1 { x: 1f64, y: 2f64 };
let y: Point1<f64> = Point1 { x: 3f64, y: 4f64 };
let myline = Line { start: x, end: y };
}
fn tuples() {
let x = 3;
let y = 4;
let sp = sum_and_product(x, y);
let (sum, product) = sp;
println!("sp = {:?}", (sum, product));
println!("{0} + {1} = {2}", x, y, sum);
println!("{0} + {1} = {2}", x, y, product);
let sp2 = sum_and_product(4, 7);
let combined = (sp, sp2);
println!("{1:?} , {2:?}, {0:?} ", combined.0, combined.1, combined);
println!("{1:?} , {2:?}, {0:?} ", combined.0, (combined.0).0, (combined
.0).1);
let ((c, d), (e, f)) = combined;
println!("{},{},{},{}", c, d, e, f);
let foo = (true, 42.0, -1i8);
println!("{:?}", foo);
let meaning = 42;
println!("{:?}", meaning);
}
fn sum_and_product(x: i32, y: i32) -> (i32, i32) {
(x + y, x * y)
}
fn use_slices(slice: &mut [i32]) {
println!("first elem = {}, len = {}", slice[0], slice.len());
slice[0] = 4444;
}
fn slices() {
let mut data = [1, 2, 3, 4, 5];
use_slices(&mut data[1..4]);
use_slices(&mut data);
println!("{:?}", data)
}
fn array() {
let mut a: [i32; 5] = [1, 2, 3, 4, 5, ];
println!("a has {} elements, first is {}", a.len(), a[0]);
a[0] = 321;
println!("a has {} elements, first is {}", a.len(), a[0]);
println!("{:?}", a);
if a == [321, 2, 3, 4, 5] {
println!("match");
}
let b = [1u64; 10];
for i in 0..b.len() {
println!("{}", b[i])
};
println!("b took up {} bytes", mem::size_of_val(&b));
println!("b {:?}", b);
let mtx: [[f64; 3]; 2] = [
[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6]
];
println!("mtx = {:?}", mtx);
for i in 0..mtx.len() {
for j in 0..mtx[i].len() {
if i == j {
print!("diagonal: {} ", mtx[i][j]);
}
}
}
println!();
}
union IntOrFloat {
i: i32,
f: f32,
}
fn option_t() {
let x = 3.0;
let y = 1.0;
//Option
let result =
if y != 0.0 { Some(x / y) } else { None };
match result {
Some(z) => {
println!("{}/{} ={}", x, y, z)
}
None => println!("cannot divide by zero")
}
if let Some(z) = result {
println!("result = {}", z)
}
}
fn process_value(iof: IntOrFloat) {
unsafe {
match iof {
IntOrFloat { i: 42 } => {
println!("meaning of life value 42", );
}
IntOrFloat { f } => {
println!("value = {}", f)
}
}
}
}
fn unions() {
let mut iof = IntOrFloat { i: 123 };
iof.i = 234;
let value = unsafe { iof.i };
println!("iof.i = {}", value);
process_value(IntOrFloat { i: 5 })
}
fn enums() {
enum Color {
Red,
Green,
Blue,
RgbColor(u8, u8, u8),
//tuple
Cmyk { cyan: u8, magenta: u8, yellow: u8, black: u8 }, //struct
}
let c: Color = Color::Cmyk { cyan: 0, magenta: 128, yellow: 0, black: 0 };
match c {
Color::Red => println!("r"),
Color::Green => println!("g"),
Color::Blue => println!("b"),
Color::RgbColor(0, 0, 0) => println!("color: black"),
Color::RgbColor(r, g, b) => println!("rgb({},{},{})", r, g, b),
Color::Cmyk { cyan: _, magenta: _, yellow: _, black: 255 } =>
println!("black"),
Color::Cmyk { cyan: a, magenta: b, yellow: c, black: d } => println!("cmyk({},{},{},{})", a, b,
c, d),
}
}
fn structures() {
struct Point {
x: f64,
y: f64,
}
let p = Point { x: 34.5, y: 4.0 };
println!("point p is at ({}, {})", p.x, p.y);
let p2 = Point { x: 3.0, y: 4.0 };
struct Line {
start: Point,
end: Point,
}
let myline = Line { start: p, end: p2 };
}
enum State {
Locked,
Failed,
Unlocked,
}
fn combination_lock() {
let code = String::from("1234");
let mut state = State::Locked;
let mut entry = String::new();
println!(" string = {}, code = {}", entry, code);
loop {
match state {
State::Locked => {
let mut input = String::new();
match stdin().read_line(&mut input) {
Ok(_) => entry.push_str(&input.trim_end()),
Err(_) => continue,
}
if entry == code {
state = State::Unlocked;
continue;
}
if !code.starts_with(&entry) {
state = State::Failed
}
}
State::Failed => {
println!("Failed");
entry.clear();
state = State::Locked;
continue;
}
State::Unlocked => {
println!("Unlocked");
return;
}
}
}
}
fn match_statement() {
let country_code = 44;
let country = match country_code {
44 => "UK",
46 => "Sweden",
7 => "Russia",
1..=999 => "unknown",
_ => "invalid",
};
println!("the country code {} is {}", country_code, country)
}
fn for_loop() {
for x in 1..11 {
if x == 3 {
continue;
}
if x == 8 {
break;
}
println!("x = {}", x)
}
for (pos, y) in (30..42).enumerate() {
println!("{} : {}", pos, y)
}
}
fn while_and_loop() {
let mut x = 1;
while x < 1000 {
x *= 2;
if x == 64 {
continue;
}
println!("x = {}", x)
}
let mut y = 1;
loop {
y *= 2;
println!("y = {}", y);
if y == 1 << 10 {
break;
}
}
}
fn if_statement() {
let temp = 25;
if temp > 30 {
println!("really hot outside")
} else if temp < 10 {
println!("really cold!")
} else {
println!("temperature is OK")
}
let day = if temp > 20 { "sunny" } else { "cloudy" };
println!("today is {}", day);
println!(
"is it {}",
if temp > 20 {
"hot"
} else if temp < 10 {
"cold"
} else {
"OK"
}
);
println!(
"it is {}",
if temp > 20 {
if temp > 30 {
"very hot"
} else {
"hot"
}
} else if temp < 10 {
"cold"
} else {
"OK"
}
)
}
fn scope_and_shadowing() {
let a = 123;
println!("a = {}", a);
let a = 777;
println!("a = {}", a);
{
let a = 888;
let b = 456;
println!("a = {}, b = {}", a, b);
}
}
fn operators() {
//arithmetic operators
let mut a = 2 + 3 * 4;
println!("{}", a);
a += 1;
a -= 2;
println!("remainder of {}/{} = {}", a, 3, (a % 3));
// let mut a_cubed = i16::pow(a, 3);
// let mut a_cubed = i32::pow( 4);
let b = 2.5;
let b_cubed = f64::powi(b, 3);
println!("b = {}", b);
let b_to_pi = f64::powf(b, std::f64::consts::PI);
println!("{} cubed = {}", b, b_cubed);
println!("{} pied = {}", b, b_to_pi);
//bitwise rotate
let c = 1 | 2;
println!("1 | 2 = {}", c);
let two_to_10 = 1 << 10;
println!("2^10 = {}", two_to_10);
//logical
let pi_less_4 = std::f64::consts::PI < 4.0;
let x = 5;
let x_is_5 = x == 5;
}
fn primitive_types() {
let a: u8 = 123;
let b: i8 = -123;
// println!("a = {}, b ={}", a, b);
// a = 432;
// b = 567;
// b = 122;
let mut c = 123456789; // 32-bit signed integer
println!("c = {}, size = {} bytes", c, mem::size_of_val(&c));
c = -1;
println!("c = {} after modification", c);
let z: isize = 123456789;
let size_of_z = mem::size_of_val(&z);
println!("z = {}, size = {}, {}-bit os", z, size_of_z, size_of_z * 8);
let d: char = 'x';
println!("d = {}, size = {}", d, mem::size_of_val(&d));
let e = 2.5; // double precision value, 8 bytes or 64 bits, f 64
println!("e = {}, size = {}", e, mem::size_of_val(&e));
let g = false;
println!("g = {}, size = {}", g, mem::size_of_val(&g));
} | println!("a square has {} sides", shapes["square"]);
shapes.insert("square".into(), 5);
println!("{:?}", shapes);
| random_line_split |
main.rs | #![allow(dead_code)]
#![allow(unused_variables)]
use std::collections::{HashMap, HashSet};
use std::io::stdin;
use std::mem;
mod pm;
// const MEANING_OF_LIFE: u16 = 456; // no fixed address
fn main() {
// primitive_types ();
// operators();
// scope_and_shadowing();
// println!("const MEANING_OF_LIFE = {}", MEANING_OF_LIFE)
// if_statement();
// while_and_loop();
// match_statecment();
// for_loop();
// combination_lock();
// structures();
// enums();
// unions();
// process_value();
// option_T()
// array();
// slices();
// tuples();
// pm::pattern_matching();
// generics();
// vectors();
// hashmaps();
// hashsets();
// functions();
// methods();
closures();
// h_o_functions();
}
fn h_o_functions() {
}
fn closures() {
let sh = say_hello;
sh();
let plus_one = |x:i32| -> i32 {x+1};
let a = 6;
println!("{} +1 = {}", a, plus_one(a));
let plus_two = |x:isize| {
let mut z = x;
z+=2;
z
};
println!("{} +2 = {}", 3, plus_two(3));
}
fn say_hello() {println!("Hello")}
fn methods() {
struct Point {
x: f64,
y: f64
}
struct Line {
start: Point,
end: Point,
}
impl Line {
fn len(&self) -> f64 {
let dx = self.start.x - self.end.x;
let dy = self.start.y - self.end.y;
(dx*dx + dy*dy).sqrt()
}
}
let p = Point {x: 3.0, y: 4.0};
let p2 = Point {x: 5.0, y: 10.0};
let myline = Line { start: p, end: p2};
println!("lengh = {}", myline.len())
}
fn functions() {
print_value(33);
let mut z = 1;
increase1(&mut z);
println!("z is {}", z);
let a = 3;
let b = 5;
let p = product(a, b);
}
fn product(x: i32, y: i32) -> i32 {
// return x*y;
x * y
}
fn increase1(x: &mut i32) {
*x += 1;
}
fn print_value(x: i32) {
println!("x is {}", x)
}
fn hashsets() {
let mut greeks = HashSet::new();
greeks.insert("alfa");
greeks.insert("delta");
greeks.insert("hamma");
greeks.insert("delta");
println!("{:?}", greeks);
let added_delta = greeks.insert("delta");
if added_delta {
println!("We added delta! hooray!")
}
let added_vega = greeks.insert("vega");
if added_vega {
println!("We added vega! hooray!")
}
if !greeks.contains("kappa") {
println!("We don't have kappa")
}
}
fn hashmaps() {
let mut shapes = HashMap::new();
shapes.insert(String::from("triangle"), 3);
shapes.insert(String::from("square"), 4);
println!("hashmaps: {:?}", shapes);
println!("a square has {} sides", shapes["square"]);
shapes.insert("square".into(), 5);
println!("{:?}", shapes);
for (key, value) in &shapes {
println!("key: {}, value: {}", key, value);
}
shapes.entry("circle".into()).or_insert(1);
{
let actual = shapes.entry("circle".into()).or_insert(2);
*actual = 0;
}
println!("{:?}", shapes);
let _1_5: HashSet<_> = (1..=5).collect();
let _6_10: HashSet<_> = (6..=10).collect();
let _1_10: HashSet<_> = (1..=10).collect();
let _2_8: HashSet<_> = (2..=8).collect();
//subset
}
fn vectors() {
let mut a = Vec::new();
a.push(1);
a.push(2);
a.push(3);
println!("a = {:?}", a);
a.push(44);
println!("a = {:?}", a);
//usize isize
let idx: usize = 2;
println!("a[2] = {}", a[idx]);
match a.get(6) {
Some(x) => println!("a[6] = {}", x),
None => println!("error, no such element")
}
for x in &a { println!("{}", x) }
a.push(77);
println!("{:?}", a);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
let last_elem = a.pop();
println!("{:?}", last_elem);
while let Some(x) = a.pop() {
println!("{}", x)
}
}
fn generics() {
struct Point<T, V> {
x: T,
y: V,
}
struct Point1<T> {
x: T,
y: T,
}
struct Line<T> {
start: Point1<T>,
end: Point1<T>,
}
let a: Point<u16, i32> = Point { x: 0, y: 0 };
let b: Point<f64, f32> = Point { x: 1.2, y: 3.4 };
let c: Point<i32, f64> = Point { x: 3, y: 5.0 };
let d: Point<i32, f64> = Point { x: 1, y: 4.5 };
let x: Point1<f64> = Point1 { x: 1f64, y: 2f64 };
let y: Point1<f64> = Point1 { x: 3f64, y: 4f64 };
let myline = Line { start: x, end: y };
}
fn tuples() {
let x = 3;
let y = 4;
let sp = sum_and_product(x, y);
let (sum, product) = sp;
println!("sp = {:?}", (sum, product));
println!("{0} + {1} = {2}", x, y, sum);
println!("{0} + {1} = {2}", x, y, product);
let sp2 = sum_and_product(4, 7);
let combined = (sp, sp2);
println!("{1:?} , {2:?}, {0:?} ", combined.0, combined.1, combined);
println!("{1:?} , {2:?}, {0:?} ", combined.0, (combined.0).0, (combined
.0).1);
let ((c, d), (e, f)) = combined;
println!("{},{},{},{}", c, d, e, f);
let foo = (true, 42.0, -1i8);
println!("{:?}", foo);
let meaning = 42;
println!("{:?}", meaning);
}
fn sum_and_product(x: i32, y: i32) -> (i32, i32) {
(x + y, x * y)
}
fn use_slices(slice: &mut [i32]) {
println!("first elem = {}, len = {}", slice[0], slice.len());
slice[0] = 4444;
}
fn slices() {
let mut data = [1, 2, 3, 4, 5];
use_slices(&mut data[1..4]);
use_slices(&mut data);
println!("{:?}", data)
}
fn array() {
let mut a: [i32; 5] = [1, 2, 3, 4, 5, ];
println!("a has {} elements, first is {}", a.len(), a[0]);
a[0] = 321;
println!("a has {} elements, first is {}", a.len(), a[0]);
println!("{:?}", a);
if a == [321, 2, 3, 4, 5] {
println!("match");
}
let b = [1u64; 10];
for i in 0..b.len() {
println!("{}", b[i])
};
println!("b took up {} bytes", mem::size_of_val(&b));
println!("b {:?}", b);
let mtx: [[f64; 3]; 2] = [
[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6]
];
println!("mtx = {:?}", mtx);
for i in 0..mtx.len() {
for j in 0..mtx[i].len() {
if i == j {
print!("diagonal: {} ", mtx[i][j]);
}
}
}
println!();
}
union IntOrFloat {
i: i32,
f: f32,
}
fn option_t() {
let x = 3.0;
let y = 1.0;
//Option
let result =
if y != 0.0 { Some(x / y) } else { None };
match result {
Some(z) => {
println!("{}/{} ={}", x, y, z)
}
None => println!("cannot divide by zero")
}
if let Some(z) = result {
println!("result = {}", z)
}
}
fn process_value(iof: IntOrFloat) {
unsafe {
match iof {
IntOrFloat { i: 42 } => {
println!("meaning of life value 42", );
}
IntOrFloat { f } => |
}
}
}
fn unions() {
let mut iof = IntOrFloat { i: 123 };
iof.i = 234;
let value = unsafe { iof.i };
println!("iof.i = {}", value);
process_value(IntOrFloat { i: 5 })
}
fn enums() {
enum Color {
Red,
Green,
Blue,
RgbColor(u8, u8, u8),
//tuple
Cmyk { cyan: u8, magenta: u8, yellow: u8, black: u8 }, //struct
}
let c: Color = Color::Cmyk { cyan: 0, magenta: 128, yellow: 0, black: 0 };
match c {
Color::Red => println!("r"),
Color::Green => println!("g"),
Color::Blue => println!("b"),
Color::RgbColor(0, 0, 0) => println!("color: black"),
Color::RgbColor(r, g, b) => println!("rgb({},{},{})", r, g, b),
Color::Cmyk { cyan: _, magenta: _, yellow: _, black: 255 } =>
println!("black"),
Color::Cmyk { cyan: a, magenta: b, yellow: c, black: d } => println!("cmyk({},{},{},{})", a, b,
c, d),
}
}
fn structures() {
struct Point {
x: f64,
y: f64,
}
let p = Point { x: 34.5, y: 4.0 };
println!("point p is at ({}, {})", p.x, p.y);
let p2 = Point { x: 3.0, y: 4.0 };
struct Line {
start: Point,
end: Point,
}
let myline = Line { start: p, end: p2 };
}
enum State {
Locked,
Failed,
Unlocked,
}
fn combination_lock() {
let code = String::from("1234");
let mut state = State::Locked;
let mut entry = String::new();
println!(" string = {}, code = {}", entry, code);
loop {
match state {
State::Locked => {
let mut input = String::new();
match stdin().read_line(&mut input) {
Ok(_) => entry.push_str(&input.trim_end()),
Err(_) => continue,
}
if entry == code {
state = State::Unlocked;
continue;
}
if !code.starts_with(&entry) {
state = State::Failed
}
}
State::Failed => {
println!("Failed");
entry.clear();
state = State::Locked;
continue;
}
State::Unlocked => {
println!("Unlocked");
return;
}
}
}
}
fn match_statement() {
let country_code = 44;
let country = match country_code {
44 => "UK",
46 => "Sweden",
7 => "Russia",
1..=999 => "unknown",
_ => "invalid",
};
println!("the country code {} is {}", country_code, country)
}
fn for_loop() {
for x in 1..11 {
if x == 3 {
continue;
}
if x == 8 {
break;
}
println!("x = {}", x)
}
for (pos, y) in (30..42).enumerate() {
println!("{} : {}", pos, y)
}
}
fn while_and_loop() {
let mut x = 1;
while x < 1000 {
x *= 2;
if x == 64 {
continue;
}
println!("x = {}", x)
}
let mut y = 1;
loop {
y *= 2;
println!("y = {}", y);
if y == 1 << 10 {
break;
}
}
}
fn if_statement() {
let temp = 25;
if temp > 30 {
println!("really hot outside")
} else if temp < 10 {
println!("really cold!")
} else {
println!("temperature is OK")
}
let day = if temp > 20 { "sunny" } else { "cloudy" };
println!("today is {}", day);
println!(
"is it {}",
if temp > 20 {
"hot"
} else if temp < 10 {
"cold"
} else {
"OK"
}
);
println!(
"it is {}",
if temp > 20 {
if temp > 30 {
"very hot"
} else {
"hot"
}
} else if temp < 10 {
"cold"
} else {
"OK"
}
)
}
fn scope_and_shadowing() {
let a = 123;
println!("a = {}", a);
let a = 777;
println!("a = {}", a);
{
let a = 888;
let b = 456;
println!("a = {}, b = {}", a, b);
}
}
fn operators() {
//arithmetic operators
let mut a = 2 + 3 * 4;
println!("{}", a);
a += 1;
a -= 2;
println!("remainder of {}/{} = {}", a, 3, (a % 3));
// let mut a_cubed = i16::pow(a, 3);
// let mut a_cubed = i32::pow( 4);
let b = 2.5;
let b_cubed = f64::powi(b, 3);
println!("b = {}", b);
let b_to_pi = f64::powf(b, std::f64::consts::PI);
println!("{} cubed = {}", b, b_cubed);
println!("{} pied = {}", b, b_to_pi);
//bitwise rotate
let c = 1 | 2;
println!("1 | 2 = {}", c);
let two_to_10 = 1 << 10;
println!("2^10 = {}", two_to_10);
//logical
let pi_less_4 = std::f64::consts::PI < 4.0;
let x = 5;
let x_is_5 = x == 5;
}
fn primitive_types() {
let a: u8 = 123;
let b: i8 = -123;
// println!("a = {}, b ={}", a, b);
// a = 432;
// b = 567;
// b = 122;
let mut c = 123456789; // 32-bit signed integer
println!("c = {}, size = {} bytes", c, mem::size_of_val(&c));
c = -1;
println!("c = {} after modification", c);
let z: isize = 123456789;
let size_of_z = mem::size_of_val(&z);
println!("z = {}, size = {}, {}-bit os", z, size_of_z, size_of_z * 8);
let d: char = 'x';
println!("d = {}, size = {}", d, mem::size_of_val(&d));
let e = 2.5; // double precision value, 8 bytes or 64 bits, f 64
println!("e = {}, size = {}", e, mem::size_of_val(&e));
let g = false;
println!("g = {}, size = {}", g, mem::size_of_val(&g));
}
| {
println!("value = {}", f)
} | conditional_block |
main.rs | use std::{env, io, fmt};
use std::time::{Duration, SystemTime};
use std::error::Error;
use std::collections::HashMap;
use tokio::sync;
use tokio::net::UdpSocket;
use log::{debug, info, warn};
use futures::select;
use futures::future::FutureExt;
// Delta between NTP epoch (1900-01-01 00:00:00) and Unix epoch (1970-01-01 00:00:00).
// Contains 53 non-leap years, and 17 leap years, in seconds, this is:
// (53 * 365 + 17 * 366) * 86400 = 2208988800.
const EPOCH_DELTA: u64 = 2_208_988_800;
// Tag name to use for messages without an explicit tag (i.e. currently those sent via
// `/send_after`).
const DEFAULT_TAG: &str = "default";
// Convert an OSC timetag into unix timestamp seconds and microseconds.
//
// [OSC timetags](http://opensoundcontrol.org/spec-1_0) use NTP timestamps
// (https://en.wikipedia.org/wiki/Network_Time_Protocol#Timestamps).
//
// TODO: verify time conversions are actually correct, check against other implementations
fn timetag_to_unix(ntp_secs: u32, ntp_frac_secs: u32) -> (u64, u32) {
let unix_secs = ntp_secs as u64 - EPOCH_DELTA;
let unix_micros = ((ntp_frac_secs as u64) * 1_000_000) >> 32;
(unix_secs, unix_micros as u32)
}
// TODO: verify time conversions are actually correct, check roundtrips
fn timetag_to_duration(ntp_secs: u32, ntp_frac_secs: u32) -> Duration {
let (unix_secs, unix_micros) = timetag_to_unix(ntp_secs, ntp_frac_secs);
// duration of time tag since epoch
let tt_since_epoch = Duration::new(unix_secs, unix_micros * 1000);
// duration of current system time since epoch
let now_since_epoch = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("System is set to before Unix epoch, check clock");
tt_since_epoch - now_since_epoch
}
/*
fn unix_to_timetag(unix_secs: u64, unix_micros: u32) -> (u32, u32) {
let ntp_secs = unix_secs + EPOCH_DELTA;
let ntp_frac_secs = ((unix_micros as u64 + 1) << 32) / 1_000_000;
(ntp_secs as u32, ntp_frac_secs as u32)
}
*/
struct Server {
/// Server's listening UDP socket.
socket: UdpSocket,
/// Internal buffer used for reading/writing UDP packets into.
buf: Vec<u8>,
/// Maps a tag name to sender/receiver pair. Used for signalling cancellations.
tags: HashMap<String, (sync::watch::Sender<bool>, sync::watch::Receiver<bool>)>,
}
impl Server {
pub async fn new(bind_addr: &str) -> Result<Self, io::Error> {
debug!("Attempting to bind to: {}", bind_addr);
let socket = UdpSocket::bind(bind_addr).await?;
info!("Listening on: {}", socket.local_addr()?);
Ok(Self {
socket,
buf: vec![0; 1024],
tags: HashMap::new(),
})
}
/// Main event loop, runs forever after server is started.
async fn run(&mut self) -> Result<(), io::Error> {
debug!("Starting main event loop");
loop {
if let Err(err) = self.next_event().await {
warn!("{}", err);
}
}
}
/// Called from main server event loop (`run()`) on each iteration.
///
/// Waits for incoming UDP packets containing OSC packets, either handling them immediately (in
/// the case of e.g. `/flush` messages), or spawning futures to handle them in the future (in
/// the case of e.g. `/send_after` bundles).
async fn next_event(&mut self) -> Result<(), ServerError> {
debug!("Waiting for UDP packet...");
let raw_packet = self.recv_udp_packet().await?;
debug!("Received UDP packet (size={})", raw_packet.len());
debug!("Parsing OSC packet...");
let osc_packet = rosc::decoder::decode(raw_packet)?;
debug!("Received OSC packet: {:?}", osc_packet);
match osc_packet {
rosc::OscPacket::Message(msg) => {
match msg.addr.as_ref() {
"/flush" => self.handle_msg_flush(&msg),
addr => {
let msg = format!("Ignoring unhandled OSC address: {}", addr);
return Err(ServerError::Protocol(msg));
}
}
},
rosc::OscPacket::Bundle(bundle) => {
if let rosc::OscType::Time(ntp_secs, ntp_subsecs) = bundle.timetag {
match bundle.content.first() {
Some(rosc::OscPacket::Message(msg)) => {
match msg.addr.as_ref() {
"/send_after" => self.handle_bundle_send_after(
DEFAULT_TAG,
timetag_to_duration(ntp_secs, ntp_subsecs),
&msg.args
),
"/send_after_tagged" => {
match Self::parse_send_after_tag(&msg.args) {
Ok(tag) => self.handle_bundle_send_after(
&tag,
timetag_to_duration(ntp_secs, ntp_subsecs),
&msg.args[1..], // 1st argument is tag, already parsed
),
Err(err) => {
let msg = format!("Unexpected tag argument: {}", err);
return Err(ServerError::Protocol(msg));
},
}
},
addr => {
let msg = format!("Unhandled OSC address: {}", addr);
return Err(ServerError::Protocol(msg));
},
}
},
other => {
let msg = format!("Unexpected OSC bundle content: {:?}", other);
return Err(ServerError::Protocol(msg));
}
}
}
},
}
Ok(())
}
/// Await UDP packet. Returns slice into server's buffer.
async fn recv_udp_packet(&mut self) -> Result<&[u8], io::Error> {
let (size, _) = self.socket.recv_from(&mut self.buf).await?;
Ok(&self.buf[..size])
}
/// Handles /flush messages.
fn handle_msg_flush(&mut self, msg: &rosc::OscMessage) {
match msg.args.first() {
Some(rosc::OscType::String(tag)) => {
// Remove tag entry from hash map, and send termination signal to all listening
// receivers.
if let Some((_k, (tx, _rx))) = self.tags.remove_entry(tag) {
debug!("Flushing tag: {}", tag);
tx.broadcast(true).unwrap_or_else(|e| {
warn!("Failed to broadcast: {}", e);
});
}
},
other => warn!("Ignoring unexpected /flush message: {:?}", other),
};
}
/// Handles /send_after and /send_after_tagged bundles.
fn handle_bundle_send_after(&mut self, tag: &str, send_after: Duration, msg_args: &[rosc::OscType]) {
let udp_addr = match Self::parse_command_address(msg_args) {
Ok(addr) => addr,
Err(err) => {
warn!("Ignoring message: {}", err);
return;
},
};
// addr and OSX /<foo> addr
let osc_cmd_addr = match msg_args.get(2) {
Some(rosc::OscType::String(addr)) => addr,
other => {
warn!("Unexpected addr argument: {:?}", other);
return;
},
};
// remove host, port, address from command
let remaining_args = &msg_args[3..];
debug!("Sending OSC command {:?} in: {}ms", remaining_args, send_after.as_millis());
let new_msg = rosc::OscMessage {
addr: osc_cmd_addr.to_owned(),
args: remaining_args.to_vec(),
};
let packet = rosc::OscPacket::Message(new_msg);
let new_buf = match rosc::encoder::encode(&packet) {
Ok(buf) => buf,
Err(err) => {
warn!("Failed to encode requested OSC message: {:?}", err);
return;
}
};
let (_tx, rx) = self.tags.entry(tag.to_owned())
.or_insert_with(|| tokio::sync::watch::channel(false));
let mut rx = rx.clone();
tokio::spawn(async move {
// TODO: better way of doing this, configurable addr, etc.
let loopback = std::net::Ipv4Addr::new(127, 0, 0, 1);
let addr = std::net::SocketAddrV4::new(loopback, 0);
// TODO: error handling
let mut socket = UdpSocket::bind(addr).await.unwrap();
// check if already cancelled, disregard initial value if not
if let Some(true) = rx.recv().await {
debug!("cancelled timer");
return;
}
loop {
select! {
_ = tokio::time::delay_for(send_after).fuse() => break,
cancel = rx.recv().fuse() => {
match cancel {
Some(true) => {
debug!("cancelled timer");
return;
},
// `false` should never be set, but ignore if received
_ => {},
}
},
}
}
// TODO: error handling
debug!("Sending OSC command to: {}", &udp_addr);
match socket.send_to(&new_buf, &udp_addr).await {
Ok(_) => debug!("OSC command sent"),
Err(err) => warn!("Failed to send UDP OSC message: {}", err),
}
});
}
fn parse_send_after_tag(msg_args: &[rosc::OscType]) -> Result<String, String> {
match msg_args.first() {
Some(rosc::OscType::String(tag)) => Ok(tag.to_owned()),
other => Err(format!("Unexpected tag argument: {:?}", other)),
}
}
// TODO: error type
/// Parse OSC server address (host and port) from given OSC message arguments (typically from
/// `/send_after` messages).
fn parse_command_address(msg_args: &[rosc::OscType]) -> Result<String, String> {
let host = match msg_args.first() {
Some(rosc::OscType::String(host)) => {
// Workaround for https://github.com/rust-lang/rust/issues/34202
// affecting OS X / Windows
// TODO: check v6 status of Sonic Pi
if host == "localhost" {
"127.0.0.1"
} else {
host
}
},
other => return Err(format!("Unexpected host argument: {:?}", other)),
};
let port = match msg_args.get(1) {
Some(rosc::OscType::Int(port)) => port,
other => return Err(format!("Unexpected port argument: {:?}", other)),
};
Ok(format!("{}:{}", host, port))
}
}
#[derive(Debug)]
enum ServerError {
/// Network error, typically caused by UDP send/recv here.
Io(io::Error),
/// OSC error, typically caused by failing to encode/decode OSC data structures.
Osc(rosc::OscError),
/// Error in cases where valid OSC packets were received, but containing invalid payloads (e.g.
/// a `/send_after` containing unexpected arguments).
Protocol(String),
}
impl fmt::Display for ServerError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::Io(err) => write!(f, "IO error: {}", err),
Self::Osc(err) => write!(f, "Failed to decode OSC packet: {:?}", err),
Self::Protocol(err) => write!(f, "{}", err),
}
}
}
impl Error for ServerError {}
impl From<io::Error> for ServerError {
fn from(err: io::Error) -> Self {
Self::Io(err)
}
}
impl From<rosc::OscError> for ServerError {
fn from(err: rosc::OscError) -> Self {
Self::Osc(err)
}
}
#[tokio::main]
async fn main() -> Result<(), io::Error> {
env_logger::init(); |
#[cfg(test)]
mod tests {
use crate::timetag_to_unix;
#[test]
fn time_tag_to_unix_1() {
// 2^32 / 2 fractional seconds, i.e. 500,000μs
assert_eq!(timetag_to_unix(3_608_146_800, 2_147_483_648), (1_399_158_000, 500_000));
}
#[test]
fn time_tag_to_unix_2() {
assert_eq!(timetag_to_unix(3549086042, 4010129359), (1340097242, 933680));
}
#[test]
fn time_tag_to_unix_seconds_only() {
assert_eq!(timetag_to_unix(3_608_146_800, 0), (1_399_158_000, 0));
}
// TODO: tests for time tags in the past, invalid time tags, once error requirement determined
} |
let addr = env::args().nth(1).unwrap_or_else(|| "127.0.0.1:4560".to_string());
Server::new(&addr).await?.run().await
} | random_line_split |
main.rs | use std::{env, io, fmt};
use std::time::{Duration, SystemTime};
use std::error::Error;
use std::collections::HashMap;
use tokio::sync;
use tokio::net::UdpSocket;
use log::{debug, info, warn};
use futures::select;
use futures::future::FutureExt;
// Delta between NTP epoch (1900-01-01 00:00:00) and Unix epoch (1970-01-01 00:00:00).
// Contains 53 non-leap years, and 17 leap years, in seconds, this is:
// (53 * 365 + 17 * 366) * 86400 = 2208988800.
const EPOCH_DELTA: u64 = 2_208_988_800;
// Tag name to use for messages without an explicit tag (i.e. currently those sent via
// `/send_after`).
const DEFAULT_TAG: &str = "default";
// Convert an OSC timetag into unix timestamp seconds and microseconds.
//
// [OSC timetags](http://opensoundcontrol.org/spec-1_0) use NTP timestamps
// (https://en.wikipedia.org/wiki/Network_Time_Protocol#Timestamps).
//
// TODO: verify time conversions are actually correct, check against other implementations
fn timetag_to_unix(ntp_secs: u32, ntp_frac_secs: u32) -> (u64, u32) {
let unix_secs = ntp_secs as u64 - EPOCH_DELTA;
let unix_micros = ((ntp_frac_secs as u64) * 1_000_000) >> 32;
(unix_secs, unix_micros as u32)
}
// TODO: verify time conversions are actually correct, check roundtrips
fn timetag_to_duration(ntp_secs: u32, ntp_frac_secs: u32) -> Duration {
let (unix_secs, unix_micros) = timetag_to_unix(ntp_secs, ntp_frac_secs);
// duration of time tag since epoch
let tt_since_epoch = Duration::new(unix_secs, unix_micros * 1000);
// duration of current system time since epoch
let now_since_epoch = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("System is set to before Unix epoch, check clock");
tt_since_epoch - now_since_epoch
}
/*
fn unix_to_timetag(unix_secs: u64, unix_micros: u32) -> (u32, u32) {
let ntp_secs = unix_secs + EPOCH_DELTA;
let ntp_frac_secs = ((unix_micros as u64 + 1) << 32) / 1_000_000;
(ntp_secs as u32, ntp_frac_secs as u32)
}
*/
struct Server {
/// Server's listening UDP socket.
socket: UdpSocket,
/// Internal buffer used for reading/writing UDP packets into.
buf: Vec<u8>,
/// Maps a tag name to sender/receiver pair. Used for signalling cancellations.
tags: HashMap<String, (sync::watch::Sender<bool>, sync::watch::Receiver<bool>)>,
}
impl Server {
pub async fn new(bind_addr: &str) -> Result<Self, io::Error> {
debug!("Attempting to bind to: {}", bind_addr);
let socket = UdpSocket::bind(bind_addr).await?;
info!("Listening on: {}", socket.local_addr()?);
Ok(Self {
socket,
buf: vec![0; 1024],
tags: HashMap::new(),
})
}
/// Main event loop, runs forever after server is started.
async fn run(&mut self) -> Result<(), io::Error> {
debug!("Starting main event loop");
loop {
if let Err(err) = self.next_event().await {
warn!("{}", err);
}
}
}
/// Called from main server event loop (`run()`) on each iteration.
///
/// Waits for incoming UDP packets containing OSC packets, either handling them immediately (in
/// the case of e.g. `/flush` messages), or spawning futures to handle them in the future (in
/// the case of e.g. `/send_after` bundles).
async fn next_event(&mut self) -> Result<(), ServerError> {
debug!("Waiting for UDP packet...");
let raw_packet = self.recv_udp_packet().await?;
debug!("Received UDP packet (size={})", raw_packet.len());
debug!("Parsing OSC packet...");
let osc_packet = rosc::decoder::decode(raw_packet)?;
debug!("Received OSC packet: {:?}", osc_packet);
match osc_packet {
rosc::OscPacket::Message(msg) => {
match msg.addr.as_ref() {
"/flush" => self.handle_msg_flush(&msg),
addr => {
let msg = format!("Ignoring unhandled OSC address: {}", addr);
return Err(ServerError::Protocol(msg));
}
}
},
rosc::OscPacket::Bundle(bundle) => {
if let rosc::OscType::Time(ntp_secs, ntp_subsecs) = bundle.timetag {
match bundle.content.first() {
Some(rosc::OscPacket::Message(msg)) => {
match msg.addr.as_ref() {
"/send_after" => self.handle_bundle_send_after(
DEFAULT_TAG,
timetag_to_duration(ntp_secs, ntp_subsecs),
&msg.args
),
"/send_after_tagged" => {
match Self::parse_send_after_tag(&msg.args) {
Ok(tag) => self.handle_bundle_send_after(
&tag,
timetag_to_duration(ntp_secs, ntp_subsecs),
&msg.args[1..], // 1st argument is tag, already parsed
),
Err(err) => {
let msg = format!("Unexpected tag argument: {}", err);
return Err(ServerError::Protocol(msg));
},
}
},
addr => {
let msg = format!("Unhandled OSC address: {}", addr);
return Err(ServerError::Protocol(msg));
},
}
},
other => {
let msg = format!("Unexpected OSC bundle content: {:?}", other);
return Err(ServerError::Protocol(msg));
}
}
}
},
}
Ok(())
}
/// Await UDP packet. Returns slice into server's buffer.
async fn | (&mut self) -> Result<&[u8], io::Error> {
let (size, _) = self.socket.recv_from(&mut self.buf).await?;
Ok(&self.buf[..size])
}
/// Handles /flush messages.
fn handle_msg_flush(&mut self, msg: &rosc::OscMessage) {
match msg.args.first() {
Some(rosc::OscType::String(tag)) => {
// Remove tag entry from hash map, and send termination signal to all listening
// receivers.
if let Some((_k, (tx, _rx))) = self.tags.remove_entry(tag) {
debug!("Flushing tag: {}", tag);
tx.broadcast(true).unwrap_or_else(|e| {
warn!("Failed to broadcast: {}", e);
});
}
},
other => warn!("Ignoring unexpected /flush message: {:?}", other),
};
}
/// Handles /send_after and /send_after_tagged bundles.
fn handle_bundle_send_after(&mut self, tag: &str, send_after: Duration, msg_args: &[rosc::OscType]) {
let udp_addr = match Self::parse_command_address(msg_args) {
Ok(addr) => addr,
Err(err) => {
warn!("Ignoring message: {}", err);
return;
},
};
// addr and OSX /<foo> addr
let osc_cmd_addr = match msg_args.get(2) {
Some(rosc::OscType::String(addr)) => addr,
other => {
warn!("Unexpected addr argument: {:?}", other);
return;
},
};
// remove host, port, address from command
let remaining_args = &msg_args[3..];
debug!("Sending OSC command {:?} in: {}ms", remaining_args, send_after.as_millis());
let new_msg = rosc::OscMessage {
addr: osc_cmd_addr.to_owned(),
args: remaining_args.to_vec(),
};
let packet = rosc::OscPacket::Message(new_msg);
let new_buf = match rosc::encoder::encode(&packet) {
Ok(buf) => buf,
Err(err) => {
warn!("Failed to encode requested OSC message: {:?}", err);
return;
}
};
let (_tx, rx) = self.tags.entry(tag.to_owned())
.or_insert_with(|| tokio::sync::watch::channel(false));
let mut rx = rx.clone();
tokio::spawn(async move {
// TODO: better way of doing this, configurable addr, etc.
let loopback = std::net::Ipv4Addr::new(127, 0, 0, 1);
let addr = std::net::SocketAddrV4::new(loopback, 0);
// TODO: error handling
let mut socket = UdpSocket::bind(addr).await.unwrap();
// check if already cancelled, disregard initial value if not
if let Some(true) = rx.recv().await {
debug!("cancelled timer");
return;
}
loop {
select! {
_ = tokio::time::delay_for(send_after).fuse() => break,
cancel = rx.recv().fuse() => {
match cancel {
Some(true) => {
debug!("cancelled timer");
return;
},
// `false` should never be set, but ignore if received
_ => {},
}
},
}
}
// TODO: error handling
debug!("Sending OSC command to: {}", &udp_addr);
match socket.send_to(&new_buf, &udp_addr).await {
Ok(_) => debug!("OSC command sent"),
Err(err) => warn!("Failed to send UDP OSC message: {}", err),
}
});
}
fn parse_send_after_tag(msg_args: &[rosc::OscType]) -> Result<String, String> {
match msg_args.first() {
Some(rosc::OscType::String(tag)) => Ok(tag.to_owned()),
other => Err(format!("Unexpected tag argument: {:?}", other)),
}
}
// TODO: error type
/// Parse OSC server address (host and port) from given OSC message arguments (typically from
/// `/send_after` messages).
fn parse_command_address(msg_args: &[rosc::OscType]) -> Result<String, String> {
let host = match msg_args.first() {
Some(rosc::OscType::String(host)) => {
// Workaround for https://github.com/rust-lang/rust/issues/34202
// affecting OS X / Windows
// TODO: check v6 status of Sonic Pi
if host == "localhost" {
"127.0.0.1"
} else {
host
}
},
other => return Err(format!("Unexpected host argument: {:?}", other)),
};
let port = match msg_args.get(1) {
Some(rosc::OscType::Int(port)) => port,
other => return Err(format!("Unexpected port argument: {:?}", other)),
};
Ok(format!("{}:{}", host, port))
}
}
#[derive(Debug)]
enum ServerError {
/// Network error, typically caused by UDP send/recv here.
Io(io::Error),
/// OSC error, typically caused by failing to encode/decode OSC data structures.
Osc(rosc::OscError),
/// Error in cases where valid OSC packets were received, but containing invalid payloads (e.g.
/// a `/send_after` containing unexpected arguments).
Protocol(String),
}
impl fmt::Display for ServerError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::Io(err) => write!(f, "IO error: {}", err),
Self::Osc(err) => write!(f, "Failed to decode OSC packet: {:?}", err),
Self::Protocol(err) => write!(f, "{}", err),
}
}
}
impl Error for ServerError {}
impl From<io::Error> for ServerError {
fn from(err: io::Error) -> Self {
Self::Io(err)
}
}
impl From<rosc::OscError> for ServerError {
fn from(err: rosc::OscError) -> Self {
Self::Osc(err)
}
}
#[tokio::main]
async fn main() -> Result<(), io::Error> {
env_logger::init();
let addr = env::args().nth(1).unwrap_or_else(|| "127.0.0.1:4560".to_string());
Server::new(&addr).await?.run().await
}
#[cfg(test)]
mod tests {
use crate::timetag_to_unix;
#[test]
fn time_tag_to_unix_1() {
// 2^32 / 2 fractional seconds, i.e. 500,000μs
assert_eq!(timetag_to_unix(3_608_146_800, 2_147_483_648), (1_399_158_000, 500_000));
}
#[test]
fn time_tag_to_unix_2() {
assert_eq!(timetag_to_unix(3549086042, 4010129359), (1340097242, 933680));
}
#[test]
fn time_tag_to_unix_seconds_only() {
assert_eq!(timetag_to_unix(3_608_146_800, 0), (1_399_158_000, 0));
}
// TODO: tests for time tags in the past, invalid time tags, once error requirement determined
}
| recv_udp_packet | identifier_name |
main.rs | use std::{env, io, fmt};
use std::time::{Duration, SystemTime};
use std::error::Error;
use std::collections::HashMap;
use tokio::sync;
use tokio::net::UdpSocket;
use log::{debug, info, warn};
use futures::select;
use futures::future::FutureExt;
// Delta between NTP epoch (1900-01-01 00:00:00) and Unix epoch (1970-01-01 00:00:00).
// Contains 53 non-leap years, and 17 leap years, in seconds, this is:
// (53 * 365 + 17 * 366) * 86400 = 2208988800.
const EPOCH_DELTA: u64 = 2_208_988_800;
// Tag name to use for messages without an explicit tag (i.e. currently those sent via
// `/send_after`).
const DEFAULT_TAG: &str = "default";
// Convert an OSC timetag into unix timestamp seconds and microseconds.
//
// [OSC timetags](http://opensoundcontrol.org/spec-1_0) use NTP timestamps
// (https://en.wikipedia.org/wiki/Network_Time_Protocol#Timestamps).
//
// TODO: verify time conversions are actually correct, check against other implementations
fn timetag_to_unix(ntp_secs: u32, ntp_frac_secs: u32) -> (u64, u32) {
let unix_secs = ntp_secs as u64 - EPOCH_DELTA;
let unix_micros = ((ntp_frac_secs as u64) * 1_000_000) >> 32;
(unix_secs, unix_micros as u32)
}
// TODO: verify time conversions are actually correct, check roundtrips
fn timetag_to_duration(ntp_secs: u32, ntp_frac_secs: u32) -> Duration {
let (unix_secs, unix_micros) = timetag_to_unix(ntp_secs, ntp_frac_secs);
// duration of time tag since epoch
let tt_since_epoch = Duration::new(unix_secs, unix_micros * 1000);
// duration of current system time since epoch
let now_since_epoch = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("System is set to before Unix epoch, check clock");
tt_since_epoch - now_since_epoch
}
/*
fn unix_to_timetag(unix_secs: u64, unix_micros: u32) -> (u32, u32) {
let ntp_secs = unix_secs + EPOCH_DELTA;
let ntp_frac_secs = ((unix_micros as u64 + 1) << 32) / 1_000_000;
(ntp_secs as u32, ntp_frac_secs as u32)
}
*/
struct Server {
/// Server's listening UDP socket.
socket: UdpSocket,
/// Internal buffer used for reading/writing UDP packets into.
buf: Vec<u8>,
/// Maps a tag name to sender/receiver pair. Used for signalling cancellations.
tags: HashMap<String, (sync::watch::Sender<bool>, sync::watch::Receiver<bool>)>,
}
impl Server {
pub async fn new(bind_addr: &str) -> Result<Self, io::Error> {
debug!("Attempting to bind to: {}", bind_addr);
let socket = UdpSocket::bind(bind_addr).await?;
info!("Listening on: {}", socket.local_addr()?);
Ok(Self {
socket,
buf: vec![0; 1024],
tags: HashMap::new(),
})
}
/// Main event loop, runs forever after server is started.
async fn run(&mut self) -> Result<(), io::Error> {
debug!("Starting main event loop");
loop {
if let Err(err) = self.next_event().await {
warn!("{}", err);
}
}
}
/// Called from main server event loop (`run()`) on each iteration.
///
/// Waits for incoming UDP packets containing OSC packets, either handling them immediately (in
/// the case of e.g. `/flush` messages), or spawning futures to handle them in the future (in
/// the case of e.g. `/send_after` bundles).
async fn next_event(&mut self) -> Result<(), ServerError> {
debug!("Waiting for UDP packet...");
let raw_packet = self.recv_udp_packet().await?;
debug!("Received UDP packet (size={})", raw_packet.len());
debug!("Parsing OSC packet...");
let osc_packet = rosc::decoder::decode(raw_packet)?;
debug!("Received OSC packet: {:?}", osc_packet);
match osc_packet {
rosc::OscPacket::Message(msg) => {
match msg.addr.as_ref() {
"/flush" => self.handle_msg_flush(&msg),
addr => {
let msg = format!("Ignoring unhandled OSC address: {}", addr);
return Err(ServerError::Protocol(msg));
}
}
},
rosc::OscPacket::Bundle(bundle) => {
if let rosc::OscType::Time(ntp_secs, ntp_subsecs) = bundle.timetag {
match bundle.content.first() {
Some(rosc::OscPacket::Message(msg)) => {
match msg.addr.as_ref() {
"/send_after" => self.handle_bundle_send_after(
DEFAULT_TAG,
timetag_to_duration(ntp_secs, ntp_subsecs),
&msg.args
),
"/send_after_tagged" => {
match Self::parse_send_after_tag(&msg.args) {
Ok(tag) => self.handle_bundle_send_after(
&tag,
timetag_to_duration(ntp_secs, ntp_subsecs),
&msg.args[1..], // 1st argument is tag, already parsed
),
Err(err) => {
let msg = format!("Unexpected tag argument: {}", err);
return Err(ServerError::Protocol(msg));
},
}
},
addr => {
let msg = format!("Unhandled OSC address: {}", addr);
return Err(ServerError::Protocol(msg));
},
}
},
other => {
let msg = format!("Unexpected OSC bundle content: {:?}", other);
return Err(ServerError::Protocol(msg));
}
}
}
},
}
Ok(())
}
/// Await UDP packet. Returns slice into server's buffer.
async fn recv_udp_packet(&mut self) -> Result<&[u8], io::Error> {
let (size, _) = self.socket.recv_from(&mut self.buf).await?;
Ok(&self.buf[..size])
}
/// Handles /flush messages.
fn handle_msg_flush(&mut self, msg: &rosc::OscMessage) {
match msg.args.first() {
Some(rosc::OscType::String(tag)) => {
// Remove tag entry from hash map, and send termination signal to all listening
// receivers.
if let Some((_k, (tx, _rx))) = self.tags.remove_entry(tag) {
debug!("Flushing tag: {}", tag);
tx.broadcast(true).unwrap_or_else(|e| {
warn!("Failed to broadcast: {}", e);
});
}
},
other => warn!("Ignoring unexpected /flush message: {:?}", other),
};
}
/// Handles /send_after and /send_after_tagged bundles.
fn handle_bundle_send_after(&mut self, tag: &str, send_after: Duration, msg_args: &[rosc::OscType]) {
let udp_addr = match Self::parse_command_address(msg_args) {
Ok(addr) => addr,
Err(err) => {
warn!("Ignoring message: {}", err);
return;
},
};
// addr and OSX /<foo> addr
let osc_cmd_addr = match msg_args.get(2) {
Some(rosc::OscType::String(addr)) => addr,
other => {
warn!("Unexpected addr argument: {:?}", other);
return;
},
};
// remove host, port, address from command
let remaining_args = &msg_args[3..];
debug!("Sending OSC command {:?} in: {}ms", remaining_args, send_after.as_millis());
let new_msg = rosc::OscMessage {
addr: osc_cmd_addr.to_owned(),
args: remaining_args.to_vec(),
};
let packet = rosc::OscPacket::Message(new_msg);
let new_buf = match rosc::encoder::encode(&packet) {
Ok(buf) => buf,
Err(err) => {
warn!("Failed to encode requested OSC message: {:?}", err);
return;
}
};
let (_tx, rx) = self.tags.entry(tag.to_owned())
.or_insert_with(|| tokio::sync::watch::channel(false));
let mut rx = rx.clone();
tokio::spawn(async move {
// TODO: better way of doing this, configurable addr, etc.
let loopback = std::net::Ipv4Addr::new(127, 0, 0, 1);
let addr = std::net::SocketAddrV4::new(loopback, 0);
// TODO: error handling
let mut socket = UdpSocket::bind(addr).await.unwrap();
// check if already cancelled, disregard initial value if not
if let Some(true) = rx.recv().await {
debug!("cancelled timer");
return;
}
loop {
select! {
_ = tokio::time::delay_for(send_after).fuse() => break,
cancel = rx.recv().fuse() => {
match cancel {
Some(true) => {
debug!("cancelled timer");
return;
},
// `false` should never be set, but ignore if received
_ => {},
}
},
}
}
// TODO: error handling
debug!("Sending OSC command to: {}", &udp_addr);
match socket.send_to(&new_buf, &udp_addr).await {
Ok(_) => debug!("OSC command sent"),
Err(err) => warn!("Failed to send UDP OSC message: {}", err),
}
});
}
fn parse_send_after_tag(msg_args: &[rosc::OscType]) -> Result<String, String> {
match msg_args.first() {
Some(rosc::OscType::String(tag)) => Ok(tag.to_owned()),
other => Err(format!("Unexpected tag argument: {:?}", other)),
}
}
// TODO: error type
/// Parse OSC server address (host and port) from given OSC message arguments (typically from
/// `/send_after` messages).
fn parse_command_address(msg_args: &[rosc::OscType]) -> Result<String, String> {
let host = match msg_args.first() {
Some(rosc::OscType::String(host)) => {
// Workaround for https://github.com/rust-lang/rust/issues/34202
// affecting OS X / Windows
// TODO: check v6 status of Sonic Pi
if host == "localhost" {
"127.0.0.1"
} else {
host
}
},
other => return Err(format!("Unexpected host argument: {:?}", other)),
};
let port = match msg_args.get(1) {
Some(rosc::OscType::Int(port)) => port,
other => return Err(format!("Unexpected port argument: {:?}", other)),
};
Ok(format!("{}:{}", host, port))
}
}
#[derive(Debug)]
enum ServerError {
/// Network error, typically caused by UDP send/recv here.
Io(io::Error),
/// OSC error, typically caused by failing to encode/decode OSC data structures.
Osc(rosc::OscError),
/// Error in cases where valid OSC packets were received, but containing invalid payloads (e.g.
/// a `/send_after` containing unexpected arguments).
Protocol(String),
}
impl fmt::Display for ServerError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl Error for ServerError {}
impl From<io::Error> for ServerError {
fn from(err: io::Error) -> Self {
Self::Io(err)
}
}
impl From<rosc::OscError> for ServerError {
fn from(err: rosc::OscError) -> Self {
Self::Osc(err)
}
}
#[tokio::main]
async fn main() -> Result<(), io::Error> {
env_logger::init();
let addr = env::args().nth(1).unwrap_or_else(|| "127.0.0.1:4560".to_string());
Server::new(&addr).await?.run().await
}
#[cfg(test)]
mod tests {
use crate::timetag_to_unix;
#[test]
fn time_tag_to_unix_1() {
// 2^32 / 2 fractional seconds, i.e. 500,000μs
assert_eq!(timetag_to_unix(3_608_146_800, 2_147_483_648), (1_399_158_000, 500_000));
}
#[test]
fn time_tag_to_unix_2() {
assert_eq!(timetag_to_unix(3549086042, 4010129359), (1340097242, 933680));
}
#[test]
fn time_tag_to_unix_seconds_only() {
assert_eq!(timetag_to_unix(3_608_146_800, 0), (1_399_158_000, 0));
}
// TODO: tests for time tags in the past, invalid time tags, once error requirement determined
}
| {
match self {
Self::Io(err) => write!(f, "IO error: {}", err),
Self::Osc(err) => write!(f, "Failed to decode OSC packet: {:?}", err),
Self::Protocol(err) => write!(f, "{}", err),
}
} | identifier_body |
001-rnn+lstm+crf.py | """
@file : 001-rnn+lstm+crf.py
@author: xiaolu
@time : 2019-09-06
"""
import re
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
'''
:param dim_word: 词的维度
:param dim_char: 字符维度
:param dropout: dropout
:param learning_rate: 学习率
:param hidden_size_char: 字符隐层输出维度
:param hidden_size_word: 词隐层输出维度
:param num_layers: 几层
'''
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(
tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout
)
# 1. define input
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
# 2. embedding
self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)
s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_char),
cell_bw=cells(hidden_size_char),
inputs=char_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_char_%d' % n
)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1) # 将词嵌入部分与字符嵌入通过双向lstm输出部分进行拼接
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_word),
cell_bw=cells(hidden_size_word),
inputs=word_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_word_%d' % n
)
word_embedded = tf.concat((out_fw, out_bw), 2)
logits = tf.layers.Dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths
)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(
logits, transition_params, self.lengths
)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
'''
加载文件并且解析
:param file: 文件名
:return: 词<->词性
'''
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
'''
:param string:
:return:
'''
string= re.sub('[^A-Za-z0-9\-\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
def parse_XY(texts, labels):
'''
整理词性表 词表 字符表 并将文本转为对应的数字序列
:param texts: 文本 词的一个列表
:param labels: 词性的一个列表
:return: 词转为id的序列 词性转为id的序列
'''
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
text = text.lower() # 当前这个单词转小写
tag = labels[no] # 取出对应的词性
for c in text: # 字符表
if c not in char2idx:
char2idx[c] = char_idx
char_idx += 1
if tag not in tag2idx: # 词性表
tag2idx[tag] = tag_idx
tag_idx += 1
Y.append(tag2idx[tag]) # 当前这个词的词性转为id的值
if text not in word2idx: # 词表
word2idx[text] = word_idx
word_idx += 1
X.append(word2idx[text]) # 将词转为id的标号
return X, np.array(Y)
def iter_seq(x):
return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])
def to_train_seq(*args):
'''
:param args: 词转为的id的序列 词性转为id的序列
:return:
'''
r | ar_seq(batch):
'''
传进来是50一个块 总共有多少块
然后将每块的单词转为字符序列
:param batch:
:return:
'''
x = [[len(idx2word[i]) for i in k] for k in batch] # 得出每个单词的长度
maxlen = max([j for i in x for j in i]) # 最大长度
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1-no] = char2idx[c]
return temp # [文章数, 单词个数, maxlen(每个单词按字符转的id)]
def pred2label(pred):
# 将预测结果转为标签
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
if __name__ == '__main__':
left_train, right_train = parse('./data/eng.train')
left_test, right_test = parse('./data/eng.testa')
# print(left_train[:10])
# print(right_train[:10])
word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # 词表
tag2idx = {'PAD': 0} # 词性表
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
# print(train_X[:20])
# print(train_Y[:20])
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
X_seq, Y_seq = to_train_seq(train_X, train_Y) # 长度为50为一个段落
X_char_seq = generate_char_seq(X_seq)
print(X_seq.shape) # (203571, 50)
print(X_char_seq.shape) # (203571, 50, 61)
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
print(X_seq_test.shape) # (51312, 50)
print(X_char_seq_test.shape) # (51312, 50, 27)
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
tf.reset_default_graph()
sess = tf.Session()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 1e-3
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 32
model = Model(dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers)
sess.run(tf.global_variables_initializer())
for e in range(3):
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
for i in range(0, len(train_X), batch_size):
batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
train_loss += cost
train_acc += acc
print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
test_loss += cost
test_acc += acc
print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc))
real_Y, predict_Y = [], []
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
predicted = pred2label(
sess.run(model.tags_seq,
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
},
)
)
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel())) | eturn [iter_seq(x) for x in args]
def generate_ch | conditional_block |
001-rnn+lstm+crf.py | """
@file : 001-rnn+lstm+crf.py
@author: xiaolu
@time : 2019-09-06
"""
import re
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
'''
:param dim_word: 词的维度
:param dim_char: 字符维度
:param dropout: dropout
:param learning_rate: 学习率
:param hidden_size_char: 字符隐层输出维度
:param hidden_size_word: 词隐层输出维度
:param num_layers: 几层
'''
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(
tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout
)
# 1. define input
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
# 2. embedding
self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)
s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_char),
cell_bw=cells(hidden_size_char),
inputs=char_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_char_%d' % n
)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1) # 将词嵌入部分与字符嵌入通过双向lstm输出部分进行拼接
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_word),
cell_bw=cells(hidden_size_word),
inputs=word_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_word_%d' % n
)
word_embedded = tf.concat((out_fw, out_bw), 2)
logits = tf.layers.Dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths
)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(
logits, transition_params, self.lengths
)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
'''
加载文件并且解析
:param file: 文件名
:return: 词<->词性
'''
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
'''
:param string:
:return:
'''
string= re.sub('[^A-Za-z0-9\-\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
def parse_XY(texts, labels):
'''
整理词性表 词表 字符表 并将文本转为对应的数字序列
:param texts: 文本 词的一个列表
:param labels: 词性的一个列表
:return: 词转为id的序列 词性转为id的序列
'''
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
text = text.lower() # 当前这个单词转小写
tag = labels[no] # 取出对应的词性
for c in text: # 字符表
if c not in char2idx:
char2idx[c] = char_idx
char_idx += 1
if tag not in tag2idx: # 词性表
tag2idx[tag] = tag_idx
tag_idx += 1
Y.append(tag2idx[tag]) # 当前这个词的词性转为id的值 | if text not in word2idx: # 词表
word2idx[text] = word_idx
word_idx += 1
X.append(word2idx[text]) # 将词转为id的标号
return X, np.array(Y)
def iter_seq(x):
return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])
def to_train_seq(*args):
'''
:param args: 词转为的id的序列 词性转为id的序列
:return:
'''
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
'''
传进来是50一个块 总共有多少块
然后将每块的单词转为字符序列
:param batch:
:return:
'''
x = [[len(idx2word[i]) for i in k] for k in batch] # 得出每个单词的长度
maxlen = max([j for i in x for j in i]) # 最大长度
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1-no] = char2idx[c]
return temp # [文章数, 单词个数, maxlen(每个单词按字符转的id)]
def pred2label(pred):
# 将预测结果转为标签
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
if __name__ == '__main__':
left_train, right_train = parse('./data/eng.train')
left_test, right_test = parse('./data/eng.testa')
# print(left_train[:10])
# print(right_train[:10])
word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # 词表
tag2idx = {'PAD': 0} # 词性表
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
# print(train_X[:20])
# print(train_Y[:20])
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
X_seq, Y_seq = to_train_seq(train_X, train_Y) # 长度为50为一个段落
X_char_seq = generate_char_seq(X_seq)
print(X_seq.shape) # (203571, 50)
print(X_char_seq.shape) # (203571, 50, 61)
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
print(X_seq_test.shape) # (51312, 50)
print(X_char_seq_test.shape) # (51312, 50, 27)
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
tf.reset_default_graph()
sess = tf.Session()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 1e-3
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 32
model = Model(dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers)
sess.run(tf.global_variables_initializer())
for e in range(3):
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
for i in range(0, len(train_X), batch_size):
batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
train_loss += cost
train_acc += acc
print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
test_loss += cost
test_acc += acc
print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc))
real_Y, predict_Y = [], []
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
predicted = pred2label(
sess.run(model.tags_seq,
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
},
)
)
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel())) | random_line_split | |
001-rnn+lstm+crf.py | """
@file : 001-rnn+lstm+crf.py
@author: xiaolu
@time : 2019-09-06
"""
import re
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
'''
:param dim_word: 词的维度
:param dim_char: 字符维度
:param dropout: dropout
:param learning_rate: 学习率
:param hidden_size_char: 字符隐层输出维度
:param hidden_size_word: 词隐层输出维度
:param num_layers: 几层
'''
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(
tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout
)
# 1. define input
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
# 2. embedding
self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)
s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_char),
cell_bw=cells(hidden_size_char),
inputs=char_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_char_%d' % n
)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1) # 将词嵌入部分与字符嵌入通过双向lstm输出部分进行拼接
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_word),
cell_bw=cells(hidden_size_word),
inputs=word_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_word_%d' % n
)
word_embedded = tf.concat((out_fw, out_bw), 2)
logits = tf.layers.Dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths
)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(
logits, transition_params, self.lengths
)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
'''
加载文件并且解析
:param file: 文件名
:return: 词<->词性
'''
with open(file) | open:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
'''
:param string:
:return:
'''
string= re.sub('[^A-Za-z0-9\-\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
def parse_XY(texts, labels):
'''
整理词性表 词表 字符表 并将文本转为对应的数字序列
:param texts: 文本 词的一个列表
:param labels: 词性的一个列表
:return: 词转为id的序列 词性转为id的序列
'''
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
text = text.lower() # 当前这个单词转小写
tag = labels[no] # 取出对应的词性
for c in text: # 字符表
if c not in char2idx:
char2idx[c] = char_idx
char_idx += 1
if tag not in tag2idx: # 词性表
tag2idx[tag] = tag_idx
tag_idx += 1
Y.append(tag2idx[tag]) # 当前这个词的词性转为id的值
if text not in word2idx: # 词表
word2idx[text] = word_idx
word_idx += 1
X.append(word2idx[text]) # 将词转为id的标号
return X, np.array(Y)
def iter_seq(x):
return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])
def to_train_seq(*args):
'''
:param args: 词转为的id的序列 词性转为id的序列
:return:
'''
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
'''
传进来是50一个块 总共有多少块
然后将每块的单词转为字符序列
:param batch:
:return:
'''
x = [[len(idx2word[i]) for i in k] for k in batch] # 得出每个单词的长度
maxlen = max([j for i in x for j in i]) # 最大长度
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1-no] = char2idx[c]
return temp # [文章数, 单词个数, maxlen(每个单词按字符转的id)]
def pred2label(pred):
# 将预测结果转为标签
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
if __name__ == '__main__':
left_train, right_train = parse('./data/eng.train')
left_test, right_test = parse('./data/eng.testa')
# print(left_train[:10])
# print(right_train[:10])
word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # 词表
tag2idx = {'PAD': 0} # 词性表
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
# print(train_X[:20])
# print(train_Y[:20])
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
X_seq, Y_seq = to_train_seq(train_X, train_Y) # 长度为50为一个段落
X_char_seq = generate_char_seq(X_seq)
print(X_seq.shape) # (203571, 50)
print(X_char_seq.shape) # (203571, 50, 61)
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
print(X_seq_test.shape) # (51312, 50)
print(X_char_seq_test.shape) # (51312, 50, 27)
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
tf.reset_default_graph()
sess = tf.Session()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 1e-3
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 32
model = Model(dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers)
sess.run(tf.global_variables_initializer())
for e in range(3):
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
for i in range(0, len(train_X), batch_size):
batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
train_loss += cost
train_acc += acc
print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
test_loss += cost
test_acc += acc
print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc))
real_Y, predict_Y = [], []
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
predicted = pred2label(
sess.run(model.tags_seq,
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
},
)
)
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel())) | as f | identifier_name |
001-rnn+lstm+crf.py | """
@file : 001-rnn+lstm+crf.py
@author: xiaolu
@time : 2019-09-06
"""
import re
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
'''
:param dim_word: 词的维度
:param dim_char: 字符维度
:param dropout: dropout
:param learning_rate: 学习率
:param hidden_size_char: 字符隐层输出维度
:param hidden_size_word: 词隐层输出维度
:param num_layers: 几层
'''
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(
tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout
)
# 1. define input
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
# 2. embedding
self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)
s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_char),
cell_bw=cells(hidden_size_char),
inputs=char_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_char_%d' % n
)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1) # 将词嵌入部分与字符嵌入通过双向lstm输出部分进行拼接
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_word),
cell_bw=cells(hidden_size_word),
inputs=word_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_word_%d' % n
)
word_embedded = tf.concat((out_fw, out_bw), 2)
logits = tf.layers.Dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths
)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(
logits, transition_params, self.lengths
)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
'''
加载文件并且解析
:param file: 文件名
:return: 词<->词性
'''
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
'''
:param string:
:return:
'''
string= re.sub('[^A-Za-z0-9\-\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
def parse_XY(texts, labels):
'''
整理词性表 词表 字符表 并将文本转为对应的数字序列
:param texts: 文本 词的一个列表
:param labels: 词性的一个列表
:return: 词转为id的序列 词性转为id的序列
'''
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
text = text.lower() # 当前这个单词转小写
tag = labels[no] # 取出对应的词性
for c in text: # 字符表
if c not in char2idx:
char2idx[c] = char_idx
char_idx += 1
if tag not in tag2idx: # 词性表
tag2idx[tag] = tag_idx
tag_idx += 1
Y.append(tag2idx[tag]) # 当前这个词的词性转为id的值
if text not in word2idx: # 词表
word2idx[text] = word_idx
word_idx += 1
X.append(word2idx[text]) # 将词转为id的标号
return X, np.array(Y)
def iter_seq(x):
return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])
def to_train_seq(*args):
'''
:param args: 词转为的id的序列 词性转为id的序列
:return:
'''
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
'''
传进来是50一个块 总共有多少块
然后将每块的单词转为字符序列
:param batch:
:return:
'''
x = [[len(idx2word[i]) for i in k] for k in batch] # 得出每个单词的长度
maxlen = max([j for i in x for j in i]) # 最大长度
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1-no] = char2idx[c]
return temp # [文章数, 单词个数, maxlen(每个单词按字符转的id)]
def pred2label(pred):
# 将预测结果转为标签
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
if __name__ == '__main__':
left_train, right_train = parse('./data/eng.train')
left_test, right_test = parse('./data/eng.testa')
# print(left_train[:10])
# print(right_train[:10])
word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # 词表
tag2idx = {'PAD': 0} # 词性表
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
| # print(train_Y[:20])
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
X_seq, Y_seq = to_train_seq(train_X, train_Y) # 长度为50为一个段落
X_char_seq = generate_char_seq(X_seq)
print(X_seq.shape) # (203571, 50)
print(X_char_seq.shape) # (203571, 50, 61)
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
print(X_seq_test.shape) # (51312, 50)
print(X_char_seq_test.shape) # (51312, 50, 27)
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
tf.reset_default_graph()
sess = tf.Session()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 1e-3
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 32
model = Model(dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers)
sess.run(tf.global_variables_initializer())
for e in range(3):
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
for i in range(0, len(train_X), batch_size):
batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
train_loss += cost
train_acc += acc
print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
test_loss += cost
test_acc += acc
print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc))
real_Y, predict_Y = [], []
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
predicted = pred2label(
sess.run(model.tags_seq,
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
},
)
)
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel())) | char_idx = 1
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
# print(train_X[:20])
| identifier_body |
lz4.rs | /*!
LZ4 Decompression and Compression. Requires `lz4` feature, enabled by default
This module contains an implementation in Rust of decompression and compression
of LZ4-encoded streams. These are exposed as a standard `Reader` and `Writer`
interfaces wrapping an underlying stream.
# Example
```rust,ignore
use compress::lz4;
use std::fs::File;
use std::path::Path;
use std::io::Read;
let stream = File::open(&Path::new("path/to/file.lz4")).unwrap();
let mut decompressed = Vec::new();
lz4::Decoder::new(stream).read_to_end(&mut decompressed);
```
# Credit
This implementation is largely based on Branimir Karadžić's implementation which
can be found at https://github.com/bkaradzic/go-lz4.
*/
use std::cmp;
use std::ptr::copy_nonoverlapping;
use std::io::{self, Read, Write};
use std::iter::repeat;
use std::vec::Vec;
use std::num::Wrapping;
use std::ops::Shr;
use super::byteorder::{LittleEndian, WriteBytesExt, ReadBytesExt};
use super::{ReadExact, byteorder_err_to_io};
const MAGIC: u32 = 0x184d2204;
const ML_BITS: u32 = 4;
const ML_MASK: u32 = (1 << ML_BITS as usize) - 1;
const RUN_BITS: u32 = 8 - ML_BITS;
const RUN_MASK: u32 = (1 << RUN_BITS as usize) - 1;
const MIN_MATCH: u32 = 4;
const HASH_LOG: u32 = 17;
const HASH_TABLE_SIZE: u32 = 1 << (HASH_LOG as usize);
const HASH_SHIFT: u32 = (MIN_MATCH * 8) - HASH_LOG;
const INCOMPRESSIBLE: u32 = 128;
const UNINITHASH: u32 = 0x88888888;
const MAX_INPUT_SIZE: u32 = 0x7e000000;
struct BlockDecoder<'a> {
input: &'a [u8],
output: &'a mut Vec<u8>,
cur: usize,
start: usize,
end: usize,
}
impl<'a> BlockDecoder<'a> {
/// Decodes this block of data from 'input' to 'output', returning the
/// number of valid bytes in the output.
fn decode(&mut self) -> usize {
while self.cur < self.input.len() {
let code = self.bump();
debug!("block with code: {:x}", code);
// Extract a chunk of data from the input to the output.
{
let len = self.length(code >> 4);
debug!("consume len {}", len);
if len > 0 {
let end = self.end;
self.grow_output(end + len);
unsafe { copy_nonoverlapping(
&self.input[self.cur],
&mut self.output[end],
len
)};
self.end += len;
self.cur += len;
}
}
if self.cur == self.input.len() { break }
// Read off the next i16 offset
{
let back = (self.bump() as usize) | ((self.bump() as usize) << 8);
debug!("found back {}", back);
self.start = self.end - back;
}
// Slosh around some bytes now
{
let mut len = self.length(code & 0xf);
let literal = self.end - self.start;
if literal < 4 {
static DECR: [usize; 4] = [0, 3, 2, 3];
self.cp(4, DECR[literal]);
} else {
len += 4;
}
self.cp(len, 0);
}
}
self.end
}
fn length(&mut self, code: u8) -> usize {
let mut ret = code as usize;
if code == 0xf {
loop {
let tmp = self.bump();
ret += tmp as usize;
if tmp != 0xff { break }
}
}
ret
}
fn bump(&mut self) -> u8 {
let ret = self.input[self.cur];
self.cur += 1;
ret
}
#[inline]
fn cp(&mut self, len: usize, decr: usize) {
let end = self.end;
self.grow_output(end + len);
for i in 0..len {
self.output[end + i] = (*self.output)[self.start + i];
}
self.end += len;
self.start += len - decr;
}
// Extends the output vector to a target number of bytes (in total), but
// does not actually initialize the new data. The length of the vector is
// updated, but the bytes will all have undefined values. It is assumed that
// the next operation is to pave over these bytes (so the initialization is
// unnecessary).
#[inline]
fn grow_output(&mut self, target: usize) {
if self.output.capacity() < target {
debug!("growing {} to {}", self.output.capacity(), target);
//let additional = target - self.output.capacity();
//self.output.reserve(additional);
while self.output.len() < target {
self.output.push(0);
}
}else {
unsafe {
self.output.set_len(target);
}
}
}
}
struct BlockEncoder<'a> {
input: &'a [u8],
output: &'a mut Vec<u8>,
hash_table: Vec<u32>,
pos: u32,
anchor: u32,
dest_pos: u32
}
/// Returns maximum possible size of compressed output
/// given source size
pub fn compression_bound(size: u32) -> Option<u32> {
if size > MAX_INPUT_SIZE {
None
} else {
Some(size + (size / 255) + 16 + 4)
}
}
impl<'a> BlockEncoder<'a> {
#[inline(always)]
fn seq_at(&self, pos: u32) -> u32 {
(self.input[pos as usize + 3] as u32) << 24
| (self.input[pos as usize + 2] as u32) << 16
| (self.input[pos as usize + 1] as u32) << 8
| (self.input[pos as usize] as u32)
}
fn write_literals(&mut self, len: u32, ml_len: u32, pos: u32) {
let mut ln = len;
let code = if ln > RUN_MASK - 1 { RUN_MASK as u8 } else { ln as u8 };
if ml_len > ML_MASK - 1 {
self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ML_MASK as u8;
} else {
self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ml_len as u8;
}
self.dest_pos += 1;
if code == RUN_MASK as u8 {
| // FIXME: find out why slicing syntax fails tests
//self.output[self.dest_pos as usize .. (self.dest_pos + len) as usize] = self.input[pos as uint.. (pos + len) as uint];
for i in 0..(len as usize) {
self.output[self.dest_pos as usize + i] = self.input[pos as usize + i];
}
self.dest_pos += len;
}
fn encode(&mut self) -> u32 {
let input_len = self.input.len() as u32;
match compression_bound(input_len) {
None => 0,
Some(out_size) => {
let out_size_usize = out_size as usize;
if self.output.capacity() < out_size_usize {
let additional = out_size_usize - self.output.capacity();
self.output.reserve(additional);
}
unsafe {self.output.set_len(out_size_usize); }
let mut step = 1u32;
let mut limit = INCOMPRESSIBLE;
loop {
if self.pos + 12 > input_len {
let tmp = self.anchor;
self.write_literals(self.input.len() as u32 - tmp, 0, tmp);
unsafe { self.output.set_len(self.dest_pos as usize) };
return self.dest_pos;
}
let seq = self.seq_at(self.pos);
let hash = (Wrapping(seq) * Wrapping(2654435761)).shr(HASH_SHIFT as usize).0;
let mut r = (Wrapping(self.hash_table[hash as usize]) + Wrapping(UNINITHASH)).0;
self.hash_table[hash as usize] = (Wrapping(self.pos) - Wrapping(UNINITHASH)).0;
if (Wrapping(self.pos) - Wrapping(r)).shr(16).0 != 0 || seq != self.seq_at(r) {
if self.pos - self.anchor > limit {
limit = limit << 1;
step += 1 + (step >> 2);
}
self.pos += step;
continue;
}
if step > 1 {
self.hash_table[hash as usize] = r - UNINITHASH;
self.pos -= step - 1;
step = 1;
continue;
}
limit = INCOMPRESSIBLE;
let ln = self.pos - self.anchor;
let back = self.pos - r;
let anchor = self.anchor;
self.pos += MIN_MATCH;
r += MIN_MATCH;
self.anchor = self.pos;
while (self.pos < input_len - 5) && self.input[self.pos as usize] == self.input[r as usize] {
self.pos += 1;
r += 1
}
let mut ml_len = self.pos - self.anchor;
self.write_literals(ln, ml_len, anchor);
self.output[self.dest_pos as usize] = back as u8;
self.output[self.dest_pos as usize + 1] = (back >> 8) as u8;
self.dest_pos += 2;
if ml_len > ML_MASK - 1 {
ml_len -= ML_MASK;
while ml_len > 254 {
ml_len -= 255;
self.output[self.dest_pos as usize] = 255;
self.dest_pos += 1;
}
self.output[self.dest_pos as usize] = ml_len as u8;
self.dest_pos += 1;
}
self.anchor = self.pos;
}
}
}
}
}
/// This structure is used to decode a stream of LZ4 blocks. This wraps an
/// internal reader which is read from when this decoder's read method is
/// called.
pub struct Decoder<R> {
/// The internally wrapped reader. This is exposed so it may be moved out
/// of. Note that if data is read from the reader while decoding is in
/// progress the output stream will get corrupted.
pub r: R,
temp: Vec<u8>,
output: Vec<u8>,
start: usize,
end: usize,
eof: bool,
header: bool,
blk_checksum: bool,
stream_checksum: bool,
max_block_size: usize,
}
impl<R: Read + Sized> Decoder<R> {
/// Creates a new decoder which will read data from the given stream. The
/// inner stream can be re-acquired by moving out of the `r` field of this
/// structure.
pub fn new(r: R) -> Decoder<R> {
Decoder {
r: r,
temp: Vec::new(),
output: Vec::new(),
header: false,
blk_checksum: false,
stream_checksum: false,
start: 0,
end: 0,
eof: false,
max_block_size: 0,
}
}
/// Resets this decoder back to its initial state. Note that the underlying
/// stream is not seeked on or has any alterations performed on it.
pub fn reset(&mut self) {
self.header = false;
self.eof = false;
self.start = 0;
self.end = 0;
}
fn read_header(&mut self) -> io::Result<()> {
// Make sure the magic number is what's expected.
if try!(self.r.read_u32::<LittleEndian>()) != MAGIC {
return Err(io::Error::new(io::ErrorKind::InvalidInput, ""))
}
let mut bits = [0; 3];
try!(self.r.read(&mut bits[..2]));
let flg = bits[0];
let bd = bits[1];
// bits 7/6, the version number. Right now this must be 1
if (flg >> 6) != 0b01 {
return Err(io::Error::new(io::ErrorKind::InvalidInput, ""))
}
// bit 5 is the "block independence", don't care about this yet
// bit 4 is whether blocks have checksums or not
self.blk_checksum = (flg & 0x10) != 0;
// bit 3 is whether there is a following stream size
let stream_size = (flg & 0x08) != 0;
// bit 2 is whether there is a stream checksum
self.stream_checksum = (flg & 0x04) != 0;
// bit 1 is reserved
// bit 0 is whether there is a preset dictionary
let preset_dictionary = (flg & 0x01) != 0;
static MAX_SIZES: [usize; 8] =
[0, 0, 0, 0, // all N/A
64 << 10, // 64KB
256 << 10, // 256 KB
1 << 20, // 1MB
4 << 20]; // 4MB
// bit 7 is reserved
// bits 6-4 are the maximum block size
let max_block_size = MAX_SIZES[(bd >> 4) as usize & 0x7];
// bits 3-0 are reserved
// read off other portions of the stream
let size = if stream_size {
Some(try!(self.r.read_u64::<LittleEndian>()))
} else {
None
};
assert!(!preset_dictionary, "preset dictionaries not supported yet");
debug!("blk: {}", self.blk_checksum);
debug!("stream: {}", self.stream_checksum);
debug!("max size: {}", max_block_size);
debug!("stream size: {:?}", size);
self.max_block_size = max_block_size;
// XXX: implement checksums
let cksum = try!(self.r.read_u8());
debug!("ignoring header checksum: {}", cksum);
return Ok(());
}
fn decode_block(&mut self) -> io::Result<bool> {
match try!(self.r.read_u32::<LittleEndian>()) {
// final block, we're done here
0 => return Ok(false),
// raw block to read
n if n & 0x80000000 != 0 => {
let amt = (n & 0x7fffffff) as usize;
self.output.truncate(0);
self.output.reserve(amt);
try!(self.r.push_exactly(amt as u64, &mut self.output));
self.start = 0;
self.end = amt;
}
// actual block to decompress
n => {
let n = n as usize;
self.temp.truncate(0);
self.temp.reserve(n);
try!(self.r.push_exactly(n as u64, &mut self.temp));
let target = cmp::min(self.max_block_size, 4 * n / 3);
self.output.truncate(0);
self.output.reserve(target);
let mut decoder = BlockDecoder {
input: &self.temp[..n],
output: &mut self.output,
cur: 0,
start: 0,
end: 0,
};
self.start = 0;
self.end = decoder.decode();
}
}
if self.blk_checksum {
let cksum = try!(self.r.read_u32::<LittleEndian>());
debug!("ignoring block checksum {}", cksum);
}
return Ok(true);
}
/// Tests whether the end of this LZ4 stream has been reached
pub fn eof(&mut self) -> bool { self.eof }
}
impl<R: Read> Read for Decoder<R> {
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
if self.eof { return Ok(0) }
if !self.header {
try!(self.read_header());
self.header = true;
}
let mut amt = dst.len();
let len = amt;
while amt > 0 {
if self.start == self.end {
let keep_going = try!(self.decode_block());
if !keep_going {
self.eof = true;
break;
}
}
let n = cmp::min(amt, self.end - self.start);
unsafe { copy_nonoverlapping(
&self.output[self.start],
&mut dst[len - amt],
n
)};
self.start += n;
amt -= n;
}
Ok(len - amt)
}
}
/// This structure is used to compress a stream of bytes using the LZ4
/// compression algorithm. This is a wrapper around an internal writer which
/// bytes will be written to.
pub struct Encoder<W> {
w: W,
buf: Vec<u8>,
tmp: Vec<u8>,
wrote_header: bool,
limit: usize,
}
impl<W: Write> Encoder<W> {
/// Creates a new encoder which will have its output written to the given
/// output stream. The output stream can be re-acquired by calling
/// `finish()`
///
/// NOTE: compression isn't actually implemented just yet, this is just a
/// skeleton of a future implementation.
pub fn new(w: W) -> Encoder<W> {
Encoder {
w: w,
wrote_header: false,
buf: Vec::with_capacity(1024),
tmp: Vec::new(),
limit: 256 * 1024,
}
}
fn encode_block(&mut self) -> io::Result<()> {
self.tmp.truncate(0);
if self.compress() {
try!(self.w.write_u32::<LittleEndian>(self.tmp.len() as u32));
try!(self.w.write(&self.tmp));
} else {
try!(self.w.write_u32::<LittleEndian>((self.buf.len() as u32) | 0x80000000));
try!(self.w.write(&self.buf));
}
self.buf.truncate(0);
Ok(())
}
fn compress(&mut self) -> bool {
false
}
/// This function is used to flag that this session of compression is done
/// with. The stream is finished up (final bytes are written), and then the
/// wrapped writer is returned.
pub fn finish(mut self) -> (W, io::Result<()>) {
let mut result = self.flush();
for _ in 0..2 {
let tmp = self.w.write_u32::<LittleEndian>(0)
.map_err(byteorder_err_to_io);
result = result.and_then(|_| tmp);
}
(self.w, result)
}
}
impl<W: Write> Write for Encoder<W> {
fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
if !self.wrote_header {
try!(self.w.write_u32::<LittleEndian>(MAGIC));
// version 01, turn on block independence, but turn off
// everything else (we have no checksums right now).
try!(self.w.write_u8(0b01_100000));
// Maximum block size is 256KB
try!(self.w.write_u8(0b0_101_0000));
// XXX: this checksum is just plain wrong.
try!(self.w.write_u8(0));
self.wrote_header = true;
}
while buf.len() > 0 {
let amt = cmp::min(self.limit - self.buf.len(), buf.len());
self.buf.extend(buf[..amt].iter().map(|b| *b));
if self.buf.len() == self.limit {
try!(self.encode_block());
}
buf = &buf[amt..];
}
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
if self.buf.len() > 0 {
try!(self.encode_block());
}
self.w.flush()
}
}
/// Decodes pure LZ4 block into output. Returns count of bytes
/// processed.
pub fn decode_block(input: &[u8], output: &mut Vec<u8>) -> usize {
let mut b = BlockDecoder {
input: input,
output: output,
cur: 0,
start: 0,
end: 0
};
b.decode()
}
/// Encodes input into pure LZ4 block. Return count of bytes
/// processed.
pub fn encode_block(input: &[u8], output: &mut Vec<u8>) -> usize {
let mut encoder = BlockEncoder {
input: input,
output: output,
hash_table: repeat(0).take(HASH_TABLE_SIZE as usize).collect(),
pos: 0,
anchor: 0,
dest_pos: 0
};
encoder.encode() as usize
}
#[cfg(test)]
mod test {
use std::io::{BufReader, BufWriter, Read, Write};
use super::super::rand;
use super::{Decoder, Encoder};
#[cfg(feature="unstable")]
use test;
use super::super::byteorder::ReadBytesExt;
fn test_decode(input: &[u8], output: &[u8]) {
let mut d = Decoder::new(BufReader::new(input));
let mut buf = Vec::new();
d.read_to_end(&mut buf).unwrap();
assert!(&buf[..] == output);
}
#[test]
fn decode() {
let reference = include_bytes!("data/test.txt");
test_decode(include_bytes!("data/test.lz4.1"), reference);
test_decode(include_bytes!("data/test.lz4.2"), reference);
test_decode(include_bytes!("data/test.lz4.3"), reference);
test_decode(include_bytes!("data/test.lz4.4"), reference);
test_decode(include_bytes!("data/test.lz4.5"), reference);
test_decode(include_bytes!("data/test.lz4.6"), reference);
test_decode(include_bytes!("data/test.lz4.7"), reference);
test_decode(include_bytes!("data/test.lz4.8"), reference);
test_decode(include_bytes!("data/test.lz4.9"), reference);
}
#[test]
fn raw_encode_block() {
let data = include_bytes!("data/test.txt");
let mut encoded = Vec::new();
super::encode_block(data, &mut encoded);
let mut decoded = Vec::new();
super::decode_block(&encoded[..], &mut decoded);
assert_eq!(&data[..], &decoded[..]);
}
#[test]
fn one_byte_at_a_time() {
let input = include_bytes!("data/test.lz4.1");
let mut d = Decoder::new(BufReader::new(&input[..]));
assert!(!d.eof());
let mut out = Vec::new();
loop {
match d.read_u8() {
Ok(b) => out.push(b),
Err(..) => break
}
}
assert!(d.eof());
assert!(&out[..] == &include_bytes!("data/test.txt")[..]);
}
#[test]
fn random_byte_lengths() {
let input = include_bytes!("data/test.lz4.1");
let mut d = Decoder::new(BufReader::new(&input[..]));
let mut out = Vec::new();
let mut buf = [0u8; 40];
loop {
match d.read(&mut buf[..(1 + rand::random::<usize>() % 40)]) {
Ok(0) => break,
Ok(n) => {
out.extend(buf[..n].iter().map(|b| *b));
}
Err(..) => break
}
}
assert!(&out[..] == &include_bytes!("data/test.txt")[..]);
}
fn roundtrip(bytes: &[u8]) {
let mut e = Encoder::new(BufWriter::new(Vec::new()));
e.write(bytes).unwrap();
let (e, err) = e.finish();
err.unwrap();
let encoded = e.into_inner().unwrap();
let mut d = Decoder::new(BufReader::new(&encoded[..]));
let mut decoded = Vec::new();
d.read_to_end(&mut decoded).unwrap();
assert_eq!(&decoded[..], bytes);
}
#[test]
fn some_roundtrips() {
roundtrip(b"test");
roundtrip(b"");
roundtrip(include_bytes!("data/test.txt"));
}
#[cfg(feature="unstable")]
#[bench]
fn decompress_speed(bh: &mut test::Bencher) {
let input = include_bytes!("data/test.lz4.9");
let mut d = Decoder::new(BufReader::new(&input[..]));
let mut output = [0u8; 65536];
let mut output_size = 0;
bh.iter(|| {
d.r = BufReader::new(&input[..]);
d.reset();
output_size = d.read(&mut output).unwrap();
});
bh.bytes = output_size as u64;
}
}
| ln -= RUN_MASK;
while ln > 254 {
self.output[self.dest_pos as usize] = 255;
self.dest_pos += 1;
ln -= 255;
}
self.output[self.dest_pos as usize] = ln as u8;
self.dest_pos += 1;
}
| conditional_block |
lz4.rs | /*!
LZ4 Decompression and Compression. Requires `lz4` feature, enabled by default
This module contains an implementation in Rust of decompression and compression
of LZ4-encoded streams. These are exposed as a standard `Reader` and `Writer`
interfaces wrapping an underlying stream.
# Example
```rust,ignore
use compress::lz4;
use std::fs::File;
use std::path::Path;
use std::io::Read;
let stream = File::open(&Path::new("path/to/file.lz4")).unwrap();
let mut decompressed = Vec::new();
lz4::Decoder::new(stream).read_to_end(&mut decompressed);
```
# Credit
This implementation is largely based on Branimir Karadžić's implementation which
can be found at https://github.com/bkaradzic/go-lz4.
*/
use std::cmp;
use std::ptr::copy_nonoverlapping;
use std::io::{self, Read, Write};
use std::iter::repeat;
use std::vec::Vec;
use std::num::Wrapping;
use std::ops::Shr;
use super::byteorder::{LittleEndian, WriteBytesExt, ReadBytesExt};
use super::{ReadExact, byteorder_err_to_io};
const MAGIC: u32 = 0x184d2204;
const ML_BITS: u32 = 4;
const ML_MASK: u32 = (1 << ML_BITS as usize) - 1;
const RUN_BITS: u32 = 8 - ML_BITS;
const RUN_MASK: u32 = (1 << RUN_BITS as usize) - 1;
const MIN_MATCH: u32 = 4;
const HASH_LOG: u32 = 17;
const HASH_TABLE_SIZE: u32 = 1 << (HASH_LOG as usize);
const HASH_SHIFT: u32 = (MIN_MATCH * 8) - HASH_LOG;
const INCOMPRESSIBLE: u32 = 128;
const UNINITHASH: u32 = 0x88888888;
const MAX_INPUT_SIZE: u32 = 0x7e000000;
struct BlockDecoder<'a> {
input: &'a [u8],
output: &'a mut Vec<u8>,
cur: usize,
start: usize,
end: usize,
}
impl<'a> BlockDecoder<'a> {
/// Decodes this block of data from 'input' to 'output', returning the
/// number of valid bytes in the output.
fn decode(&mut self) -> usize {
while self.cur < self.input.len() {
let code = self.bump();
debug!("block with code: {:x}", code);
// Extract a chunk of data from the input to the output.
{
let len = self.length(code >> 4);
debug!("consume len {}", len);
if len > 0 {
let end = self.end;
self.grow_output(end + len);
unsafe { copy_nonoverlapping(
&self.input[self.cur],
&mut self.output[end],
len
)};
self.end += len;
self.cur += len;
}
}
if self.cur == self.input.len() { break }
// Read off the next i16 offset
{
let back = (self.bump() as usize) | ((self.bump() as usize) << 8);
debug!("found back {}", back);
self.start = self.end - back;
}
// Slosh around some bytes now
{
let mut len = self.length(code & 0xf);
let literal = self.end - self.start;
if literal < 4 {
static DECR: [usize; 4] = [0, 3, 2, 3];
self.cp(4, DECR[literal]);
} else {
len += 4;
}
self.cp(len, 0);
}
}
self.end
}
fn length(&mut self, code: u8) -> usize {
let mut ret = code as usize;
if code == 0xf {
loop {
let tmp = self.bump();
ret += tmp as usize;
if tmp != 0xff { break }
}
}
ret
}
fn bump(&mut self) -> u8 {
let ret = self.input[self.cur];
self.cur += 1;
ret
}
#[inline]
fn cp(&mut self, len: usize, decr: usize) {
let end = self.end;
self.grow_output(end + len);
for i in 0..len {
self.output[end + i] = (*self.output)[self.start + i];
}
self.end += len;
self.start += len - decr;
}
// Extends the output vector to a target number of bytes (in total), but
// does not actually initialize the new data. The length of the vector is
// updated, but the bytes will all have undefined values. It is assumed that
// the next operation is to pave over these bytes (so the initialization is
// unnecessary).
#[inline]
fn grow_output(&mut self, target: usize) {
if self.output.capacity() < target {
debug!("growing {} to {}", self.output.capacity(), target);
//let additional = target - self.output.capacity();
//self.output.reserve(additional);
while self.output.len() < target {
self.output.push(0);
}
}else {
unsafe {
self.output.set_len(target);
}
}
}
}
struct BlockEncoder<'a> {
input: &'a [u8],
output: &'a mut Vec<u8>,
hash_table: Vec<u32>,
pos: u32,
anchor: u32,
dest_pos: u32
}
/// Returns maximum possible size of compressed output
/// given source size
pub fn compression_bound(size: u32) -> Option<u32> {
if size > MAX_INPUT_SIZE {
None
} else {
Some(size + (size / 255) + 16 + 4)
}
}
impl<'a> BlockEncoder<'a> {
#[inline(always)]
fn seq_at(&self, pos: u32) -> u32 {
(self.input[pos as usize + 3] as u32) << 24
| (self.input[pos as usize + 2] as u32) << 16
| (self.input[pos as usize + 1] as u32) << 8
| (self.input[pos as usize] as u32)
}
fn write_literals(&mut self, len: u32, ml_len: u32, pos: u32) {
let mut ln = len;
let code = if ln > RUN_MASK - 1 { RUN_MASK as u8 } else { ln as u8 };
if ml_len > ML_MASK - 1 {
self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ML_MASK as u8;
} else {
self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ml_len as u8;
}
self.dest_pos += 1;
if code == RUN_MASK as u8 {
ln -= RUN_MASK;
while ln > 254 {
self.output[self.dest_pos as usize] = 255;
self.dest_pos += 1;
ln -= 255;
}
self.output[self.dest_pos as usize] = ln as u8;
self.dest_pos += 1;
}
// FIXME: find out why slicing syntax fails tests
//self.output[self.dest_pos as usize .. (self.dest_pos + len) as usize] = self.input[pos as uint.. (pos + len) as uint];
for i in 0..(len as usize) {
self.output[self.dest_pos as usize + i] = self.input[pos as usize + i];
}
self.dest_pos += len;
}
fn encode(&mut self) -> u32 {
let input_len = self.input.len() as u32;
match compression_bound(input_len) {
None => 0,
Some(out_size) => {
let out_size_usize = out_size as usize;
if self.output.capacity() < out_size_usize {
let additional = out_size_usize - self.output.capacity();
self.output.reserve(additional);
}
unsafe {self.output.set_len(out_size_usize); }
let mut step = 1u32;
let mut limit = INCOMPRESSIBLE;
loop {
if self.pos + 12 > input_len {
let tmp = self.anchor;
self.write_literals(self.input.len() as u32 - tmp, 0, tmp);
unsafe { self.output.set_len(self.dest_pos as usize) };
return self.dest_pos;
}
let seq = self.seq_at(self.pos);
let hash = (Wrapping(seq) * Wrapping(2654435761)).shr(HASH_SHIFT as usize).0;
let mut r = (Wrapping(self.hash_table[hash as usize]) + Wrapping(UNINITHASH)).0;
self.hash_table[hash as usize] = (Wrapping(self.pos) - Wrapping(UNINITHASH)).0;
if (Wrapping(self.pos) - Wrapping(r)).shr(16).0 != 0 || seq != self.seq_at(r) {
if self.pos - self.anchor > limit {
limit = limit << 1;
step += 1 + (step >> 2);
}
self.pos += step;
continue;
}
if step > 1 {
self.hash_table[hash as usize] = r - UNINITHASH;
self.pos -= step - 1;
step = 1;
continue;
}
limit = INCOMPRESSIBLE;
let ln = self.pos - self.anchor;
let back = self.pos - r;
let anchor = self.anchor;
self.pos += MIN_MATCH;
r += MIN_MATCH;
self.anchor = self.pos;
while (self.pos < input_len - 5) && self.input[self.pos as usize] == self.input[r as usize] {
self.pos += 1;
r += 1
}
let mut ml_len = self.pos - self.anchor;
self.write_literals(ln, ml_len, anchor);
self.output[self.dest_pos as usize] = back as u8;
self.output[self.dest_pos as usize + 1] = (back >> 8) as u8;
self.dest_pos += 2;
if ml_len > ML_MASK - 1 {
ml_len -= ML_MASK;
while ml_len > 254 {
ml_len -= 255;
self.output[self.dest_pos as usize] = 255;
self.dest_pos += 1;
}
self.output[self.dest_pos as usize] = ml_len as u8;
self.dest_pos += 1;
}
self.anchor = self.pos;
}
}
}
}
}
/// This structure is used to decode a stream of LZ4 blocks. This wraps an
/// internal reader which is read from when this decoder's read method is
/// called.
pub struct Decoder<R> {
/// The internally wrapped reader. This is exposed so it may be moved out
/// of. Note that if data is read from the reader while decoding is in
/// progress the output stream will get corrupted.
pub r: R,
temp: Vec<u8>,
output: Vec<u8>,
start: usize,
end: usize,
eof: bool,
header: bool,
blk_checksum: bool,
stream_checksum: bool,
max_block_size: usize,
}
impl<R: Read + Sized> Decoder<R> {
/// Creates a new decoder which will read data from the given stream. The
/// inner stream can be re-acquired by moving out of the `r` field of this
/// structure.
pub fn new(r: R) -> Decoder<R> {
Decoder {
r: r,
temp: Vec::new(),
output: Vec::new(),
header: false,
blk_checksum: false,
stream_checksum: false,
start: 0,
end: 0,
eof: false,
max_block_size: 0,
}
}
/// Resets this decoder back to its initial state. Note that the underlying
/// stream is not seeked on or has any alterations performed on it.
pub fn reset(&mut self) {
self.header = false;
self.eof = false;
self.start = 0;
self.end = 0;
}
fn read_header(&mut self) -> io::Result<()> {
// Make sure the magic number is what's expected.
if try!(self.r.read_u32::<LittleEndian>()) != MAGIC {
return Err(io::Error::new(io::ErrorKind::InvalidInput, ""))
}
let mut bits = [0; 3];
try!(self.r.read(&mut bits[..2]));
let flg = bits[0];
let bd = bits[1];
// bits 7/6, the version number. Right now this must be 1
if (flg >> 6) != 0b01 {
return Err(io::Error::new(io::ErrorKind::InvalidInput, ""))
}
// bit 5 is the "block independence", don't care about this yet
// bit 4 is whether blocks have checksums or not
self.blk_checksum = (flg & 0x10) != 0;
// bit 3 is whether there is a following stream size
let stream_size = (flg & 0x08) != 0;
// bit 2 is whether there is a stream checksum
self.stream_checksum = (flg & 0x04) != 0;
// bit 1 is reserved
// bit 0 is whether there is a preset dictionary
let preset_dictionary = (flg & 0x01) != 0;
static MAX_SIZES: [usize; 8] =
[0, 0, 0, 0, // all N/A
64 << 10, // 64KB
256 << 10, // 256 KB
1 << 20, // 1MB
4 << 20]; // 4MB
// bit 7 is reserved
// bits 6-4 are the maximum block size
let max_block_size = MAX_SIZES[(bd >> 4) as usize & 0x7];
// bits 3-0 are reserved
// read off other portions of the stream
let size = if stream_size {
Some(try!(self.r.read_u64::<LittleEndian>()))
} else {
None
};
assert!(!preset_dictionary, "preset dictionaries not supported yet");
debug!("blk: {}", self.blk_checksum);
debug!("stream: {}", self.stream_checksum);
debug!("max size: {}", max_block_size);
debug!("stream size: {:?}", size);
self.max_block_size = max_block_size;
// XXX: implement checksums
let cksum = try!(self.r.read_u8());
debug!("ignoring header checksum: {}", cksum);
return Ok(());
}
fn decode_block(&mut self) -> io::Result<bool> {
match try!(self.r.read_u32::<LittleEndian>()) {
// final block, we're done here
0 => return Ok(false),
// raw block to read
n if n & 0x80000000 != 0 => {
let amt = (n & 0x7fffffff) as usize;
self.output.truncate(0);
self.output.reserve(amt);
try!(self.r.push_exactly(amt as u64, &mut self.output));
self.start = 0;
self.end = amt;
}
// actual block to decompress
n => {
let n = n as usize;
self.temp.truncate(0);
self.temp.reserve(n);
try!(self.r.push_exactly(n as u64, &mut self.temp));
let target = cmp::min(self.max_block_size, 4 * n / 3);
self.output.truncate(0);
self.output.reserve(target);
let mut decoder = BlockDecoder {
input: &self.temp[..n],
output: &mut self.output,
cur: 0,
start: 0,
end: 0,
};
self.start = 0;
self.end = decoder.decode();
}
}
if self.blk_checksum {
let cksum = try!(self.r.read_u32::<LittleEndian>());
debug!("ignoring block checksum {}", cksum);
}
return Ok(true);
}
/// Tests whether the end of this LZ4 stream has been reached
pub fn eof(&mut self) -> bool { self.eof }
}
impl<R: Read> Read for Decoder<R> {
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
if self.eof { return Ok(0) }
if !self.header {
try!(self.read_header());
self.header = true;
}
let mut amt = dst.len();
let len = amt;
while amt > 0 {
if self.start == self.end {
let keep_going = try!(self.decode_block());
if !keep_going {
self.eof = true;
break;
}
}
let n = cmp::min(amt, self.end - self.start);
unsafe { copy_nonoverlapping(
&self.output[self.start],
&mut dst[len - amt],
n
)};
self.start += n;
amt -= n;
}
Ok(len - amt)
}
}
/// This structure is used to compress a stream of bytes using the LZ4
/// compression algorithm. This is a wrapper around an internal writer which
/// bytes will be written to.
pub struct En | > {
w: W,
buf: Vec<u8>,
tmp: Vec<u8>,
wrote_header: bool,
limit: usize,
}
impl<W: Write> Encoder<W> {
/// Creates a new encoder which will have its output written to the given
/// output stream. The output stream can be re-acquired by calling
/// `finish()`
///
/// NOTE: compression isn't actually implemented just yet, this is just a
/// skeleton of a future implementation.
pub fn new(w: W) -> Encoder<W> {
Encoder {
w: w,
wrote_header: false,
buf: Vec::with_capacity(1024),
tmp: Vec::new(),
limit: 256 * 1024,
}
}
fn encode_block(&mut self) -> io::Result<()> {
self.tmp.truncate(0);
if self.compress() {
try!(self.w.write_u32::<LittleEndian>(self.tmp.len() as u32));
try!(self.w.write(&self.tmp));
} else {
try!(self.w.write_u32::<LittleEndian>((self.buf.len() as u32) | 0x80000000));
try!(self.w.write(&self.buf));
}
self.buf.truncate(0);
Ok(())
}
fn compress(&mut self) -> bool {
false
}
/// This function is used to flag that this session of compression is done
/// with. The stream is finished up (final bytes are written), and then the
/// wrapped writer is returned.
pub fn finish(mut self) -> (W, io::Result<()>) {
let mut result = self.flush();
for _ in 0..2 {
let tmp = self.w.write_u32::<LittleEndian>(0)
.map_err(byteorder_err_to_io);
result = result.and_then(|_| tmp);
}
(self.w, result)
}
}
impl<W: Write> Write for Encoder<W> {
fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
if !self.wrote_header {
try!(self.w.write_u32::<LittleEndian>(MAGIC));
// version 01, turn on block independence, but turn off
// everything else (we have no checksums right now).
try!(self.w.write_u8(0b01_100000));
// Maximum block size is 256KB
try!(self.w.write_u8(0b0_101_0000));
// XXX: this checksum is just plain wrong.
try!(self.w.write_u8(0));
self.wrote_header = true;
}
while buf.len() > 0 {
let amt = cmp::min(self.limit - self.buf.len(), buf.len());
self.buf.extend(buf[..amt].iter().map(|b| *b));
if self.buf.len() == self.limit {
try!(self.encode_block());
}
buf = &buf[amt..];
}
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
if self.buf.len() > 0 {
try!(self.encode_block());
}
self.w.flush()
}
}
/// Decodes pure LZ4 block into output. Returns count of bytes
/// processed.
pub fn decode_block(input: &[u8], output: &mut Vec<u8>) -> usize {
let mut b = BlockDecoder {
input: input,
output: output,
cur: 0,
start: 0,
end: 0
};
b.decode()
}
/// Encodes input into pure LZ4 block. Return count of bytes
/// processed.
pub fn encode_block(input: &[u8], output: &mut Vec<u8>) -> usize {
let mut encoder = BlockEncoder {
input: input,
output: output,
hash_table: repeat(0).take(HASH_TABLE_SIZE as usize).collect(),
pos: 0,
anchor: 0,
dest_pos: 0
};
encoder.encode() as usize
}
#[cfg(test)]
mod test {
    use std::io::{BufReader, BufWriter, Read, Write};
    use super::super::rand;
    use super::{Decoder, Encoder};
    #[cfg(feature="unstable")]
    use test;
    use super::super::byteorder::ReadBytesExt;
    /// Decompresses `input` and asserts the result equals `output`.
    fn test_decode(input: &[u8], output: &[u8]) {
        let mut d = Decoder::new(BufReader::new(input));
        let mut buf = Vec::new();
        d.read_to_end(&mut buf).unwrap();
        assert!(&buf[..] == output);
    }
    // The data/test.lz4.N fixtures all decompress to the same reference text.
    #[test]
    fn decode() {
        let reference = include_bytes!("data/test.txt");
        test_decode(include_bytes!("data/test.lz4.1"), reference);
        test_decode(include_bytes!("data/test.lz4.2"), reference);
        test_decode(include_bytes!("data/test.lz4.3"), reference);
        test_decode(include_bytes!("data/test.lz4.4"), reference);
        test_decode(include_bytes!("data/test.lz4.5"), reference);
        test_decode(include_bytes!("data/test.lz4.6"), reference);
        test_decode(include_bytes!("data/test.lz4.7"), reference);
        test_decode(include_bytes!("data/test.lz4.8"), reference);
        test_decode(include_bytes!("data/test.lz4.9"), reference);
    }
    // Round-trips a raw (frameless) block through the block-level codec.
    #[test]
    fn raw_encode_block() {
        let data = include_bytes!("data/test.txt");
        let mut encoded = Vec::new();
        super::encode_block(data, &mut encoded);
        let mut decoded = Vec::new();
        super::decode_block(&encoded[..], &mut decoded);
        assert_eq!(&data[..], &decoded[..]);
    }
    // Single-byte reads must still reproduce the full stream and set eof.
    #[test]
    fn one_byte_at_a_time() {
        let input = include_bytes!("data/test.lz4.1");
        let mut d = Decoder::new(BufReader::new(&input[..]));
        assert!(!d.eof());
        let mut out = Vec::new();
        loop {
            match d.read_u8() {
                Ok(b) => out.push(b),
                Err(..) => break
            }
        }
        assert!(d.eof());
        assert!(&out[..] == &include_bytes!("data/test.txt")[..]);
    }
    // Reads with randomly sized buffers (1..=40 bytes) to exercise the
    // partial-copy path in `Decoder::read`.
    #[test]
    fn random_byte_lengths() {
        let input = include_bytes!("data/test.lz4.1");
        let mut d = Decoder::new(BufReader::new(&input[..]));
        let mut out = Vec::new();
        let mut buf = [0u8; 40];
        loop {
            match d.read(&mut buf[..(1 + rand::random::<usize>() % 40)]) {
                Ok(0) => break,
                Ok(n) => {
                    out.extend(buf[..n].iter().map(|b| *b));
                }
                Err(..) => break
            }
        }
        assert!(&out[..] == &include_bytes!("data/test.txt")[..]);
    }
    /// Encodes `bytes` through the streaming Encoder, finishes the frame,
    /// and checks the Decoder reproduces the original bytes.
    fn roundtrip(bytes: &[u8]) {
        let mut e = Encoder::new(BufWriter::new(Vec::new()));
        e.write(bytes).unwrap();
        let (e, err) = e.finish();
        err.unwrap();
        let encoded = e.into_inner().unwrap();
        let mut d = Decoder::new(BufReader::new(&encoded[..]));
        let mut decoded = Vec::new();
        d.read_to_end(&mut decoded).unwrap();
        assert_eq!(&decoded[..], bytes);
    }
    #[test]
    fn some_roundtrips() {
        roundtrip(b"test");
        roundtrip(b"");
        roundtrip(include_bytes!("data/test.txt"));
    }
    // Benchmarks a single 64KB decode; only built with the `unstable` feature.
    #[cfg(feature="unstable")]
    #[bench]
    fn decompress_speed(bh: &mut test::Bencher) {
        let input = include_bytes!("data/test.lz4.9");
        let mut d = Decoder::new(BufReader::new(&input[..]));
        let mut output = [0u8; 65536];
        let mut output_size = 0;
        bh.iter(|| {
            d.r = BufReader::new(&input[..]);
            d.reset();
            output_size = d.read(&mut output).unwrap();
        });
        bh.bytes = output_size as u64;
    }
}
| coder<W | identifier_name |
lz4.rs | /*!
LZ4 Decompression and Compression. Requires `lz4` feature, enabled by default
This module contains an implementation in Rust of decompression and compression
of LZ4-encoded streams. These are exposed as a standard `Reader` and `Writer`
interfaces wrapping an underlying stream.
| use std::fs::File;
use std::path::Path;
use std::io::Read;
let stream = File::open(&Path::new("path/to/file.lz4")).unwrap();
let mut decompressed = Vec::new();
lz4::Decoder::new(stream).read_to_end(&mut decompressed);
```
# Credit
This implementation is largely based on Branimir Karadžić's implementation which
can be found at https://github.com/bkaradzic/go-lz4.
*/
use std::cmp;
use std::ptr::copy_nonoverlapping;
use std::io::{self, Read, Write};
use std::iter::repeat;
use std::vec::Vec;
use std::num::Wrapping;
use std::ops::Shr;
use super::byteorder::{LittleEndian, WriteBytesExt, ReadBytesExt};
use super::{ReadExact, byteorder_err_to_io};
// Little-endian magic word that opens an LZ4 frame.
const MAGIC: u32 = 0x184d2204;
// Token layout: low nibble = match length, high nibble = literal run length.
const ML_BITS: u32 = 4;
const ML_MASK: u32 = (1 << ML_BITS as usize) - 1;
const RUN_BITS: u32 = 8 - ML_BITS;
const RUN_MASK: u32 = (1 << RUN_BITS as usize) - 1;
// Matches shorter than 4 bytes are never encoded.
const MIN_MATCH: u32 = 4;
// Encoder hash table: 2^17 entries mapping 4-byte sequences to positions.
const HASH_LOG: u32 = 17;
const HASH_TABLE_SIZE: u32 = 1 << (HASH_LOG as usize);
const HASH_SHIFT: u32 = (MIN_MATCH * 8) - HASH_LOG;
// After this many unmatched bytes the encoder starts skipping faster.
const INCOMPRESSIBLE: u32 = 128;
// Sentinel offset distinguishing empty hash-table slots from position 0.
const UNINITHASH: u32 = 0x88888888;
// Inputs larger than this cannot be encoded (compression_bound -> None).
const MAX_INPUT_SIZE: u32 = 0x7e000000;
/// Decoding state for a single raw LZ4 block.
struct BlockDecoder<'a> {
    // Compressed block being read.
    input: &'a [u8],
    // Decompressed bytes are written here.
    output: &'a mut Vec<u8>,
    // Read cursor into `input`.
    cur: usize,
    // Start of the current back-reference window inside `output`.
    start: usize,
    // Number of valid decompressed bytes written so far.
    end: usize,
}
impl<'a> BlockDecoder<'a> {
    /// Decodes this block of data from 'input' to 'output', returning the
    /// number of valid bytes in the output.
    fn decode(&mut self) -> usize {
        while self.cur < self.input.len() {
            // Each sequence begins with a token: high nibble is the literal
            // run length, low nibble the match length.
            let code = self.bump();
            debug!("block with code: {:x}", code);
            // Extract a chunk of data from the input to the output.
            {
                let len = self.length(code >> 4);
                debug!("consume len {}", len);
                if len > 0 {
                    let end = self.end;
                    self.grow_output(end + len);
                    // Literals are copied verbatim; input and output buffers
                    // never alias, so the nonoverlapping copy is sound here.
                    unsafe { copy_nonoverlapping(
                        &self.input[self.cur],
                        &mut self.output[end],
                        len
                    )};
                    self.end += len;
                    self.cur += len;
                }
            }
            // The final sequence of a block is literals-only: no offset follows.
            if self.cur == self.input.len() { break }
            // Read off the next i16 offset
            {
                // Little-endian 16-bit distance back into the decoded output.
                let back = (self.bump() as usize) | ((self.bump() as usize) << 8);
                debug!("found back {}", back);
                self.start = self.end - back;
            }
            // Slosh around some bytes now
            {
                let mut len = self.length(code & 0xf);
                let literal = self.end - self.start;
                if literal < 4 {
                    // Overlapping match closer than 4 bytes: copy the first 4
                    // bytes with a distance-specific window decrement.
                    static DECR: [usize; 4] = [0, 3, 2, 3];
                    self.cp(4, DECR[literal]);
                } else {
                    // The minimum match length of 4 is implicit in the format.
                    len += 4;
                }
                self.cp(len, 0);
            }
        }
        self.end
    }
    /// Expands a 4-bit length nibble: the value 0xf means "keep reading
    /// extension bytes, adding each, until one is not 0xff".
    fn length(&mut self, code: u8) -> usize {
        let mut ret = code as usize;
        if code == 0xf {
            loop {
                let tmp = self.bump();
                ret += tmp as usize;
                if tmp != 0xff { break }
            }
        }
        ret
    }
    /// Reads one input byte and advances the cursor.
    fn bump(&mut self) -> u8 {
        let ret = self.input[self.cur];
        self.cur += 1;
        ret
    }
    /// Copies `len` bytes from the window at `self.start` to the end of the
    /// output, byte-by-byte so overlapping ranges correctly repeat data.
    #[inline]
    fn cp(&mut self, len: usize, decr: usize) {
        let end = self.end;
        self.grow_output(end + len);
        for i in 0..len {
            self.output[end + i] = (*self.output)[self.start + i];
        }
        self.end += len;
        // `decr` rewinds the window for the short-overlap special case above.
        self.start += len - decr;
    }
    // Extends the output vector to a target number of bytes (in total), but
    // does not actually initialize the new data. The length of the vector is
    // updated, but the bytes will all have undefined values. It is assumed that
    // the next operation is to pave over these bytes (so the initialization is
    // unnecessary).
    #[inline]
    fn grow_output(&mut self, target: usize) {
        if self.output.capacity() < target {
            debug!("growing {} to {}", self.output.capacity(), target);
            //let additional = target - self.output.capacity();
            //self.output.reserve(additional);
            // Slow path: push zeroes until the vector is long enough.
            while self.output.len() < target {
                self.output.push(0);
            }
        } else {
            // Capacity already sufficient: expose the bytes uninitialized;
            // callers overwrite them immediately.
            unsafe {
                self.output.set_len(target);
            }
        }
    }
}
/// Encoding state for producing a single raw LZ4 block.
struct BlockEncoder<'a> {
    // Uncompressed bytes to encode.
    input: &'a [u8],
    // Compressed bytes are written here, indexed by `dest_pos`.
    output: &'a mut Vec<u8>,
    // Maps hashed 4-byte sequences to (offset-adjusted) input positions.
    hash_table: Vec<u32>,
    // Current scan position in `input`.
    pos: u32,
    // Start of the pending literal run (end of the last emitted sequence).
    anchor: u32,
    // Write cursor into `output`.
    dest_pos: u32
}
/// Returns maximum possible size of compressed output
/// given source size, or `None` when the source exceeds the
/// maximum encodable input size.
pub fn compression_bound(size: u32) -> Option<u32> {
    // Worst case: incompressible data plus one extension byte per 255-byte
    // run plus fixed framing slack.
    if size <= MAX_INPUT_SIZE {
        Some(size + (size / 255) + 16 + 4)
    } else {
        None
    }
}
impl<'a> BlockEncoder<'a> {
    /// Reads the 4-byte little-endian sequence at input position `pos`.
    #[inline(always)]
    fn seq_at(&self, pos: u32) -> u32 {
        (self.input[pos as usize + 3] as u32) << 24
        | (self.input[pos as usize + 2] as u32) << 16
        | (self.input[pos as usize + 1] as u32) << 8
        | (self.input[pos as usize] as u32)
    }
    /// Emits a token byte, the (possibly extended) literal length, and `len`
    /// literal bytes copied from input position `pos`, advancing `dest_pos`.
    fn write_literals(&mut self, len: u32, ml_len: u32, pos: u32) {
        let mut ln = len;
        // Token: literal length in the high nibble, match length in the low.
        let code = if ln > RUN_MASK - 1 { RUN_MASK as u8 } else { ln as u8 };
        if ml_len > ML_MASK - 1 {
            self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ML_MASK as u8;
        } else {
            self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ml_len as u8;
        }
        self.dest_pos += 1;
        // Literal runs >= RUN_MASK continue in 255-valued extension bytes
        // followed by a remainder byte.
        if code == RUN_MASK as u8 {
            ln -= RUN_MASK;
            while ln > 254 {
                self.output[self.dest_pos as usize] = 255;
                self.dest_pos += 1;
                ln -= 255;
            }
            self.output[self.dest_pos as usize] = ln as u8;
            self.dest_pos += 1;
        }
        // FIXME: find out why slicing syntax fails tests
        //self.output[self.dest_pos as usize .. (self.dest_pos + len) as usize] = self.input[pos as uint.. (pos + len) as uint];
        for i in 0..(len as usize) {
            self.output[self.dest_pos as usize + i] = self.input[pos as usize + i];
        }
        self.dest_pos += len;
    }
    /// Compresses the whole input into `self.output`, returning the number of
    /// bytes written; returns 0 when the input is too large to encode.
    fn encode(&mut self) -> u32 {
        let input_len = self.input.len() as u32;
        match compression_bound(input_len) {
            None => 0,
            Some(out_size) => {
                // Pre-size the output to the worst case; it is trimmed to the
                // actual compressed size before returning.
                let out_size_usize = out_size as usize;
                if self.output.capacity() < out_size_usize {
                    let additional = out_size_usize - self.output.capacity();
                    self.output.reserve(additional);
                }
                unsafe {self.output.set_len(out_size_usize); }
                let mut step = 1u32;
                let mut limit = INCOMPRESSIBLE;
                loop {
                    // Fewer than 12 bytes remain: flush the tail as literals.
                    if self.pos + 12 > input_len {
                        let tmp = self.anchor;
                        self.write_literals(self.input.len() as u32 - tmp, 0, tmp);
                        unsafe { self.output.set_len(self.dest_pos as usize) };
                        return self.dest_pos;
                    }
                    let seq = self.seq_at(self.pos);
                    // Multiplicative (Knuth-style) hash of the 4-byte sequence.
                    let hash = (Wrapping(seq) * Wrapping(2654435761)).shr(HASH_SHIFT as usize).0;
                    let mut r = (Wrapping(self.hash_table[hash as usize]) + Wrapping(UNINITHASH)).0;
                    self.hash_table[hash as usize] = (Wrapping(self.pos) - Wrapping(UNINITHASH)).0;
                    // No usable match: candidate more than 64KB back, or the
                    // 4-byte sequences differ.
                    if (Wrapping(self.pos) - Wrapping(r)).shr(16).0 != 0 || seq != self.seq_at(r) {
                        // Accelerate through incompressible data by growing
                        // the skip step.
                        if self.pos - self.anchor > limit {
                            limit = limit << 1;
                            step += 1 + (step >> 2);
                        }
                        self.pos += step;
                        continue;
                    }
                    // Match found while skipping: back up and rescan densely.
                    if step > 1 {
                        self.hash_table[hash as usize] = r - UNINITHASH;
                        self.pos -= step - 1;
                        step = 1;
                        continue;
                    }
                    limit = INCOMPRESSIBLE;
                    // Pending literal run length and backwards match distance.
                    let ln = self.pos - self.anchor;
                    let back = self.pos - r;
                    let anchor = self.anchor;
                    self.pos += MIN_MATCH;
                    r += MIN_MATCH;
                    self.anchor = self.pos;
                    // Extend the match while the bytes keep agreeing.
                    while (self.pos < input_len - 5) && self.input[self.pos as usize] == self.input[r as usize] {
                        self.pos += 1;
                        r += 1
                    }
                    let mut ml_len = self.pos - self.anchor;
                    self.write_literals(ln, ml_len, anchor);
                    // 16-bit little-endian match offset.
                    self.output[self.dest_pos as usize] = back as u8;
                    self.output[self.dest_pos as usize + 1] = (back >> 8) as u8;
                    self.dest_pos += 2;
                    // Extended match-length bytes, mirroring the literal scheme.
                    if ml_len > ML_MASK - 1 {
                        ml_len -= ML_MASK;
                        while ml_len > 254 {
                            ml_len -= 255;
                            self.output[self.dest_pos as usize] = 255;
                            self.dest_pos += 1;
                        }
                        self.output[self.dest_pos as usize] = ml_len as u8;
                        self.dest_pos += 1;
                    }
                    self.anchor = self.pos;
                }
            }
        }
    }
}
/// This structure is used to decode a stream of LZ4 blocks. This wraps an
/// internal reader which is read from when this decoder's read method is
/// called.
pub struct Decoder<R> {
    /// The internally wrapped reader. This is exposed so it may be moved out
    /// of. Note that if data is read from the reader while decoding is in
    /// progress the output stream will get corrupted.
    pub r: R,
    // Scratch buffer holding the raw compressed bytes of the current block.
    temp: Vec<u8>,
    // Decompressed bytes of the current block; `start..end` is still unread.
    output: Vec<u8>,
    start: usize,
    end: usize,
    // True once the zero-length end mark has been reached.
    eof: bool,
    // True once the frame header has been parsed.
    header: bool,
    // Whether each block is followed by a checksum (read, then ignored).
    blk_checksum: bool,
    // Whether the stream carries a trailing checksum (parsed, not verified).
    stream_checksum: bool,
    // Maximum decompressed block size advertised by the header.
    max_block_size: usize,
}
impl<R: Read + Sized> Decoder<R> {
    /// Creates a new decoder which will read data from the given stream. The
    /// inner stream can be re-acquired by moving out of the `r` field of this
    /// structure.
    pub fn new(r: R) -> Decoder<R> {
        Decoder {
            r: r,
            temp: Vec::new(),
            output: Vec::new(),
            header: false,
            blk_checksum: false,
            stream_checksum: false,
            start: 0,
            end: 0,
            eof: false,
            max_block_size: 0,
        }
    }
    /// Resets this decoder back to its initial state. Note that the underlying
    /// stream is not seeked on or has any alterations performed on it.
    pub fn reset(&mut self) {
        self.header = false;
        self.eof = false;
        self.start = 0;
        self.end = 0;
    }
    /// Parses and validates the frame header: magic word, FLG and BD
    /// descriptor bytes, optional content size, and the header checksum byte.
    fn read_header(&mut self) -> io::Result<()> {
        // Make sure the magic number is what's expected.
        if try!(self.r.read_u32::<LittleEndian>()) != MAGIC {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, ""))
        }
        let mut bits = [0; 3];
        // NOTE(review): `read` may legally return fewer than 2 bytes; a
        // read-exact loop would be more robust here.
        try!(self.r.read(&mut bits[..2]));
        let flg = bits[0];
        let bd = bits[1];
        // bits 7/6, the version number. Right now this must be 1
        if (flg >> 6) != 0b01 {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, ""))
        }
        // bit 5 is the "block independence", don't care about this yet
        // bit 4 is whether blocks have checksums or not
        self.blk_checksum = (flg & 0x10) != 0;
        // bit 3 is whether there is a following stream size
        let stream_size = (flg & 0x08) != 0;
        // bit 2 is whether there is a stream checksum
        self.stream_checksum = (flg & 0x04) != 0;
        // bit 1 is reserved
        // bit 0 is whether there is a preset dictionary
        let preset_dictionary = (flg & 0x01) != 0;
        static MAX_SIZES: [usize; 8] =
            [0, 0, 0, 0, // all N/A
             64 << 10, // 64KB
             256 << 10, // 256 KB
             1 << 20, // 1MB
             4 << 20]; // 4MB
        // bit 7 is reserved
        // bits 6-4 are the maximum block size
        let max_block_size = MAX_SIZES[(bd >> 4) as usize & 0x7];
        // bits 3-0 are reserved
        // read off other portions of the stream
        let size = if stream_size {
            Some(try!(self.r.read_u64::<LittleEndian>()))
        } else {
            None
        };
        assert!(!preset_dictionary, "preset dictionaries not supported yet");
        debug!("blk: {}", self.blk_checksum);
        debug!("stream: {}", self.stream_checksum);
        debug!("max size: {}", max_block_size);
        debug!("stream size: {:?}", size);
        self.max_block_size = max_block_size;
        // XXX: implement checksums
        let cksum = try!(self.r.read_u8());
        debug!("ignoring header checksum: {}", cksum);
        return Ok(());
    }
    /// Reads and decodes the next block into `self.output`. Returns
    /// `Ok(false)` once the zero-length end mark has been reached.
    fn decode_block(&mut self) -> io::Result<bool> {
        match try!(self.r.read_u32::<LittleEndian>()) {
            // final block, we're done here
            0 => return Ok(false),
            // raw block to read
            n if n & 0x80000000 != 0 => {
                // High bit set: the block is stored uncompressed; the low 31
                // bits hold its length.
                let amt = (n & 0x7fffffff) as usize;
                self.output.truncate(0);
                self.output.reserve(amt);
                try!(self.r.push_exactly(amt as u64, &mut self.output));
                self.start = 0;
                self.end = amt;
            }
            // actual block to decompress
            n => {
                let n = n as usize;
                self.temp.truncate(0);
                self.temp.reserve(n);
                try!(self.r.push_exactly(n as u64, &mut self.temp));
                // Heuristic initial capacity; BlockDecoder grows the vector
                // further if needed.
                let target = cmp::min(self.max_block_size, 4 * n / 3);
                self.output.truncate(0);
                self.output.reserve(target);
                let mut decoder = BlockDecoder {
                    input: &self.temp[..n],
                    output: &mut self.output,
                    cur: 0,
                    start: 0,
                    end: 0,
                };
                self.start = 0;
                self.end = decoder.decode();
            }
        }
        // Block checksums are consumed but not verified (see XXX above).
        if self.blk_checksum {
            let cksum = try!(self.r.read_u32::<LittleEndian>());
            debug!("ignoring block checksum {}", cksum);
        }
        return Ok(true);
    }
    /// Tests whether the end of this LZ4 stream has been reached
    pub fn eof(&mut self) -> bool { self.eof }
}
impl<R: Read> Read for Decoder<R> {
    /// Fills `dst` from buffered decoded output, decoding further blocks from
    /// the inner reader as needed; returns the number of bytes produced.
    fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
        if self.eof { return Ok(0) }
        // The frame header is parsed lazily on the first read.
        if !self.header {
            try!(self.read_header());
            self.header = true;
        }
        let mut amt = dst.len();
        let len = amt;
        while amt > 0 {
            // Buffered output exhausted: decode the next block.
            if self.start == self.end {
                let keep_going = try!(self.decode_block());
                if !keep_going {
                    self.eof = true;
                    break;
                }
            }
            let n = cmp::min(amt, self.end - self.start);
            // Copy from the decoded-block buffer into the caller's slice;
            // the two buffers never alias.
            unsafe { copy_nonoverlapping(
                &self.output[self.start],
                &mut dst[len - amt],
                n
            )};
            self.start += n;
            amt -= n;
        }
        Ok(len - amt)
    }
}
/// This structure is used to compress a stream of bytes using the LZ4
/// compression algorithm. This is a wrapper around an internal writer which
/// bytes will be written to.
pub struct Encoder<W> {
    // Wrapped output stream; returned to the caller by `finish()`.
    w: W,
    // Pending uncompressed bytes, flushed as a block when `limit` is hit.
    buf: Vec<u8>,
    // Scratch buffer for a compressed block (unused while `compress` is a stub).
    tmp: Vec<u8>,
    // True once the frame header has been emitted.
    wrote_header: bool,
    // Maximum block size in bytes (matches the header's 256KB setting).
    limit: usize,
}
impl<W: Write> Encoder<W> {
    /// Creates a new encoder which will have its output written to the given
    /// output stream. The output stream can be re-acquired by calling
    /// `finish()`
    ///
    /// NOTE: compression isn't actually implemented just yet, this is just a
    /// skeleton of a future implementation.
    pub fn new(w: W) -> Encoder<W> {
        Encoder {
            w: w,
            wrote_header: false,
            buf: Vec::with_capacity(1024),
            tmp: Vec::new(),
            // Must agree with the 256KB maximum block size advertised in the
            // header written by `write`.
            limit: 256 * 1024,
        }
    }
    /// Writes the buffered block to the inner writer: compressed from `tmp`
    /// when `compress` succeeds, otherwise raw from `buf` with the high bit
    /// of the length word set.
    fn encode_block(&mut self) -> io::Result<()> {
        self.tmp.truncate(0);
        if self.compress() {
            try!(self.w.write_u32::<LittleEndian>(self.tmp.len() as u32));
            try!(self.w.write(&self.tmp));
        } else {
            // Uncompressed blocks are flagged by bit 31 of the length word.
            try!(self.w.write_u32::<LittleEndian>((self.buf.len() as u32) | 0x80000000));
            try!(self.w.write(&self.buf));
        }
        self.buf.truncate(0);
        Ok(())
    }
    /// Placeholder: always reports that compression failed, so every block is
    /// emitted raw (see the NOTE on `new`).
    fn compress(&mut self) -> bool {
        false
    }
    /// This function is used to flag that this session of compression is done
    /// with. The stream is finished up (final bytes are written), and then the
    /// wrapped writer is returned.
    pub fn finish(mut self) -> (W, io::Result<()>) {
        let mut result = self.flush();
        // NOTE(review): this emits two zero words, but the frame format's end
        // mark is a single zero — presumably the second acts as a checksum
        // placeholder even though checksums are disabled in the header;
        // confirm against the decoder's expectations.
        for _ in 0..2 {
            let tmp = self.w.write_u32::<LittleEndian>(0)
                .map_err(byteorder_err_to_io);
            result = result.and_then(|_| tmp);
        }
        (self.w, result)
    }
}
impl<W: Write> Write for Encoder<W> {
    /// Buffers `buf` into 256KB blocks, emitting a block whenever the buffer
    /// fills. Writes the frame header lazily on the first call.
    fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
        if !self.wrote_header {
            try!(self.w.write_u32::<LittleEndian>(MAGIC));
            // version 01, turn on block independence, but turn off
            // everything else (we have no checksums right now).
            try!(self.w.write_u8(0b01_100000));
            // Maximum block size is 256KB
            try!(self.w.write_u8(0b0_101_0000));
            // XXX: this checksum is just plain wrong.
            try!(self.w.write_u8(0));
            self.wrote_header = true;
        }
        // Remember how many bytes we were handed: the loop below re-slices
        // `buf` down to empty as it consumes it.
        let total = buf.len();
        while buf.len() > 0 {
            let amt = cmp::min(self.limit - self.buf.len(), buf.len());
            self.buf.extend(buf[..amt].iter().map(|b| *b));
            if self.buf.len() == self.limit {
                try!(self.encode_block());
            }
            buf = &buf[amt..];
        }
        // BUG FIX: previously returned `Ok(buf.len())`, which is always 0
        // here (the loop consumed the slice); `Write` requires the number of
        // bytes consumed, and callers like `write_all` treat 0 as an error.
        Ok(total)
    }
    /// Emits any partially-filled block, then flushes the inner writer.
    fn flush(&mut self) -> io::Result<()> {
        if self.buf.len() > 0 {
            try!(self.encode_block());
        }
        self.w.flush()
    }
}
/// Decodes pure LZ4 block into output. Returns count of bytes
/// processed.
pub fn decode_block(input: &[u8], output: &mut Vec<u8>) -> usize {
    // Fresh decoder state: read cursor at 0, no valid output bytes yet.
    let mut b = BlockDecoder {
        input: input,
        output: output,
        cur: 0,
        start: 0,
        end: 0
    };
    b.decode()
}
/// Encodes input into pure LZ4 block. Return count of bytes
/// processed.
pub fn encode_block(input: &[u8], output: &mut Vec<u8>) -> usize {
    // A fresh zeroed hash table per call; the encoder offsets entries by
    // UNINITHASH internally so zero slots read as "no previous match".
    let mut encoder = BlockEncoder {
        input: input,
        output: output,
        hash_table: repeat(0).take(HASH_TABLE_SIZE as usize).collect(),
        pos: 0,
        anchor: 0,
        dest_pos: 0
    };
    encoder.encode() as usize
}
#[cfg(test)]
mod test {
use std::io::{BufReader, BufWriter, Read, Write};
use super::super::rand;
use super::{Decoder, Encoder};
#[cfg(feature="unstable")]
use test;
use super::super::byteorder::ReadBytesExt;
fn test_decode(input: &[u8], output: &[u8]) {
let mut d = Decoder::new(BufReader::new(input));
let mut buf = Vec::new();
d.read_to_end(&mut buf).unwrap();
assert!(&buf[..] == output);
}
#[test]
fn decode() {
let reference = include_bytes!("data/test.txt");
test_decode(include_bytes!("data/test.lz4.1"), reference);
test_decode(include_bytes!("data/test.lz4.2"), reference);
test_decode(include_bytes!("data/test.lz4.3"), reference);
test_decode(include_bytes!("data/test.lz4.4"), reference);
test_decode(include_bytes!("data/test.lz4.5"), reference);
test_decode(include_bytes!("data/test.lz4.6"), reference);
test_decode(include_bytes!("data/test.lz4.7"), reference);
test_decode(include_bytes!("data/test.lz4.8"), reference);
test_decode(include_bytes!("data/test.lz4.9"), reference);
}
#[test]
fn raw_encode_block() {
let data = include_bytes!("data/test.txt");
let mut encoded = Vec::new();
super::encode_block(data, &mut encoded);
let mut decoded = Vec::new();
super::decode_block(&encoded[..], &mut decoded);
assert_eq!(&data[..], &decoded[..]);
}
#[test]
fn one_byte_at_a_time() {
let input = include_bytes!("data/test.lz4.1");
let mut d = Decoder::new(BufReader::new(&input[..]));
assert!(!d.eof());
let mut out = Vec::new();
loop {
match d.read_u8() {
Ok(b) => out.push(b),
Err(..) => break
}
}
assert!(d.eof());
assert!(&out[..] == &include_bytes!("data/test.txt")[..]);
}
#[test]
fn random_byte_lengths() {
let input = include_bytes!("data/test.lz4.1");
let mut d = Decoder::new(BufReader::new(&input[..]));
let mut out = Vec::new();
let mut buf = [0u8; 40];
loop {
match d.read(&mut buf[..(1 + rand::random::<usize>() % 40)]) {
Ok(0) => break,
Ok(n) => {
out.extend(buf[..n].iter().map(|b| *b));
}
Err(..) => break
}
}
assert!(&out[..] == &include_bytes!("data/test.txt")[..]);
}
fn roundtrip(bytes: &[u8]) {
let mut e = Encoder::new(BufWriter::new(Vec::new()));
e.write(bytes).unwrap();
let (e, err) = e.finish();
err.unwrap();
let encoded = e.into_inner().unwrap();
let mut d = Decoder::new(BufReader::new(&encoded[..]));
let mut decoded = Vec::new();
d.read_to_end(&mut decoded).unwrap();
assert_eq!(&decoded[..], bytes);
}
#[test]
fn some_roundtrips() {
roundtrip(b"test");
roundtrip(b"");
roundtrip(include_bytes!("data/test.txt"));
}
#[cfg(feature="unstable")]
#[bench]
fn decompress_speed(bh: &mut test::Bencher) {
let input = include_bytes!("data/test.lz4.9");
let mut d = Decoder::new(BufReader::new(&input[..]));
let mut output = [0u8; 65536];
let mut output_size = 0;
bh.iter(|| {
d.r = BufReader::new(&input[..]);
d.reset();
output_size = d.read(&mut output).unwrap();
});
bh.bytes = output_size as u64;
}
} | # Example
```rust,ignore
use compress::lz4; | random_line_split |
single_two_stage17_6_prw.py | import torch
import torch.nn as nn
import numpy as np
from collections import defaultdict
import torch.nn.functional as F
# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
from mmdet.core import bbox2result_reid
from ..roi_heads.bbox_heads.oim_nae_new import OIMLoss
from ..dense_heads.labeled_matching_layer_queue import LabeledMatchingLayerQueue
from ..dense_heads.unlabeled_matching_layer import UnlabeledMatchingLayer
from ..dense_heads.triplet_loss import TripletLossFilter
from ..utils import MINE
@DETECTORS.register_module()
class SingleTwoStageDetector176PRW(BaseDetector):
"""Base class for two-stage detectors.
Two-stage detectors typically consisting of a region proposal network and a
task-specific regression head.
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(SingleTwoStageDetector176PRW, self).__init__()
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
if rpn_head is not None: | if roi_head is not None:
# update train and test cfg here for now
# TODO: refactor assigner & sampler
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
self.roi_head = build_head(roi_head)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
self.loss_oim = OIMLoss()
num_person = 5532
queue_size = 5000
self.labeled_matching_layer = LabeledMatchingLayerQueue(num_persons=num_person, feat_len=256) # for mot17half
self.unlabeled_matching_layer = UnlabeledMatchingLayer(queue_size=queue_size, feat_len=256)
self.loss_tri = TripletLossFilter()
# self.mi_estimator = CLUBSample(256, 256, 512)
# self.mi_estimator = CLUB()
self.mi_estimator = MINE(256, 256, 256)
# self.mi_estimator = L1OutUB(256, 256, 256)
@property
def with_rpn(self):
"""bool: whether the detector has RPN"""
return hasattr(self, 'rpn_head') and self.rpn_head is not None
@property
def with_roi_head(self):
"""bool: whether the detector has a RoI head"""
return hasattr(self, 'roi_head') and self.roi_head is not None
    def init_weights(self, pretrained=None):
        """Initialize the weights in detector.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        super(SingleTwoStageDetector176PRW, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            # A neck may be a Sequential of modules; initialize each.
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        if self.with_rpn:
            self.rpn_head.init_weights()
        if self.with_roi_head:
            self.roi_head.init_weights(pretrained)
        self.bbox_head.init_weights()
    def extract_feat(self, img):
        """Directly extract features from the backbone+neck.

        Returns a pair: ``([third backbone stage output], neck outputs)`` —
        the RoI branch consumes the former, the single-stage head the latter.
        """
        xb = self.backbone(img)
        # NOTE(review): `xn` is unbound when no neck is configured; callers
        # presumably always configure one — confirm.
        if self.with_neck:
            xn = self.neck(xb)
        #for xx in xb:
        # print(xx.shape)
        # print(xb[2].shape)
        return [xb[2]], xn
    def forward_dummy(self, img):
        """Used for computing network flops.
        See `mmdetection/tools/get_flops.py`
        """
        outs = ()
        # backbone
        xb, xn = self.extract_feat(img)
        # rpn
        if self.with_rpn:
            rpn_outs = self.rpn_head(xb)
            outs = outs + (rpn_outs, )
        # 1000 random boxes stand in for real RPN proposals.
        proposals = torch.randn(1000, 4).to(img.device)
        # roi_head
        roi_outs = self.roi_head.forward_dummy(xb, proposals)
        outs = outs + (roi_outs, )
        # Single-stage branch on the neck features.
        outs_n = self.bbox_head(xn)
        return outs, outs_n
    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_ids,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_ids (list[Tensor]): person identity labels per box; negative
                values are treated as unlabeled below.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.
            proposals : override rpn proposals with custom proposals. Use when
                `with_rpn` is False.
        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        xb, xn = self.extract_feat(img)
        #print("here", xb.shape)
        losses = dict()
        # RPN forward and loss
        # RPN forward and loss
        if self.with_rpn:
            rpn_outs = self.rpn_head(xb)
            rpn_loss_inputs = rpn_outs + (gt_bboxes, img_metas)
            rpn_losses = self.rpn_head.loss(*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
            losses.update(rpn_losses)
            proposal_cfg = self.train_cfg.get("rpn_proposal", self.test_cfg.rpn)
            proposal_list = self.rpn_head.get_bboxes(*rpn_outs, img_metas, cfg=proposal_cfg)
        else:
            proposal_list = proposals
        # Two-stage (RoI) branch: returns its losses plus per-box re-ID
        # features and identity labels.
        roi_losses, feats_pids_roi = self.roi_head.forward_train(xb, img_metas, proposal_list,
                                                                 gt_bboxes, gt_labels, gt_ids,
                                                                 gt_bboxes_ignore, gt_masks,
                                                                 **kwargs)
        losses.update(roi_losses)
        # oim_loss = dict()
        # oim_loss["loss_oim_roi"] = self.loss_oim(feats_pids_roi["bbox_feats"], feats_pids_roi["gt_pids"])
        # losses.update(oim_loss)
        # Single-stage branch on the neck features; also returns re-ID
        # features/ids for its positive samples.
        single_losses, feats_pids = self.bbox_head.forward_train(xn, img_metas, gt_bboxes,
                                                                 gt_labels, gt_ids, gt_bboxes_ignore)
        # pos_reid = feats_pids["pos_reid"]
        # pos_reid_ids = feats_pids["pos_reid_ids"]
        # labeled_matching_scores, labeled_matching_reid, labeled_matching_ids = self.labeled_matching_layer(pos_reid, pos_reid_ids)
        # labeled_matching_scores *= 10
        # unlabeled_matching_scores = self.unlabeled_matching_layer(pos_reid, pos_reid_ids)
        # unlabeled_matching_scores *= 10
        # matching_scores = torch.cat((labeled_matching_scores, unlabeled_matching_scores), dim=1)
        # pid_labels = pos_reid_ids.clone()
        # pid_labels[pid_labels == -2] = -1
        # p_i = F.softmax(matching_scores, dim=1)
        # focal_p_i = (1 - p_i)**2 * p_i.log()
        # #loss_oim = F.nll_loss(focal_p_i, pid_labels, reduction='none', ignore_index=-1)
        # loss_oim = F.nll_loss(focal_p_i, pid_labels, ignore_index=-1)
        # pos_reid = torch.cat((pos_reid, labeled_matching_reid), dim=0)
        # pid_labels = torch.cat((pid_labels, labeled_matching_ids), dim=0)
        # loss_tri = self.loss_tri(pos_reid, pid_labels)
        # single_losses["loss_oim_singel"] = loss_oim
        # single_losses["loss_tri"] = loss_tri
        ####### calculate mutual information ############
        feats_roi = feats_pids_roi["bbox_feats"]
        pids_roi = feats_pids_roi["gt_pids"]
        feats_fcos = feats_pids["pos_reid"]
        pids_fcos = feats_pids["pos_reid_ids"]
        # Group re-ID features by identity for each branch; negative ids
        # (unlabeled persons) are skipped.
        dic1 = defaultdict(list)
        dic2 = defaultdict(list)
        for i in range(len(pids_roi)):
            if pids_roi[i] < 0:
                continue
            else:
                targets1_value = pids_roi[i].cpu().numpy().item()
                dic1[targets1_value].append(feats_roi[i])
        for i in range(len(pids_fcos)):
            if pids_fcos[i] < 0:
                continue
            else:
                targets2_value = pids_fcos[i].cpu().numpy().item()
                dic2[targets2_value].append(feats_fcos[i])
        all_feats1 = []
        all_feats2 = []
        # For identities present in both branches, pair up the L2-normalized
        # mean feature of each branch.
        for key, val in dic1.items():
            if key in dic2:
                val2 = dic2[key]
                feat1 = sum(val)/len(val)
                # print(feat1.shape)
                mean1 = F.normalize(feat1.unsqueeze(0))
                # mean1 = feat1.unsqueeze(0)
                feat2 = sum(val2)/len(val2)
                mean2 = F.normalize(feat2.unsqueeze(0))
                # mean2 = feat2.unsqueeze(0)
                all_feats1.append(mean1)
                all_feats2.append(mean2)
        if len(all_feats1) > 0 and len(all_feats2) >0:
            all_feats1 = torch.cat(all_feats1)
            all_feats2 = torch.cat(all_feats2)
            # print(all_feats1.shape, all_feats2.shape)
            all_feats1_d = all_feats1.detach()
            all_feats2_d = all_feats2.detach()
            mi_loss = dict()
            # Randomly ~2 of 3 iterations train the MI estimator on detached
            # features; otherwise the frozen estimator scores the live
            # features so the gradient flows into the two branches.
            if torch.randint(1, 100, (1,)) % 3:
                self.mi_estimator.train()
                mi_loss["loss_mi"] = 0.2 * self.mi_estimator.learning_loss(all_feats1_d, all_feats2_d)
            else:
                self.mi_estimator.eval()
                # mi_loss["loss_mi_bound"] = self.mi_estimator(all_feats1, all_feats2)
                mi_loss["loss_mi_bound"] = 0.2 * self.mi_estimator.learning_loss(all_feats1, all_feats2)
            losses.update(mi_loss)
        # losses.update(single_losses)
        # Merge the single-stage losses, summing any keys already produced by
        # the two-stage branch instead of overwriting them.
        for key, val in single_losses.items():
            if key in losses:
                #print("losses", key, losses[key], losses[key].shape)
                #print("val", val, val.shape)
                losses[key] += val
            else:
                losses[key] = val
        return losses
    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation.

        Detects with the single-stage head first, then feeds its boxes as
        proposals to the RoI head to extract re-ID features for the same
        detections. Returns ``(None, None)`` when nothing is detected.
        """
        assert self.with_bbox, 'Bbox head must be implemented.'
        xb, xn = self.extract_feat(img)
        outs_n = self.bbox_head(xn, proposals)
        bbox_list = self.bbox_head.get_bboxes(
            *outs_n, img_metas, rescale=rescale)
        # No detections: nothing to extract re-ID features from.
        if bbox_list[0][0].shape[0] == 0:
            return None, None
        # skip post-processing when exporting to ONNX
        if torch.onnx.is_in_onnx_export():
            return bbox_list
        bbox_results_n = [
            bbox2result_reid(det_bboxes, det_labels, reid_feats, self.bbox_head.num_classes)
            for det_bboxes, det_labels, reid_feats in bbox_list
        ]
        proposals = []
        #print('scale_factor')
        #print(img_metas[0]['scale_factor']) img_metas[0]['scale_factor']
        # print('scale',img_metas.data[0][0]['scale_factor'])
        # Rescale detected boxes back to network-input scale so the RoI head
        # samples the feature map at the right locations (extra 1 keeps the
        # score column unscaled).
        tmp_sf = torch.FloatTensor(np.append(img_metas[0]['scale_factor'], 1)).to('cuda')
        #print(tmp_sf)
        for det_bboxes, det_labels, reid_feats in bbox_list:
            #print("det_bboxes")
            #print(det_bboxes.shape)
            #print(det_bboxes)
            proposals.append(torch.mul(det_bboxes, tmp_sf))
        # NOTE(review): dead branch — `proposals` was just rebuilt as a list
        # above, so it is never None here.
        if proposals is None:
            proposal_list = self.rpn_head.simple_test_rpn(xb, img_metas)
        else:
            proposal_list = proposals
        #print("img_metas")
        #print(img_metas)
        #print("proposal_list")
        #print(proposal_list[0].shape)
        #print(proposal_list)
        bbox_results_b, det_features = self.roi_head.simple_test(
            xb, proposal_list, img_metas, rescale=rescale, use_rpn=False)
        #print("results_b")
        #print("det_features")
        #print(len(det_features))
        #print(det_features.shape)
        # The RoI head's box results are discarded: keep the single-stage
        # boxes but substitute the RoI branch's re-ID features (columns 5+).
        bbox_results_b = []
        #print("bbox_results_n")
        #print(bbox_results_n[0][0].shape)
        #print(bbox_results_n[0][0][:, 5:])
        bbox_results_b.append(bbox_results_n[0][0].copy())
        bbox_results_b[0][:, 5:] = det_features.cpu().numpy()
        #print(bbox_results_b[0][:, 5:])
        #print(bbox_results_n[0][0][:, 5:])
        #print(len(bbox_results_b))
        #print(bbox_results_b[0].shape)
        #print(bbox_results_b[0][:,:5])
        return bbox_results_n, bbox_results_b
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
# recompute feats to save memory
x = self.extract_feats(imgs)
proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale) | rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head_)
| random_line_split |
single_two_stage17_6_prw.py | import torch
import torch.nn as nn
import numpy as np
from collections import defaultdict
import torch.nn.functional as F
# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
from mmdet.core import bbox2result_reid
from ..roi_heads.bbox_heads.oim_nae_new import OIMLoss
from ..dense_heads.labeled_matching_layer_queue import LabeledMatchingLayerQueue
from ..dense_heads.unlabeled_matching_layer import UnlabeledMatchingLayer
from ..dense_heads.triplet_loss import TripletLossFilter
from ..utils import MINE
@DETECTORS.register_module()
class SingleTwoStageDetector176PRW(BaseDetector):
"""Base class for two-stage detectors.
Two-stage detectors typically consisting of a region proposal network and a
task-specific regression head.
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(SingleTwoStageDetector176PRW, self).__init__()
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head_)
if roi_head is not None:
# update train and test cfg here for now
# TODO: refactor assigner & sampler
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
self.roi_head = build_head(roi_head)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
self.loss_oim = OIMLoss()
num_person = 5532
queue_size = 5000
self.labeled_matching_layer = LabeledMatchingLayerQueue(num_persons=num_person, feat_len=256) # for mot17half
self.unlabeled_matching_layer = UnlabeledMatchingLayer(queue_size=queue_size, feat_len=256)
self.loss_tri = TripletLossFilter()
# self.mi_estimator = CLUBSample(256, 256, 512)
# self.mi_estimator = CLUB()
self.mi_estimator = MINE(256, 256, 256)
# self.mi_estimator = L1OutUB(256, 256, 256)
@property
def with_rpn(self):
"""bool: whether the detector has RPN"""
return hasattr(self, 'rpn_head') and self.rpn_head is not None
@property
def with_roi_head(self):
"""bool: whether the detector has a RoI head"""
return hasattr(self, 'roi_head') and self.roi_head is not None
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(SingleTwoStageDetector176PRW, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_roi_head:
self.roi_head.init_weights(pretrained)
self.bbox_head.init_weights()
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
xb = self.backbone(img)
if self.with_neck:
xn = self.neck(xb)
#for xx in xb:
# print(xx.shape)
# print(xb[2].shape)
return [xb[2]], xn
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/get_flops.py`
"""
outs = ()
# backbone
xb, xn = self.extract_feat(img)
# rpn
if self.with_rpn:
rpn_outs = self.rpn_head(xb)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 4).to(img.device)
# roi_head
roi_outs = self.roi_head.forward_dummy(xb, proposals)
outs = outs + (roi_outs, )
outs_n = self.bbox_head(xn)
return outs, outs_n
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_ids,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
proposals : override rpn proposals with custom proposals. Use when
`with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
xb, xn = self.extract_feat(img)
#print("here", xb.shape)
losses = dict()
# RPN forward and loss
# RPN forward and loss
if self.with_rpn:
rpn_outs = self.rpn_head(xb)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_metas)
rpn_losses = self.rpn_head.loss(*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get("rpn_proposal", self.test_cfg.rpn)
proposal_list = self.rpn_head.get_bboxes(*rpn_outs, img_metas, cfg=proposal_cfg)
else:
proposal_list = proposals
roi_losses, feats_pids_roi = self.roi_head.forward_train(xb, img_metas, proposal_list,
gt_bboxes, gt_labels, gt_ids,
gt_bboxes_ignore, gt_masks,
**kwargs)
losses.update(roi_losses)
# oim_loss = dict()
# oim_loss["loss_oim_roi"] = self.loss_oim(feats_pids_roi["bbox_feats"], feats_pids_roi["gt_pids"])
# losses.update(oim_loss)
single_losses, feats_pids = self.bbox_head.forward_train(xn, img_metas, gt_bboxes,
gt_labels, gt_ids, gt_bboxes_ignore)
# pos_reid = feats_pids["pos_reid"]
# pos_reid_ids = feats_pids["pos_reid_ids"]
# labeled_matching_scores, labeled_matching_reid, labeled_matching_ids = self.labeled_matching_layer(pos_reid, pos_reid_ids)
# labeled_matching_scores *= 10
# unlabeled_matching_scores = self.unlabeled_matching_layer(pos_reid, pos_reid_ids)
# unlabeled_matching_scores *= 10
# matching_scores = torch.cat((labeled_matching_scores, unlabeled_matching_scores), dim=1)
# pid_labels = pos_reid_ids.clone()
# pid_labels[pid_labels == -2] = -1
# p_i = F.softmax(matching_scores, dim=1)
# focal_p_i = (1 - p_i)**2 * p_i.log()
# #loss_oim = F.nll_loss(focal_p_i, pid_labels, reduction='none', ignore_index=-1)
# loss_oim = F.nll_loss(focal_p_i, pid_labels, ignore_index=-1)
# pos_reid = torch.cat((pos_reid, labeled_matching_reid), dim=0)
# pid_labels = torch.cat((pid_labels, labeled_matching_ids), dim=0)
# loss_tri = self.loss_tri(pos_reid, pid_labels)
# single_losses["loss_oim_singel"] = loss_oim
# single_losses["loss_tri"] = loss_tri
####### calculate mutual information ############
feats_roi = feats_pids_roi["bbox_feats"]
pids_roi = feats_pids_roi["gt_pids"]
feats_fcos = feats_pids["pos_reid"]
pids_fcos = feats_pids["pos_reid_ids"]
dic1 = defaultdict(list)
dic2 = defaultdict(list)
for i in range(len(pids_roi)):
|
for i in range(len(pids_fcos)):
if pids_fcos[i] < 0:
continue
else:
targets2_value = pids_fcos[i].cpu().numpy().item()
dic2[targets2_value].append(feats_fcos[i])
all_feats1 = []
all_feats2 = []
for key, val in dic1.items():
if key in dic2:
val2 = dic2[key]
feat1 = sum(val)/len(val)
# print(feat1.shape)
mean1 = F.normalize(feat1.unsqueeze(0))
# mean1 = feat1.unsqueeze(0)
feat2 = sum(val2)/len(val2)
mean2 = F.normalize(feat2.unsqueeze(0))
# mean2 = feat2.unsqueeze(0)
all_feats1.append(mean1)
all_feats2.append(mean2)
if len(all_feats1) > 0 and len(all_feats2) >0:
all_feats1 = torch.cat(all_feats1)
all_feats2 = torch.cat(all_feats2)
# print(all_feats1.shape, all_feats2.shape)
all_feats1_d = all_feats1.detach()
all_feats2_d = all_feats2.detach()
mi_loss = dict()
if torch.randint(1, 100, (1,)) % 3:
self.mi_estimator.train()
mi_loss["loss_mi"] = 0.2 * self.mi_estimator.learning_loss(all_feats1_d, all_feats2_d)
else:
self.mi_estimator.eval()
# mi_loss["loss_mi_bound"] = self.mi_estimator(all_feats1, all_feats2)
mi_loss["loss_mi_bound"] = 0.2 * self.mi_estimator.learning_loss(all_feats1, all_feats2)
losses.update(mi_loss)
# losses.update(single_losses)
for key, val in single_losses.items():
if key in losses:
#print("losses", key, losses[key], losses[key].shape)
#print("val", val, val.shape)
losses[key] += val
else:
losses[key] = val
return losses
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
xb, xn = self.extract_feat(img)
outs_n = self.bbox_head(xn, proposals)
bbox_list = self.bbox_head.get_bboxes(
*outs_n, img_metas, rescale=rescale)
if bbox_list[0][0].shape[0] == 0:
return None, None
# skip post-processing when exporting to ONNX
if torch.onnx.is_in_onnx_export():
return bbox_list
bbox_results_n = [
bbox2result_reid(det_bboxes, det_labels, reid_feats, self.bbox_head.num_classes)
for det_bboxes, det_labels, reid_feats in bbox_list
]
proposals = []
#print('scale_factor')
#print(img_metas[0]['scale_factor']) img_metas[0]['scale_factor']
# print('scale',img_metas.data[0][0]['scale_factor'])
tmp_sf = torch.FloatTensor(np.append(img_metas[0]['scale_factor'], 1)).to('cuda')
#print(tmp_sf)
for det_bboxes, det_labels, reid_feats in bbox_list:
#print("det_bboxes")
#print(det_bboxes.shape)
#print(det_bboxes)
proposals.append(torch.mul(det_bboxes, tmp_sf))
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(xb, img_metas)
else:
proposal_list = proposals
#print("img_metas")
#print(img_metas)
#print("proposal_list")
#print(proposal_list[0].shape)
#print(proposal_list)
bbox_results_b, det_features = self.roi_head.simple_test(
xb, proposal_list, img_metas, rescale=rescale, use_rpn=False)
#print("results_b")
#print("det_features")
#print(len(det_features))
#print(det_features.shape)
bbox_results_b = []
#print("bbox_results_n")
#print(bbox_results_n[0][0].shape)
#print(bbox_results_n[0][0][:, 5:])
bbox_results_b.append(bbox_results_n[0][0].copy())
bbox_results_b[0][:, 5:] = det_features.cpu().numpy()
#print(bbox_results_b[0][:, 5:])
#print(bbox_results_n[0][0][:, 5:])
#print(len(bbox_results_b))
#print(bbox_results_b[0].shape)
#print(bbox_results_b[0][:,:5])
return bbox_results_n, bbox_results_b
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
# recompute feats to save memory
x = self.extract_feats(imgs)
proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
| if pids_roi[i] < 0:
continue
else:
targets1_value = pids_roi[i].cpu().numpy().item()
dic1[targets1_value].append(feats_roi[i]) | conditional_block |
single_two_stage17_6_prw.py | import torch
import torch.nn as nn
import numpy as np
from collections import defaultdict
import torch.nn.functional as F
# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
from mmdet.core import bbox2result_reid
from ..roi_heads.bbox_heads.oim_nae_new import OIMLoss
from ..dense_heads.labeled_matching_layer_queue import LabeledMatchingLayerQueue
from ..dense_heads.unlabeled_matching_layer import UnlabeledMatchingLayer
from ..dense_heads.triplet_loss import TripletLossFilter
from ..utils import MINE
@DETECTORS.register_module()
class SingleTwoStageDetector176PRW(BaseDetector):
"""Base class for two-stage detectors.
Two-stage detectors typically consisting of a region proposal network and a
task-specific regression head.
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(SingleTwoStageDetector176PRW, self).__init__()
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head_)
if roi_head is not None:
# update train and test cfg here for now
# TODO: refactor assigner & sampler
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
self.roi_head = build_head(roi_head)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
self.loss_oim = OIMLoss()
num_person = 5532
queue_size = 5000
self.labeled_matching_layer = LabeledMatchingLayerQueue(num_persons=num_person, feat_len=256) # for mot17half
self.unlabeled_matching_layer = UnlabeledMatchingLayer(queue_size=queue_size, feat_len=256)
self.loss_tri = TripletLossFilter()
# self.mi_estimator = CLUBSample(256, 256, 512)
# self.mi_estimator = CLUB()
self.mi_estimator = MINE(256, 256, 256)
# self.mi_estimator = L1OutUB(256, 256, 256)
@property
def with_rpn(self):
|
@property
def with_roi_head(self):
"""bool: whether the detector has a RoI head"""
return hasattr(self, 'roi_head') and self.roi_head is not None
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(SingleTwoStageDetector176PRW, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_roi_head:
self.roi_head.init_weights(pretrained)
self.bbox_head.init_weights()
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
xb = self.backbone(img)
if self.with_neck:
xn = self.neck(xb)
#for xx in xb:
# print(xx.shape)
# print(xb[2].shape)
return [xb[2]], xn
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/get_flops.py`
"""
outs = ()
# backbone
xb, xn = self.extract_feat(img)
# rpn
if self.with_rpn:
rpn_outs = self.rpn_head(xb)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 4).to(img.device)
# roi_head
roi_outs = self.roi_head.forward_dummy(xb, proposals)
outs = outs + (roi_outs, )
outs_n = self.bbox_head(xn)
return outs, outs_n
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_ids,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
proposals : override rpn proposals with custom proposals. Use when
`with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
xb, xn = self.extract_feat(img)
#print("here", xb.shape)
losses = dict()
# RPN forward and loss
# RPN forward and loss
if self.with_rpn:
rpn_outs = self.rpn_head(xb)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_metas)
rpn_losses = self.rpn_head.loss(*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get("rpn_proposal", self.test_cfg.rpn)
proposal_list = self.rpn_head.get_bboxes(*rpn_outs, img_metas, cfg=proposal_cfg)
else:
proposal_list = proposals
roi_losses, feats_pids_roi = self.roi_head.forward_train(xb, img_metas, proposal_list,
gt_bboxes, gt_labels, gt_ids,
gt_bboxes_ignore, gt_masks,
**kwargs)
losses.update(roi_losses)
# oim_loss = dict()
# oim_loss["loss_oim_roi"] = self.loss_oim(feats_pids_roi["bbox_feats"], feats_pids_roi["gt_pids"])
# losses.update(oim_loss)
single_losses, feats_pids = self.bbox_head.forward_train(xn, img_metas, gt_bboxes,
gt_labels, gt_ids, gt_bboxes_ignore)
# pos_reid = feats_pids["pos_reid"]
# pos_reid_ids = feats_pids["pos_reid_ids"]
# labeled_matching_scores, labeled_matching_reid, labeled_matching_ids = self.labeled_matching_layer(pos_reid, pos_reid_ids)
# labeled_matching_scores *= 10
# unlabeled_matching_scores = self.unlabeled_matching_layer(pos_reid, pos_reid_ids)
# unlabeled_matching_scores *= 10
# matching_scores = torch.cat((labeled_matching_scores, unlabeled_matching_scores), dim=1)
# pid_labels = pos_reid_ids.clone()
# pid_labels[pid_labels == -2] = -1
# p_i = F.softmax(matching_scores, dim=1)
# focal_p_i = (1 - p_i)**2 * p_i.log()
# #loss_oim = F.nll_loss(focal_p_i, pid_labels, reduction='none', ignore_index=-1)
# loss_oim = F.nll_loss(focal_p_i, pid_labels, ignore_index=-1)
# pos_reid = torch.cat((pos_reid, labeled_matching_reid), dim=0)
# pid_labels = torch.cat((pid_labels, labeled_matching_ids), dim=0)
# loss_tri = self.loss_tri(pos_reid, pid_labels)
# single_losses["loss_oim_singel"] = loss_oim
# single_losses["loss_tri"] = loss_tri
####### calculate mutual information ############
feats_roi = feats_pids_roi["bbox_feats"]
pids_roi = feats_pids_roi["gt_pids"]
feats_fcos = feats_pids["pos_reid"]
pids_fcos = feats_pids["pos_reid_ids"]
dic1 = defaultdict(list)
dic2 = defaultdict(list)
for i in range(len(pids_roi)):
if pids_roi[i] < 0:
continue
else:
targets1_value = pids_roi[i].cpu().numpy().item()
dic1[targets1_value].append(feats_roi[i])
for i in range(len(pids_fcos)):
if pids_fcos[i] < 0:
continue
else:
targets2_value = pids_fcos[i].cpu().numpy().item()
dic2[targets2_value].append(feats_fcos[i])
all_feats1 = []
all_feats2 = []
for key, val in dic1.items():
if key in dic2:
val2 = dic2[key]
feat1 = sum(val)/len(val)
# print(feat1.shape)
mean1 = F.normalize(feat1.unsqueeze(0))
# mean1 = feat1.unsqueeze(0)
feat2 = sum(val2)/len(val2)
mean2 = F.normalize(feat2.unsqueeze(0))
# mean2 = feat2.unsqueeze(0)
all_feats1.append(mean1)
all_feats2.append(mean2)
if len(all_feats1) > 0 and len(all_feats2) >0:
all_feats1 = torch.cat(all_feats1)
all_feats2 = torch.cat(all_feats2)
# print(all_feats1.shape, all_feats2.shape)
all_feats1_d = all_feats1.detach()
all_feats2_d = all_feats2.detach()
mi_loss = dict()
if torch.randint(1, 100, (1,)) % 3:
self.mi_estimator.train()
mi_loss["loss_mi"] = 0.2 * self.mi_estimator.learning_loss(all_feats1_d, all_feats2_d)
else:
self.mi_estimator.eval()
# mi_loss["loss_mi_bound"] = self.mi_estimator(all_feats1, all_feats2)
mi_loss["loss_mi_bound"] = 0.2 * self.mi_estimator.learning_loss(all_feats1, all_feats2)
losses.update(mi_loss)
# losses.update(single_losses)
for key, val in single_losses.items():
if key in losses:
#print("losses", key, losses[key], losses[key].shape)
#print("val", val, val.shape)
losses[key] += val
else:
losses[key] = val
return losses
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
xb, xn = self.extract_feat(img)
outs_n = self.bbox_head(xn, proposals)
bbox_list = self.bbox_head.get_bboxes(
*outs_n, img_metas, rescale=rescale)
if bbox_list[0][0].shape[0] == 0:
return None, None
# skip post-processing when exporting to ONNX
if torch.onnx.is_in_onnx_export():
return bbox_list
bbox_results_n = [
bbox2result_reid(det_bboxes, det_labels, reid_feats, self.bbox_head.num_classes)
for det_bboxes, det_labels, reid_feats in bbox_list
]
proposals = []
#print('scale_factor')
#print(img_metas[0]['scale_factor']) img_metas[0]['scale_factor']
# print('scale',img_metas.data[0][0]['scale_factor'])
tmp_sf = torch.FloatTensor(np.append(img_metas[0]['scale_factor'], 1)).to('cuda')
#print(tmp_sf)
for det_bboxes, det_labels, reid_feats in bbox_list:
#print("det_bboxes")
#print(det_bboxes.shape)
#print(det_bboxes)
proposals.append(torch.mul(det_bboxes, tmp_sf))
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(xb, img_metas)
else:
proposal_list = proposals
#print("img_metas")
#print(img_metas)
#print("proposal_list")
#print(proposal_list[0].shape)
#print(proposal_list)
bbox_results_b, det_features = self.roi_head.simple_test(
xb, proposal_list, img_metas, rescale=rescale, use_rpn=False)
#print("results_b")
#print("det_features")
#print(len(det_features))
#print(det_features.shape)
bbox_results_b = []
#print("bbox_results_n")
#print(bbox_results_n[0][0].shape)
#print(bbox_results_n[0][0][:, 5:])
bbox_results_b.append(bbox_results_n[0][0].copy())
bbox_results_b[0][:, 5:] = det_features.cpu().numpy()
#print(bbox_results_b[0][:, 5:])
#print(bbox_results_n[0][0][:, 5:])
#print(len(bbox_results_b))
#print(bbox_results_b[0].shape)
#print(bbox_results_b[0][:,:5])
return bbox_results_n, bbox_results_b
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
# recompute feats to save memory
x = self.extract_feats(imgs)
proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
| """bool: whether the detector has RPN"""
return hasattr(self, 'rpn_head') and self.rpn_head is not None | identifier_body |
single_two_stage17_6_prw.py | import torch
import torch.nn as nn
import numpy as np
from collections import defaultdict
import torch.nn.functional as F
# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
from mmdet.core import bbox2result_reid
from ..roi_heads.bbox_heads.oim_nae_new import OIMLoss
from ..dense_heads.labeled_matching_layer_queue import LabeledMatchingLayerQueue
from ..dense_heads.unlabeled_matching_layer import UnlabeledMatchingLayer
from ..dense_heads.triplet_loss import TripletLossFilter
from ..utils import MINE
@DETECTORS.register_module()
class SingleTwoStageDetector176PRW(BaseDetector):
"""Base class for two-stage detectors.
Two-stage detectors typically consisting of a region proposal network and a
task-specific regression head.
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(SingleTwoStageDetector176PRW, self).__init__()
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head_)
if roi_head is not None:
# update train and test cfg here for now
# TODO: refactor assigner & sampler
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
self.roi_head = build_head(roi_head)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
self.loss_oim = OIMLoss()
num_person = 5532
queue_size = 5000
self.labeled_matching_layer = LabeledMatchingLayerQueue(num_persons=num_person, feat_len=256) # for mot17half
self.unlabeled_matching_layer = UnlabeledMatchingLayer(queue_size=queue_size, feat_len=256)
self.loss_tri = TripletLossFilter()
# self.mi_estimator = CLUBSample(256, 256, 512)
# self.mi_estimator = CLUB()
self.mi_estimator = MINE(256, 256, 256)
# self.mi_estimator = L1OutUB(256, 256, 256)
@property
def | (self):
"""bool: whether the detector has RPN"""
return hasattr(self, 'rpn_head') and self.rpn_head is not None
@property
def with_roi_head(self):
"""bool: whether the detector has a RoI head"""
return hasattr(self, 'roi_head') and self.roi_head is not None
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(SingleTwoStageDetector176PRW, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_roi_head:
self.roi_head.init_weights(pretrained)
self.bbox_head.init_weights()
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
xb = self.backbone(img)
if self.with_neck:
xn = self.neck(xb)
#for xx in xb:
# print(xx.shape)
# print(xb[2].shape)
return [xb[2]], xn
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/get_flops.py`
"""
outs = ()
# backbone
xb, xn = self.extract_feat(img)
# rpn
if self.with_rpn:
rpn_outs = self.rpn_head(xb)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 4).to(img.device)
# roi_head
roi_outs = self.roi_head.forward_dummy(xb, proposals)
outs = outs + (roi_outs, )
outs_n = self.bbox_head(xn)
return outs, outs_n
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_ids,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
proposals : override rpn proposals with custom proposals. Use when
`with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
xb, xn = self.extract_feat(img)
#print("here", xb.shape)
losses = dict()
# RPN forward and loss
# RPN forward and loss
if self.with_rpn:
rpn_outs = self.rpn_head(xb)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_metas)
rpn_losses = self.rpn_head.loss(*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get("rpn_proposal", self.test_cfg.rpn)
proposal_list = self.rpn_head.get_bboxes(*rpn_outs, img_metas, cfg=proposal_cfg)
else:
proposal_list = proposals
roi_losses, feats_pids_roi = self.roi_head.forward_train(xb, img_metas, proposal_list,
gt_bboxes, gt_labels, gt_ids,
gt_bboxes_ignore, gt_masks,
**kwargs)
losses.update(roi_losses)
# oim_loss = dict()
# oim_loss["loss_oim_roi"] = self.loss_oim(feats_pids_roi["bbox_feats"], feats_pids_roi["gt_pids"])
# losses.update(oim_loss)
single_losses, feats_pids = self.bbox_head.forward_train(xn, img_metas, gt_bboxes,
gt_labels, gt_ids, gt_bboxes_ignore)
# pos_reid = feats_pids["pos_reid"]
# pos_reid_ids = feats_pids["pos_reid_ids"]
# labeled_matching_scores, labeled_matching_reid, labeled_matching_ids = self.labeled_matching_layer(pos_reid, pos_reid_ids)
# labeled_matching_scores *= 10
# unlabeled_matching_scores = self.unlabeled_matching_layer(pos_reid, pos_reid_ids)
# unlabeled_matching_scores *= 10
# matching_scores = torch.cat((labeled_matching_scores, unlabeled_matching_scores), dim=1)
# pid_labels = pos_reid_ids.clone()
# pid_labels[pid_labels == -2] = -1
# p_i = F.softmax(matching_scores, dim=1)
# focal_p_i = (1 - p_i)**2 * p_i.log()
# #loss_oim = F.nll_loss(focal_p_i, pid_labels, reduction='none', ignore_index=-1)
# loss_oim = F.nll_loss(focal_p_i, pid_labels, ignore_index=-1)
# pos_reid = torch.cat((pos_reid, labeled_matching_reid), dim=0)
# pid_labels = torch.cat((pid_labels, labeled_matching_ids), dim=0)
# loss_tri = self.loss_tri(pos_reid, pid_labels)
# single_losses["loss_oim_singel"] = loss_oim
# single_losses["loss_tri"] = loss_tri
####### calculate mutual information ############
feats_roi = feats_pids_roi["bbox_feats"]
pids_roi = feats_pids_roi["gt_pids"]
feats_fcos = feats_pids["pos_reid"]
pids_fcos = feats_pids["pos_reid_ids"]
dic1 = defaultdict(list)
dic2 = defaultdict(list)
for i in range(len(pids_roi)):
if pids_roi[i] < 0:
continue
else:
targets1_value = pids_roi[i].cpu().numpy().item()
dic1[targets1_value].append(feats_roi[i])
for i in range(len(pids_fcos)):
if pids_fcos[i] < 0:
continue
else:
targets2_value = pids_fcos[i].cpu().numpy().item()
dic2[targets2_value].append(feats_fcos[i])
all_feats1 = []
all_feats2 = []
for key, val in dic1.items():
if key in dic2:
val2 = dic2[key]
feat1 = sum(val)/len(val)
# print(feat1.shape)
mean1 = F.normalize(feat1.unsqueeze(0))
# mean1 = feat1.unsqueeze(0)
feat2 = sum(val2)/len(val2)
mean2 = F.normalize(feat2.unsqueeze(0))
# mean2 = feat2.unsqueeze(0)
all_feats1.append(mean1)
all_feats2.append(mean2)
if len(all_feats1) > 0 and len(all_feats2) >0:
all_feats1 = torch.cat(all_feats1)
all_feats2 = torch.cat(all_feats2)
# print(all_feats1.shape, all_feats2.shape)
all_feats1_d = all_feats1.detach()
all_feats2_d = all_feats2.detach()
mi_loss = dict()
if torch.randint(1, 100, (1,)) % 3:
self.mi_estimator.train()
mi_loss["loss_mi"] = 0.2 * self.mi_estimator.learning_loss(all_feats1_d, all_feats2_d)
else:
self.mi_estimator.eval()
# mi_loss["loss_mi_bound"] = self.mi_estimator(all_feats1, all_feats2)
mi_loss["loss_mi_bound"] = 0.2 * self.mi_estimator.learning_loss(all_feats1, all_feats2)
losses.update(mi_loss)
# losses.update(single_losses)
for key, val in single_losses.items():
if key in losses:
#print("losses", key, losses[key], losses[key].shape)
#print("val", val, val.shape)
losses[key] += val
else:
losses[key] = val
return losses
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
xb, xn = self.extract_feat(img)
outs_n = self.bbox_head(xn, proposals)
bbox_list = self.bbox_head.get_bboxes(
*outs_n, img_metas, rescale=rescale)
if bbox_list[0][0].shape[0] == 0:
return None, None
# skip post-processing when exporting to ONNX
if torch.onnx.is_in_onnx_export():
return bbox_list
bbox_results_n = [
bbox2result_reid(det_bboxes, det_labels, reid_feats, self.bbox_head.num_classes)
for det_bboxes, det_labels, reid_feats in bbox_list
]
proposals = []
#print('scale_factor')
#print(img_metas[0]['scale_factor']) img_metas[0]['scale_factor']
# print('scale',img_metas.data[0][0]['scale_factor'])
tmp_sf = torch.FloatTensor(np.append(img_metas[0]['scale_factor'], 1)).to('cuda')
#print(tmp_sf)
for det_bboxes, det_labels, reid_feats in bbox_list:
#print("det_bboxes")
#print(det_bboxes.shape)
#print(det_bboxes)
proposals.append(torch.mul(det_bboxes, tmp_sf))
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(xb, img_metas)
else:
proposal_list = proposals
#print("img_metas")
#print(img_metas)
#print("proposal_list")
#print(proposal_list[0].shape)
#print(proposal_list)
bbox_results_b, det_features = self.roi_head.simple_test(
xb, proposal_list, img_metas, rescale=rescale, use_rpn=False)
#print("results_b")
#print("det_features")
#print(len(det_features))
#print(det_features.shape)
bbox_results_b = []
#print("bbox_results_n")
#print(bbox_results_n[0][0].shape)
#print(bbox_results_n[0][0][:, 5:])
bbox_results_b.append(bbox_results_n[0][0].copy())
bbox_results_b[0][:, 5:] = det_features.cpu().numpy()
#print(bbox_results_b[0][:, 5:])
#print(bbox_results_n[0][0][:, 5:])
#print(len(bbox_results_b))
#print(bbox_results_b[0].shape)
#print(bbox_results_b[0][:,:5])
return bbox_results_n, bbox_results_b
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
# recompute feats to save memory
x = self.extract_feats(imgs)
proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
| with_rpn | identifier_name |
fabric.go | //(C) Copyright [2020] Hewlett Packard Enterprise Development LP
//
//Licensed under the Apache License, Version 2.0 (the "License"); you may
//not use this file except in compliance with the License. You may obtain
//a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
//WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
//License for the specific language governing permissions and limitations
// under the License.
package chassis
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
dmtf "github.com/ODIM-Project/ODIM/lib-dmtf/model"
"github.com/ODIM-Project/ODIM/lib-rest-client/pmbhandle"
"github.com/ODIM-Project/ODIM/lib-utilities/common"
"github.com/ODIM-Project/ODIM/lib-utilities/config"
l "github.com/ODIM-Project/ODIM/lib-utilities/logs"
"github.com/ODIM-Project/ODIM/lib-utilities/response"
"github.com/ODIM-Project/ODIM/svc-systems/smodel"
"github.com/ODIM-Project/ODIM/svc-systems/sresponse"
)
type fabricFactory struct {
collection *sresponse.Collection
chassisMap map[string]bool
wg *sync.WaitGroup
mu *sync.RWMutex
getFabricManagers func(context.Context) ([]smodel.Plugin, error)
contactClient func(context.Context, string, string, string, string, interface{}, map[string]string) (*http.Response, error)
}
func getFabricFactory(collection *sresponse.Collection) *fabricFactory {
chassisMap := make(map[string]bool)
return &fabricFactory{
collection: collection,
chassisMap: chassisMap,
wg: &sync.WaitGroup{},
mu: &sync.RWMutex{},
getFabricManagers: smodel.GetFabricManagers,
contactClient: pmbhandle.ContactPlugin,
}
}
type pluginContactRequest struct {
URL string
HTTPMethodType string
ContactClient func(context.Context, string, string, string, string, interface{}, map[string]string) (*http.Response, error)
PostBody interface{}
LoginCredential map[string]string
Plugin smodel.Plugin
Token string
}
// PluginToken interface to hold the token
type PluginToken struct {
Tokens map[string]string
lock sync.RWMutex
}
// Token variable hold the all the XAuthToken against the plguin ID
var Token PluginToken
func (c *sourceProviderImpl) findFabricChassis(ctx context.Context, collection *sresponse.Collection) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.findFabricChassis")
f := c.getFabricFactory(collection)
managers, err := f.getFabricManagers(ctx)
if err != nil {
l.LogWithFields(ctx).Warn("while trying to collect fabric managers details from DB, got " + err.Error())
return
}
threadID := 1
for _, manager := range managers {
ctxt := context.WithValue(ctx, common.ThreadName, common.GetFabricManagerChassis)
ctx = context.WithValue(ctxt, common.ThreadID, strconv.Itoa(threadID))
f.wg.Add(1)
go f.getFabricManagerChassis(ctx, manager)
threadID++
}
f.wg.Wait()
}
// getFabricManagerChassis will send a request to the plugin for the chassis collection,
// and add them to the existing chassis collection.
func (f *fabricFactory) getFabricManagerChassis(ctx context.Context, plugin smodel.Plugin) |
// createChassisRequest creates the parameters ready for the plugin communication
func (f *fabricFactory) createChassisRequest(ctx context.Context, plugin smodel.Plugin, url, method string, body *json.RawMessage) (pReq *pluginContactRequest, errResp *response.RPC, err error) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.createChassisRequest")
var token string
cred := make(map[string]string)
if strings.EqualFold(plugin.PreferredAuthType, "XAuthToken") {
token = f.getPluginToken(ctx, plugin)
if token == "" {
*errResp = common.GeneralError(http.StatusUnauthorized, response.ResourceAtURIUnauthorized, "unable to create session for plugin "+plugin.ID, []interface{}{url}, nil)
return nil, errResp, fmt.Errorf("unable to create session for plugin " + plugin.ID)
}
} else {
cred["UserName"] = plugin.Username
cred["Password"] = string(plugin.Password)
}
// validating Patch request properties are in uppercamelcase or not
if strings.EqualFold(method, http.MethodPatch) {
errResp = validateReqParamsCase(ctx, body)
if errResp != nil {
return nil, errResp, fmt.Errorf("validation of request body failed")
}
}
for key, value := range config.Data.URLTranslation.SouthBoundURL {
if body != nil {
*body = json.RawMessage(strings.Replace(string(*body), key, value, -1))
}
url = strings.Replace(url, key, value, -1)
}
pReq = &pluginContactRequest{
Token: token,
LoginCredential: cred,
ContactClient: f.contactClient,
Plugin: plugin,
HTTPMethodType: method,
URL: url,
PostBody: body,
}
return pReq, nil, nil
}
// collectChassisCollection contacts the plugin and collect the chassis response
func collectChassisCollection(ctx context.Context, f *fabricFactory, pluginRequest *pluginContactRequest) ([]dmtf.Link, error) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.collectChassisCollection")
body, _, statusCode, _, err := ContactPluginFunc(ctx, pluginRequest)
if statusCode == http.StatusUnauthorized && strings.EqualFold(pluginRequest.Plugin.PreferredAuthType, "XAuthToken") {
body, _, statusCode, _, err = retryFabricsOperation(ctx, f, pluginRequest)
}
if err != nil {
return []dmtf.Link{}, fmt.Errorf("while trying contact plugin " + pluginRequest.Plugin.ID + ", got " + err.Error())
}
if !is2xx(statusCode) {
return []dmtf.Link{}, fmt.Errorf("while trying contact plugin " + pluginRequest.Plugin.ID + ", got " + strconv.Itoa(statusCode))
}
return extractChassisCollection(ctx, body)
}
func contactPlugin(ctx context.Context, req *pluginContactRequest) ([]byte, string, int, string, error) {
pluginResponse, err := callPlugin(ctx, req)
if err != nil {
if getPluginStatus(ctx, req.Plugin) {
pluginResponse, err = callPlugin(ctx, req)
}
if err != nil {
return nil, "", http.StatusInternalServerError, response.InternalError, fmt.Errorf(err.Error())
}
}
defer pluginResponse.Body.Close()
body, err := ioutil.ReadAll(pluginResponse.Body)
if err != nil {
return nil, "", http.StatusInternalServerError, response.InternalError, fmt.Errorf(err.Error())
}
var statusMessage string
switch pluginResponse.StatusCode {
case http.StatusOK:
statusMessage = response.Success
case http.StatusUnauthorized:
statusMessage = response.ResourceAtURIUnauthorized
case http.StatusNotFound:
statusMessage = response.ResourceNotFound
default:
statusMessage = response.CouldNotEstablishConnection
}
return body, pluginResponse.Header.Get("X-Auth-Token"), pluginResponse.StatusCode, statusMessage, nil
}
// retryFabricsOperation will be called whenever the unauthorized status code during the plugin call
// This function will create a new session token reexcutes the plugin call
func retryFabricsOperation(ctx context.Context, f *fabricFactory, req *pluginContactRequest) ([]byte, string, int, string, error) {
var resp response.RPC
var token = f.createToken(ctx, req.Plugin)
if token == "" {
resp = common.GeneralError(http.StatusUnauthorized, response.NoValidSession, "error: Unable to create session with plugin "+req.Plugin.ID,
[]interface{}{}, nil)
data, _ := json.Marshal(resp.Body)
return data, "", int(resp.StatusCode), response.NoValidSession, fmt.Errorf("error: Unable to create session with plugin")
}
req.Token = token
return contactPlugin(ctx, req)
}
func callPlugin(ctx context.Context, req *pluginContactRequest) (*http.Response, error) {
var reqURL = "https://" + req.Plugin.IP + ":" + req.Plugin.Port + req.URL
if strings.EqualFold(req.Plugin.PreferredAuthType, "BasicAuth") {
return req.ContactClient(ctx, reqURL, req.HTTPMethodType, "", "", req.PostBody, req.LoginCredential)
}
return req.ContactClient(ctx, reqURL, req.HTTPMethodType, req.Token, "", req.PostBody, nil)
}
// getPluginStatus checks the status of given plugin in configured interval
func getPluginStatus(ctx context.Context, plugin smodel.Plugin) bool {
var pluginStatus = common.PluginStatus{
Method: http.MethodGet,
RequestBody: common.StatusRequest{
Comment: "",
},
PluginIP: plugin.IP,
PluginPort: plugin.Port,
ResponseWaitTime: config.Data.PluginStatusPolling.ResponseTimeoutInSecs,
Count: config.Data.PluginStatusPolling.MaxRetryAttempt,
RetryInterval: config.Data.PluginStatusPolling.RetryIntervalInMins,
CACertificate: &config.Data.KeyCertConf.RootCACertificate,
}
status, _, _, err := pluginStatus.CheckStatus()
if err != nil && !status {
l.LogWithFields(ctx).Warn("while getting the status for plugin " + plugin.ID + err.Error())
return status
}
l.LogWithFields(ctx).Info("Status of plugin" + plugin.ID + strconv.FormatBool(status))
return status
}
// getPluginToken will verify the if any token present to the plugin else it will create token for the new plugin
func (f *fabricFactory) getPluginToken(ctx context.Context, plugin smodel.Plugin) string {
authToken := Token.getToken(plugin.ID)
if authToken == "" {
return f.createToken(ctx, plugin)
}
l.LogWithFields(ctx).Debugf("Plugin Token: " + authToken)
return authToken
}
func (f *fabricFactory) createToken(ctx context.Context, plugin smodel.Plugin) string {
var contactRequest pluginContactRequest
contactRequest.ContactClient = f.contactClient
contactRequest.Plugin = plugin
contactRequest.HTTPMethodType = http.MethodPost
contactRequest.PostBody = map[string]interface{}{
"Username": plugin.Username,
"Password": string(plugin.Password),
}
contactRequest.URL = "/ODIM/v1/Sessions"
_, token, _, _, err := contactPlugin(ctx, &contactRequest)
if err != nil {
l.LogWithFields(ctx).Error(err.Error())
}
if token != "" {
Token.storeToken(plugin.ID, token)
}
return token
}
func (p *PluginToken) storeToken(plguinID, token string) {
p.lock.Lock()
defer p.lock.Unlock()
p.Tokens[plguinID] = token
}
func (p *PluginToken) getToken(pluginID string) string {
p.lock.RLock()
defer p.lock.RUnlock()
return p.Tokens[pluginID]
}
// extractChassisCollection unmarshals the plugin response and returns the collection members
func extractChassisCollection(ctx context.Context, body []byte) ([]dmtf.Link, error) {
var resp sresponse.Collection
data := string(body)
//replacing the resposne with north bound translation URL
for key, value := range config.Data.URLTranslation.NorthBoundURL {
data = strings.Replace(data, key, value, -1)
}
err := json.Unmarshal([]byte(data), &resp)
if err != nil {
return resp.Members, fmt.Errorf("while unmarshalling the chassis fabric collection, got: %v", err)
}
l.LogWithFields(ctx).Debugf("outgoing response for get Chassis Collection request: %s", resp.Members)
return resp.Members, nil
}
func is2xx(status int) bool {
return status/100 == 2
}
| {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.getFabricManagerChassis")
defer f.wg.Done()
req, errResp, err := f.createChassisRequest(ctx, plugin, collectionURL, http.MethodGet, nil)
if errResp != nil {
l.LogWithFields(ctx).Warn("while trying to create fabric plugin request for " + plugin.ID + ", got " + err.Error())
return
}
links, err := collectChassisCollection(ctx, f, req)
if err != nil {
l.LogWithFields(ctx).Warn("while trying to create fabric plugin request for " + plugin.ID + ", got " + err.Error())
return
}
for _, link := range links {
f.mu.Lock()
if !f.chassisMap[link.Oid] { // uniqueness check for the chassis URI
f.chassisMap[link.Oid] = true
f.collection.AddMember(link)
}
f.mu.Unlock()
}
} | identifier_body |
fabric.go | //(C) Copyright [2020] Hewlett Packard Enterprise Development LP
//
//Licensed under the Apache License, Version 2.0 (the "License"); you may
//not use this file except in compliance with the License. You may obtain
//a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
//WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
//License for the specific language governing permissions and limitations
// under the License.
package chassis
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
dmtf "github.com/ODIM-Project/ODIM/lib-dmtf/model"
"github.com/ODIM-Project/ODIM/lib-rest-client/pmbhandle"
"github.com/ODIM-Project/ODIM/lib-utilities/common"
"github.com/ODIM-Project/ODIM/lib-utilities/config"
l "github.com/ODIM-Project/ODIM/lib-utilities/logs"
"github.com/ODIM-Project/ODIM/lib-utilities/response"
"github.com/ODIM-Project/ODIM/svc-systems/smodel"
"github.com/ODIM-Project/ODIM/svc-systems/sresponse"
)
type fabricFactory struct {
collection *sresponse.Collection
chassisMap map[string]bool
wg *sync.WaitGroup
mu *sync.RWMutex
getFabricManagers func(context.Context) ([]smodel.Plugin, error)
contactClient func(context.Context, string, string, string, string, interface{}, map[string]string) (*http.Response, error)
}
func getFabricFactory(collection *sresponse.Collection) *fabricFactory {
chassisMap := make(map[string]bool)
return &fabricFactory{
collection: collection,
chassisMap: chassisMap,
wg: &sync.WaitGroup{},
mu: &sync.RWMutex{},
getFabricManagers: smodel.GetFabricManagers,
contactClient: pmbhandle.ContactPlugin,
}
}
type pluginContactRequest struct {
URL string
HTTPMethodType string
ContactClient func(context.Context, string, string, string, string, interface{}, map[string]string) (*http.Response, error)
PostBody interface{}
LoginCredential map[string]string
Plugin smodel.Plugin
Token string
}
// PluginToken interface to hold the token
type PluginToken struct {
Tokens map[string]string
lock sync.RWMutex
}
// Token variable hold the all the XAuthToken against the plguin ID
var Token PluginToken
func (c *sourceProviderImpl) findFabricChassis(ctx context.Context, collection *sresponse.Collection) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.findFabricChassis")
f := c.getFabricFactory(collection)
managers, err := f.getFabricManagers(ctx)
if err != nil {
l.LogWithFields(ctx).Warn("while trying to collect fabric managers details from DB, got " + err.Error())
return
}
threadID := 1
for _, manager := range managers {
ctxt := context.WithValue(ctx, common.ThreadName, common.GetFabricManagerChassis)
ctx = context.WithValue(ctxt, common.ThreadID, strconv.Itoa(threadID))
f.wg.Add(1)
go f.getFabricManagerChassis(ctx, manager)
threadID++
}
f.wg.Wait()
}
// getFabricManagerChassis will send a request to the plugin for the chassis collection,
// and add them to the existing chassis collection.
func (f *fabricFactory) getFabricManagerChassis(ctx context.Context, plugin smodel.Plugin) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.getFabricManagerChassis")
defer f.wg.Done()
req, errResp, err := f.createChassisRequest(ctx, plugin, collectionURL, http.MethodGet, nil)
if errResp != nil {
l.LogWithFields(ctx).Warn("while trying to create fabric plugin request for " + plugin.ID + ", got " + err.Error())
return
}
links, err := collectChassisCollection(ctx, f, req)
if err != nil {
l.LogWithFields(ctx).Warn("while trying to create fabric plugin request for " + plugin.ID + ", got " + err.Error())
return
}
for _, link := range links {
f.mu.Lock()
if !f.chassisMap[link.Oid] { // uniqueness check for the chassis URI
f.chassisMap[link.Oid] = true
f.collection.AddMember(link)
}
f.mu.Unlock()
}
}
// createChassisRequest creates the parameters ready for the plugin communication
func (f *fabricFactory) createChassisRequest(ctx context.Context, plugin smodel.Plugin, url, method string, body *json.RawMessage) (pReq *pluginContactRequest, errResp *response.RPC, err error) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.createChassisRequest")
var token string
cred := make(map[string]string)
if strings.EqualFold(plugin.PreferredAuthType, "XAuthToken") {
token = f.getPluginToken(ctx, plugin)
if token == "" {
*errResp = common.GeneralError(http.StatusUnauthorized, response.ResourceAtURIUnauthorized, "unable to create session for plugin "+plugin.ID, []interface{}{url}, nil)
return nil, errResp, fmt.Errorf("unable to create session for plugin " + plugin.ID)
}
} else {
cred["UserName"] = plugin.Username
cred["Password"] = string(plugin.Password)
}
// validating Patch request properties are in uppercamelcase or not
if strings.EqualFold(method, http.MethodPatch) {
errResp = validateReqParamsCase(ctx, body)
if errResp != nil {
return nil, errResp, fmt.Errorf("validation of request body failed")
}
}
for key, value := range config.Data.URLTranslation.SouthBoundURL {
if body != nil {
*body = json.RawMessage(strings.Replace(string(*body), key, value, -1))
}
url = strings.Replace(url, key, value, -1)
}
pReq = &pluginContactRequest{
Token: token,
LoginCredential: cred,
ContactClient: f.contactClient,
Plugin: plugin,
HTTPMethodType: method,
URL: url,
PostBody: body,
}
return pReq, nil, nil
}
// collectChassisCollection contacts the plugin and collect the chassis response
func collectChassisCollection(ctx context.Context, f *fabricFactory, pluginRequest *pluginContactRequest) ([]dmtf.Link, error) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.collectChassisCollection")
body, _, statusCode, _, err := ContactPluginFunc(ctx, pluginRequest)
if statusCode == http.StatusUnauthorized && strings.EqualFold(pluginRequest.Plugin.PreferredAuthType, "XAuthToken") {
body, _, statusCode, _, err = retryFabricsOperation(ctx, f, pluginRequest)
}
if err != nil {
return []dmtf.Link{}, fmt.Errorf("while trying contact plugin " + pluginRequest.Plugin.ID + ", got " + err.Error())
}
if !is2xx(statusCode) {
return []dmtf.Link{}, fmt.Errorf("while trying contact plugin " + pluginRequest.Plugin.ID + ", got " + strconv.Itoa(statusCode))
}
return extractChassisCollection(ctx, body)
}
func contactPlugin(ctx context.Context, req *pluginContactRequest) ([]byte, string, int, string, error) {
pluginResponse, err := callPlugin(ctx, req)
if err != nil {
if getPluginStatus(ctx, req.Plugin) {
pluginResponse, err = callPlugin(ctx, req)
}
if err != nil {
return nil, "", http.StatusInternalServerError, response.InternalError, fmt.Errorf(err.Error())
}
}
defer pluginResponse.Body.Close()
body, err := ioutil.ReadAll(pluginResponse.Body)
if err != nil {
return nil, "", http.StatusInternalServerError, response.InternalError, fmt.Errorf(err.Error())
}
var statusMessage string
switch pluginResponse.StatusCode {
case http.StatusOK:
statusMessage = response.Success
case http.StatusUnauthorized:
statusMessage = response.ResourceAtURIUnauthorized
case http.StatusNotFound:
statusMessage = response.ResourceNotFound
default:
statusMessage = response.CouldNotEstablishConnection
}
return body, pluginResponse.Header.Get("X-Auth-Token"), pluginResponse.StatusCode, statusMessage, nil
}
// retryFabricsOperation will be called whenever the unauthorized status code during the plugin call
// This function will create a new session token reexcutes the plugin call
func retryFabricsOperation(ctx context.Context, f *fabricFactory, req *pluginContactRequest) ([]byte, string, int, string, error) {
var resp response.RPC
var token = f.createToken(ctx, req.Plugin)
if token == "" {
resp = common.GeneralError(http.StatusUnauthorized, response.NoValidSession, "error: Unable to create session with plugin "+req.Plugin.ID,
[]interface{}{}, nil)
data, _ := json.Marshal(resp.Body)
return data, "", int(resp.StatusCode), response.NoValidSession, fmt.Errorf("error: Unable to create session with plugin")
}
req.Token = token
return contactPlugin(ctx, req)
}
func callPlugin(ctx context.Context, req *pluginContactRequest) (*http.Response, error) {
var reqURL = "https://" + req.Plugin.IP + ":" + req.Plugin.Port + req.URL
if strings.EqualFold(req.Plugin.PreferredAuthType, "BasicAuth") {
return req.ContactClient(ctx, reqURL, req.HTTPMethodType, "", "", req.PostBody, req.LoginCredential)
}
return req.ContactClient(ctx, reqURL, req.HTTPMethodType, req.Token, "", req.PostBody, nil)
}
// getPluginStatus checks the status of given plugin in configured interval
func getPluginStatus(ctx context.Context, plugin smodel.Plugin) bool {
var pluginStatus = common.PluginStatus{
Method: http.MethodGet,
RequestBody: common.StatusRequest{
Comment: "",
},
PluginIP: plugin.IP,
PluginPort: plugin.Port,
ResponseWaitTime: config.Data.PluginStatusPolling.ResponseTimeoutInSecs,
Count: config.Data.PluginStatusPolling.MaxRetryAttempt,
RetryInterval: config.Data.PluginStatusPolling.RetryIntervalInMins,
CACertificate: &config.Data.KeyCertConf.RootCACertificate,
}
status, _, _, err := pluginStatus.CheckStatus()
if err != nil && !status {
l.LogWithFields(ctx).Warn("while getting the status for plugin " + plugin.ID + err.Error())
return status
}
l.LogWithFields(ctx).Info("Status of plugin" + plugin.ID + strconv.FormatBool(status))
return status
}
// getPluginToken will verify the if any token present to the plugin else it will create token for the new plugin
func (f *fabricFactory) getPluginToken(ctx context.Context, plugin smodel.Plugin) string {
authToken := Token.getToken(plugin.ID)
if authToken == "" {
return f.createToken(ctx, plugin)
}
l.LogWithFields(ctx).Debugf("Plugin Token: " + authToken)
return authToken
}
func (f *fabricFactory) createToken(ctx context.Context, plugin smodel.Plugin) string {
var contactRequest pluginContactRequest
contactRequest.ContactClient = f.contactClient
contactRequest.Plugin = plugin
contactRequest.HTTPMethodType = http.MethodPost
contactRequest.PostBody = map[string]interface{}{
"Username": plugin.Username,
"Password": string(plugin.Password),
}
contactRequest.URL = "/ODIM/v1/Sessions"
_, token, _, _, err := contactPlugin(ctx, &contactRequest)
if err != nil {
l.LogWithFields(ctx).Error(err.Error())
}
if token != "" {
Token.storeToken(plugin.ID, token)
}
return token
}
func (p *PluginToken) storeToken(plguinID, token string) {
p.lock.Lock()
defer p.lock.Unlock()
p.Tokens[plguinID] = token
}
func (p *PluginToken) getToken(pluginID string) string {
p.lock.RLock()
defer p.lock.RUnlock()
return p.Tokens[pluginID]
}
// extractChassisCollection unmarshals the plugin response and returns the collection members
func extractChassisCollection(ctx context.Context, body []byte) ([]dmtf.Link, error) {
var resp sresponse.Collection
data := string(body)
//replacing the resposne with north bound translation URL
for key, value := range config.Data.URLTranslation.NorthBoundURL {
data = strings.Replace(data, key, value, -1)
}
err := json.Unmarshal([]byte(data), &resp)
if err != nil {
return resp.Members, fmt.Errorf("while unmarshalling the chassis fabric collection, got: %v", err)
}
l.LogWithFields(ctx).Debugf("outgoing response for get Chassis Collection request: %s", resp.Members)
return resp.Members, nil
}
func | (status int) bool {
return status/100 == 2
}
| is2xx | identifier_name |
fabric.go | //(C) Copyright [2020] Hewlett Packard Enterprise Development LP
//
//Licensed under the Apache License, Version 2.0 (the "License"); you may
//not use this file except in compliance with the License. You may obtain
//a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
//WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
//License for the specific language governing permissions and limitations
// under the License.
package chassis
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
dmtf "github.com/ODIM-Project/ODIM/lib-dmtf/model"
"github.com/ODIM-Project/ODIM/lib-rest-client/pmbhandle"
"github.com/ODIM-Project/ODIM/lib-utilities/common"
"github.com/ODIM-Project/ODIM/lib-utilities/config"
l "github.com/ODIM-Project/ODIM/lib-utilities/logs"
"github.com/ODIM-Project/ODIM/lib-utilities/response"
"github.com/ODIM-Project/ODIM/svc-systems/smodel"
"github.com/ODIM-Project/ODIM/svc-systems/sresponse"
)
type fabricFactory struct {
collection *sresponse.Collection
chassisMap map[string]bool
wg *sync.WaitGroup
mu *sync.RWMutex
getFabricManagers func(context.Context) ([]smodel.Plugin, error)
contactClient func(context.Context, string, string, string, string, interface{}, map[string]string) (*http.Response, error)
}
func getFabricFactory(collection *sresponse.Collection) *fabricFactory {
chassisMap := make(map[string]bool)
return &fabricFactory{
collection: collection,
chassisMap: chassisMap,
wg: &sync.WaitGroup{},
mu: &sync.RWMutex{},
getFabricManagers: smodel.GetFabricManagers,
contactClient: pmbhandle.ContactPlugin,
}
}
type pluginContactRequest struct {
URL string
HTTPMethodType string
ContactClient func(context.Context, string, string, string, string, interface{}, map[string]string) (*http.Response, error)
PostBody interface{}
LoginCredential map[string]string
Plugin smodel.Plugin
Token string
}
// PluginToken interface to hold the token
type PluginToken struct {
Tokens map[string]string
lock sync.RWMutex
}
// Token variable hold the all the XAuthToken against the plguin ID
var Token PluginToken
func (c *sourceProviderImpl) findFabricChassis(ctx context.Context, collection *sresponse.Collection) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.findFabricChassis")
f := c.getFabricFactory(collection)
managers, err := f.getFabricManagers(ctx)
if err != nil {
l.LogWithFields(ctx).Warn("while trying to collect fabric managers details from DB, got " + err.Error())
return
}
threadID := 1
for _, manager := range managers {
ctxt := context.WithValue(ctx, common.ThreadName, common.GetFabricManagerChassis)
ctx = context.WithValue(ctxt, common.ThreadID, strconv.Itoa(threadID))
f.wg.Add(1)
go f.getFabricManagerChassis(ctx, manager)
threadID++
}
f.wg.Wait()
}
// getFabricManagerChassis will send a request to the plugin for the chassis collection,
// and add them to the existing chassis collection.
func (f *fabricFactory) getFabricManagerChassis(ctx context.Context, plugin smodel.Plugin) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.getFabricManagerChassis")
defer f.wg.Done()
req, errResp, err := f.createChassisRequest(ctx, plugin, collectionURL, http.MethodGet, nil)
if errResp != nil {
l.LogWithFields(ctx).Warn("while trying to create fabric plugin request for " + plugin.ID + ", got " + err.Error())
return
}
links, err := collectChassisCollection(ctx, f, req)
if err != nil {
l.LogWithFields(ctx).Warn("while trying to create fabric plugin request for " + plugin.ID + ", got " + err.Error())
return
}
for _, link := range links {
f.mu.Lock()
if !f.chassisMap[link.Oid] { // uniqueness check for the chassis URI
f.chassisMap[link.Oid] = true
f.collection.AddMember(link)
}
f.mu.Unlock()
}
}
// createChassisRequest creates the parameters ready for the plugin communication
func (f *fabricFactory) createChassisRequest(ctx context.Context, plugin smodel.Plugin, url, method string, body *json.RawMessage) (pReq *pluginContactRequest, errResp *response.RPC, err error) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.createChassisRequest")
var token string
cred := make(map[string]string)
if strings.EqualFold(plugin.PreferredAuthType, "XAuthToken") {
token = f.getPluginToken(ctx, plugin)
if token == "" {
*errResp = common.GeneralError(http.StatusUnauthorized, response.ResourceAtURIUnauthorized, "unable to create session for plugin "+plugin.ID, []interface{}{url}, nil)
return nil, errResp, fmt.Errorf("unable to create session for plugin " + plugin.ID)
}
} else {
cred["UserName"] = plugin.Username
cred["Password"] = string(plugin.Password)
}
// validating Patch request properties are in uppercamelcase or not
if strings.EqualFold(method, http.MethodPatch) {
errResp = validateReqParamsCase(ctx, body)
if errResp != nil {
return nil, errResp, fmt.Errorf("validation of request body failed")
}
}
for key, value := range config.Data.URLTranslation.SouthBoundURL {
if body != nil {
*body = json.RawMessage(strings.Replace(string(*body), key, value, -1))
}
url = strings.Replace(url, key, value, -1)
}
pReq = &pluginContactRequest{
Token: token,
LoginCredential: cred,
ContactClient: f.contactClient,
Plugin: plugin,
HTTPMethodType: method,
URL: url,
PostBody: body,
}
return pReq, nil, nil
}
// collectChassisCollection contacts the plugin and collect the chassis response
func collectChassisCollection(ctx context.Context, f *fabricFactory, pluginRequest *pluginContactRequest) ([]dmtf.Link, error) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.collectChassisCollection")
body, _, statusCode, _, err := ContactPluginFunc(ctx, pluginRequest)
if statusCode == http.StatusUnauthorized && strings.EqualFold(pluginRequest.Plugin.PreferredAuthType, "XAuthToken") {
body, _, statusCode, _, err = retryFabricsOperation(ctx, f, pluginRequest)
}
if err != nil {
return []dmtf.Link{}, fmt.Errorf("while trying contact plugin " + pluginRequest.Plugin.ID + ", got " + err.Error())
}
if !is2xx(statusCode) {
return []dmtf.Link{}, fmt.Errorf("while trying contact plugin " + pluginRequest.Plugin.ID + ", got " + strconv.Itoa(statusCode))
}
return extractChassisCollection(ctx, body)
}
func contactPlugin(ctx context.Context, req *pluginContactRequest) ([]byte, string, int, string, error) {
pluginResponse, err := callPlugin(ctx, req)
if err != nil {
if getPluginStatus(ctx, req.Plugin) {
pluginResponse, err = callPlugin(ctx, req)
}
if err != nil {
return nil, "", http.StatusInternalServerError, response.InternalError, fmt.Errorf(err.Error())
}
}
defer pluginResponse.Body.Close()
body, err := ioutil.ReadAll(pluginResponse.Body)
if err != nil |
var statusMessage string
switch pluginResponse.StatusCode {
case http.StatusOK:
statusMessage = response.Success
case http.StatusUnauthorized:
statusMessage = response.ResourceAtURIUnauthorized
case http.StatusNotFound:
statusMessage = response.ResourceNotFound
default:
statusMessage = response.CouldNotEstablishConnection
}
return body, pluginResponse.Header.Get("X-Auth-Token"), pluginResponse.StatusCode, statusMessage, nil
}
// retryFabricsOperation will be called whenever the unauthorized status code during the plugin call
// This function will create a new session token reexcutes the plugin call
func retryFabricsOperation(ctx context.Context, f *fabricFactory, req *pluginContactRequest) ([]byte, string, int, string, error) {
var resp response.RPC
var token = f.createToken(ctx, req.Plugin)
if token == "" {
resp = common.GeneralError(http.StatusUnauthorized, response.NoValidSession, "error: Unable to create session with plugin "+req.Plugin.ID,
[]interface{}{}, nil)
data, _ := json.Marshal(resp.Body)
return data, "", int(resp.StatusCode), response.NoValidSession, fmt.Errorf("error: Unable to create session with plugin")
}
req.Token = token
return contactPlugin(ctx, req)
}
func callPlugin(ctx context.Context, req *pluginContactRequest) (*http.Response, error) {
var reqURL = "https://" + req.Plugin.IP + ":" + req.Plugin.Port + req.URL
if strings.EqualFold(req.Plugin.PreferredAuthType, "BasicAuth") {
return req.ContactClient(ctx, reqURL, req.HTTPMethodType, "", "", req.PostBody, req.LoginCredential)
}
return req.ContactClient(ctx, reqURL, req.HTTPMethodType, req.Token, "", req.PostBody, nil)
}
// getPluginStatus checks the status of given plugin in configured interval
func getPluginStatus(ctx context.Context, plugin smodel.Plugin) bool {
var pluginStatus = common.PluginStatus{
Method: http.MethodGet,
RequestBody: common.StatusRequest{
Comment: "",
},
PluginIP: plugin.IP,
PluginPort: plugin.Port,
ResponseWaitTime: config.Data.PluginStatusPolling.ResponseTimeoutInSecs,
Count: config.Data.PluginStatusPolling.MaxRetryAttempt,
RetryInterval: config.Data.PluginStatusPolling.RetryIntervalInMins,
CACertificate: &config.Data.KeyCertConf.RootCACertificate,
}
status, _, _, err := pluginStatus.CheckStatus()
if err != nil && !status {
l.LogWithFields(ctx).Warn("while getting the status for plugin " + plugin.ID + err.Error())
return status
}
l.LogWithFields(ctx).Info("Status of plugin" + plugin.ID + strconv.FormatBool(status))
return status
}
// getPluginToken will verify the if any token present to the plugin else it will create token for the new plugin
func (f *fabricFactory) getPluginToken(ctx context.Context, plugin smodel.Plugin) string {
authToken := Token.getToken(plugin.ID)
if authToken == "" {
return f.createToken(ctx, plugin)
}
l.LogWithFields(ctx).Debugf("Plugin Token: " + authToken)
return authToken
}
func (f *fabricFactory) createToken(ctx context.Context, plugin smodel.Plugin) string {
var contactRequest pluginContactRequest
contactRequest.ContactClient = f.contactClient
contactRequest.Plugin = plugin
contactRequest.HTTPMethodType = http.MethodPost
contactRequest.PostBody = map[string]interface{}{
"Username": plugin.Username,
"Password": string(plugin.Password),
}
contactRequest.URL = "/ODIM/v1/Sessions"
_, token, _, _, err := contactPlugin(ctx, &contactRequest)
if err != nil {
l.LogWithFields(ctx).Error(err.Error())
}
if token != "" {
Token.storeToken(plugin.ID, token)
}
return token
}
func (p *PluginToken) storeToken(plguinID, token string) {
p.lock.Lock()
defer p.lock.Unlock()
p.Tokens[plguinID] = token
}
func (p *PluginToken) getToken(pluginID string) string {
p.lock.RLock()
defer p.lock.RUnlock()
return p.Tokens[pluginID]
}
// extractChassisCollection unmarshals the plugin response and returns the collection members
func extractChassisCollection(ctx context.Context, body []byte) ([]dmtf.Link, error) {
var resp sresponse.Collection
data := string(body)
//replacing the resposne with north bound translation URL
for key, value := range config.Data.URLTranslation.NorthBoundURL {
data = strings.Replace(data, key, value, -1)
}
err := json.Unmarshal([]byte(data), &resp)
if err != nil {
return resp.Members, fmt.Errorf("while unmarshalling the chassis fabric collection, got: %v", err)
}
l.LogWithFields(ctx).Debugf("outgoing response for get Chassis Collection request: %s", resp.Members)
return resp.Members, nil
}
func is2xx(status int) bool {
return status/100 == 2
}
| {
return nil, "", http.StatusInternalServerError, response.InternalError, fmt.Errorf(err.Error())
} | conditional_block |
fabric.go | //(C) Copyright [2020] Hewlett Packard Enterprise Development LP
//
//Licensed under the Apache License, Version 2.0 (the "License"); you may
//not use this file except in compliance with the License. You may obtain
//a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
//WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
//License for the specific language governing permissions and limitations
// under the License.
package chassis
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
dmtf "github.com/ODIM-Project/ODIM/lib-dmtf/model"
"github.com/ODIM-Project/ODIM/lib-rest-client/pmbhandle"
"github.com/ODIM-Project/ODIM/lib-utilities/common"
"github.com/ODIM-Project/ODIM/lib-utilities/config"
l "github.com/ODIM-Project/ODIM/lib-utilities/logs"
"github.com/ODIM-Project/ODIM/lib-utilities/response"
"github.com/ODIM-Project/ODIM/svc-systems/smodel"
"github.com/ODIM-Project/ODIM/svc-systems/sresponse"
)
type fabricFactory struct {
collection *sresponse.Collection
chassisMap map[string]bool
wg *sync.WaitGroup
mu *sync.RWMutex
getFabricManagers func(context.Context) ([]smodel.Plugin, error)
contactClient func(context.Context, string, string, string, string, interface{}, map[string]string) (*http.Response, error)
}
func getFabricFactory(collection *sresponse.Collection) *fabricFactory {
chassisMap := make(map[string]bool)
return &fabricFactory{
collection: collection,
chassisMap: chassisMap,
wg: &sync.WaitGroup{},
mu: &sync.RWMutex{},
getFabricManagers: smodel.GetFabricManagers,
contactClient: pmbhandle.ContactPlugin,
}
}
type pluginContactRequest struct {
URL string
HTTPMethodType string
ContactClient func(context.Context, string, string, string, string, interface{}, map[string]string) (*http.Response, error)
PostBody interface{}
LoginCredential map[string]string
Plugin smodel.Plugin
Token string
}
// PluginToken interface to hold the token
type PluginToken struct {
Tokens map[string]string
lock sync.RWMutex
}
// Token variable hold the all the XAuthToken against the plguin ID
var Token PluginToken
func (c *sourceProviderImpl) findFabricChassis(ctx context.Context, collection *sresponse.Collection) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.findFabricChassis")
f := c.getFabricFactory(collection)
managers, err := f.getFabricManagers(ctx)
if err != nil {
l.LogWithFields(ctx).Warn("while trying to collect fabric managers details from DB, got " + err.Error())
return
}
threadID := 1
for _, manager := range managers {
ctxt := context.WithValue(ctx, common.ThreadName, common.GetFabricManagerChassis)
ctx = context.WithValue(ctxt, common.ThreadID, strconv.Itoa(threadID))
f.wg.Add(1)
go f.getFabricManagerChassis(ctx, manager)
threadID++
}
f.wg.Wait()
}
// getFabricManagerChassis will send a request to the plugin for the chassis collection,
// and add them to the existing chassis collection.
func (f *fabricFactory) getFabricManagerChassis(ctx context.Context, plugin smodel.Plugin) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.getFabricManagerChassis")
defer f.wg.Done()
req, errResp, err := f.createChassisRequest(ctx, plugin, collectionURL, http.MethodGet, nil)
if errResp != nil {
l.LogWithFields(ctx).Warn("while trying to create fabric plugin request for " + plugin.ID + ", got " + err.Error())
return
}
links, err := collectChassisCollection(ctx, f, req)
if err != nil {
l.LogWithFields(ctx).Warn("while trying to create fabric plugin request for " + plugin.ID + ", got " + err.Error())
return
}
for _, link := range links {
f.mu.Lock()
if !f.chassisMap[link.Oid] { // uniqueness check for the chassis URI
f.chassisMap[link.Oid] = true
f.collection.AddMember(link)
}
f.mu.Unlock()
}
}
// createChassisRequest creates the parameters ready for the plugin communication
func (f *fabricFactory) createChassisRequest(ctx context.Context, plugin smodel.Plugin, url, method string, body *json.RawMessage) (pReq *pluginContactRequest, errResp *response.RPC, err error) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.createChassisRequest")
var token string
cred := make(map[string]string)
if strings.EqualFold(plugin.PreferredAuthType, "XAuthToken") {
token = f.getPluginToken(ctx, plugin)
if token == "" {
*errResp = common.GeneralError(http.StatusUnauthorized, response.ResourceAtURIUnauthorized, "unable to create session for plugin "+plugin.ID, []interface{}{url}, nil)
return nil, errResp, fmt.Errorf("unable to create session for plugin " + plugin.ID)
}
} else {
cred["UserName"] = plugin.Username
cred["Password"] = string(plugin.Password)
}
// validating Patch request properties are in uppercamelcase or not
if strings.EqualFold(method, http.MethodPatch) {
errResp = validateReqParamsCase(ctx, body)
if errResp != nil {
return nil, errResp, fmt.Errorf("validation of request body failed")
}
}
for key, value := range config.Data.URLTranslation.SouthBoundURL {
if body != nil {
*body = json.RawMessage(strings.Replace(string(*body), key, value, -1))
}
url = strings.Replace(url, key, value, -1)
}
pReq = &pluginContactRequest{
Token: token,
LoginCredential: cred,
ContactClient: f.contactClient,
Plugin: plugin,
HTTPMethodType: method,
URL: url,
PostBody: body,
}
return pReq, nil, nil
}
// collectChassisCollection contacts the plugin and collect the chassis response
func collectChassisCollection(ctx context.Context, f *fabricFactory, pluginRequest *pluginContactRequest) ([]dmtf.Link, error) {
l.LogWithFields(ctx).Debug("Inside svc-systems/chassis/fabric.go.collectChassisCollection")
body, _, statusCode, _, err := ContactPluginFunc(ctx, pluginRequest)
if statusCode == http.StatusUnauthorized && strings.EqualFold(pluginRequest.Plugin.PreferredAuthType, "XAuthToken") {
body, _, statusCode, _, err = retryFabricsOperation(ctx, f, pluginRequest)
}
if err != nil {
return []dmtf.Link{}, fmt.Errorf("while trying contact plugin " + pluginRequest.Plugin.ID + ", got " + err.Error())
}
if !is2xx(statusCode) {
return []dmtf.Link{}, fmt.Errorf("while trying contact plugin " + pluginRequest.Plugin.ID + ", got " + strconv.Itoa(statusCode))
}
return extractChassisCollection(ctx, body)
}
func contactPlugin(ctx context.Context, req *pluginContactRequest) ([]byte, string, int, string, error) {
pluginResponse, err := callPlugin(ctx, req)
if err != nil {
if getPluginStatus(ctx, req.Plugin) {
pluginResponse, err = callPlugin(ctx, req)
}
if err != nil {
return nil, "", http.StatusInternalServerError, response.InternalError, fmt.Errorf(err.Error())
}
}
defer pluginResponse.Body.Close()
body, err := ioutil.ReadAll(pluginResponse.Body)
if err != nil {
return nil, "", http.StatusInternalServerError, response.InternalError, fmt.Errorf(err.Error())
}
var statusMessage string
switch pluginResponse.StatusCode {
case http.StatusOK:
statusMessage = response.Success
case http.StatusUnauthorized:
statusMessage = response.ResourceAtURIUnauthorized
case http.StatusNotFound:
statusMessage = response.ResourceNotFound
default:
statusMessage = response.CouldNotEstablishConnection
}
return body, pluginResponse.Header.Get("X-Auth-Token"), pluginResponse.StatusCode, statusMessage, nil
}
// retryFabricsOperation will be called whenever the unauthorized status code during the plugin call
// This function will create a new session token reexcutes the plugin call
func retryFabricsOperation(ctx context.Context, f *fabricFactory, req *pluginContactRequest) ([]byte, string, int, string, error) {
var resp response.RPC
var token = f.createToken(ctx, req.Plugin) | }
req.Token = token
return contactPlugin(ctx, req)
}
func callPlugin(ctx context.Context, req *pluginContactRequest) (*http.Response, error) {
var reqURL = "https://" + req.Plugin.IP + ":" + req.Plugin.Port + req.URL
if strings.EqualFold(req.Plugin.PreferredAuthType, "BasicAuth") {
return req.ContactClient(ctx, reqURL, req.HTTPMethodType, "", "", req.PostBody, req.LoginCredential)
}
return req.ContactClient(ctx, reqURL, req.HTTPMethodType, req.Token, "", req.PostBody, nil)
}
// getPluginStatus checks the status of given plugin in configured interval
func getPluginStatus(ctx context.Context, plugin smodel.Plugin) bool {
var pluginStatus = common.PluginStatus{
Method: http.MethodGet,
RequestBody: common.StatusRequest{
Comment: "",
},
PluginIP: plugin.IP,
PluginPort: plugin.Port,
ResponseWaitTime: config.Data.PluginStatusPolling.ResponseTimeoutInSecs,
Count: config.Data.PluginStatusPolling.MaxRetryAttempt,
RetryInterval: config.Data.PluginStatusPolling.RetryIntervalInMins,
CACertificate: &config.Data.KeyCertConf.RootCACertificate,
}
status, _, _, err := pluginStatus.CheckStatus()
if err != nil && !status {
l.LogWithFields(ctx).Warn("while getting the status for plugin " + plugin.ID + err.Error())
return status
}
l.LogWithFields(ctx).Info("Status of plugin" + plugin.ID + strconv.FormatBool(status))
return status
}
// getPluginToken will verify the if any token present to the plugin else it will create token for the new plugin
func (f *fabricFactory) getPluginToken(ctx context.Context, plugin smodel.Plugin) string {
authToken := Token.getToken(plugin.ID)
if authToken == "" {
return f.createToken(ctx, plugin)
}
l.LogWithFields(ctx).Debugf("Plugin Token: " + authToken)
return authToken
}
func (f *fabricFactory) createToken(ctx context.Context, plugin smodel.Plugin) string {
var contactRequest pluginContactRequest
contactRequest.ContactClient = f.contactClient
contactRequest.Plugin = plugin
contactRequest.HTTPMethodType = http.MethodPost
contactRequest.PostBody = map[string]interface{}{
"Username": plugin.Username,
"Password": string(plugin.Password),
}
contactRequest.URL = "/ODIM/v1/Sessions"
_, token, _, _, err := contactPlugin(ctx, &contactRequest)
if err != nil {
l.LogWithFields(ctx).Error(err.Error())
}
if token != "" {
Token.storeToken(plugin.ID, token)
}
return token
}
func (p *PluginToken) storeToken(plguinID, token string) {
p.lock.Lock()
defer p.lock.Unlock()
p.Tokens[plguinID] = token
}
func (p *PluginToken) getToken(pluginID string) string {
p.lock.RLock()
defer p.lock.RUnlock()
return p.Tokens[pluginID]
}
// extractChassisCollection unmarshals the plugin response and returns the collection members
func extractChassisCollection(ctx context.Context, body []byte) ([]dmtf.Link, error) {
var resp sresponse.Collection
data := string(body)
//replacing the resposne with north bound translation URL
for key, value := range config.Data.URLTranslation.NorthBoundURL {
data = strings.Replace(data, key, value, -1)
}
err := json.Unmarshal([]byte(data), &resp)
if err != nil {
return resp.Members, fmt.Errorf("while unmarshalling the chassis fabric collection, got: %v", err)
}
l.LogWithFields(ctx).Debugf("outgoing response for get Chassis Collection request: %s", resp.Members)
return resp.Members, nil
}
func is2xx(status int) bool {
return status/100 == 2
} | if token == "" {
resp = common.GeneralError(http.StatusUnauthorized, response.NoValidSession, "error: Unable to create session with plugin "+req.Plugin.ID,
[]interface{}{}, nil)
data, _ := json.Marshal(resp.Body)
return data, "", int(resp.StatusCode), response.NoValidSession, fmt.Errorf("error: Unable to create session with plugin") | random_line_split |
p2p.pb.go | // Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go.
// source: proto/p2p/p2p.proto
// DO NOT EDIT!
/*
Package p2p is a generated protocol buffer package.
It is generated from these files:
proto/p2p/p2p.proto
It has these top-level messages:
BitfieldMessage
PieceRequestMessage
PiecePayloadMessage
AnnouncePieceMessage
CancelPieceMessage
ErrorMessage
CompleteMessage
Message
*/
package p2p
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ErrorMessage_ErrorCode int32
const (
ErrorMessage_PIECE_REQUEST_FAILED ErrorMessage_ErrorCode = 0
)
var ErrorMessage_ErrorCode_name = map[int32]string{
0: "PIECE_REQUEST_FAILED",
}
var ErrorMessage_ErrorCode_value = map[string]int32{
"PIECE_REQUEST_FAILED": 0,
}
func (x ErrorMessage_ErrorCode) String() string {
return proto.EnumName(ErrorMessage_ErrorCode_name, int32(x))
}
func (ErrorMessage_ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 0} }
type Message_Type int32
const (
Message_BITFIELD Message_Type = 0
Message_PIECE_REQUEST Message_Type = 1
Message_PIECE_PAYLOAD Message_Type = 2
Message_ANNOUCE_PIECE Message_Type = 3
Message_CANCEL_PIECE Message_Type = 4
Message_ERROR Message_Type = 5
Message_COMPLETE Message_Type = 6
)
var Message_Type_name = map[int32]string{
0: "BITFIELD",
1: "PIECE_REQUEST",
2: "PIECE_PAYLOAD",
3: "ANNOUCE_PIECE",
4: "CANCEL_PIECE",
5: "ERROR",
6: "COMPLETE",
}
var Message_Type_value = map[string]int32{
"BITFIELD": 0,
"PIECE_REQUEST": 1,
"PIECE_PAYLOAD": 2,
"ANNOUCE_PIECE": 3,
"CANCEL_PIECE": 4,
"ERROR": 5,
"COMPLETE": 6,
}
func (x Message_Type) String() string {
return proto.EnumName(Message_Type_name, int32(x))
}
func (Message_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} }
// Binary set of all pieces that peer has downloaded so far. Also serves as a
// handshaking message, which each peer sends once at the beginning of the
// connection to declare what their peer id is and what info hash they want to
// transmit.
type BitfieldMessage struct {
InfoHash string `protobuf:"bytes,2,opt,name=infoHash" json:"infoHash,omitempty"`
// TODO: Torrent name is the content hash. Current torrent storage is
// content addressable. Adding name as a part of handshake makes looking
// up torrents faster. If storage supports addressing torrent by infohash,
// this extra field should removed.
// XXX(codyg): We rely on this name field for announcing too, so tracker can
// look up origins that have this content.
// We currently treat infohash as verification of torrents.
Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
PeerID string `protobuf:"bytes,4,opt,name=peerID" json:"peerID,omitempty"`
BitfieldBytes []byte `protobuf:"bytes,5,opt,name=bitfieldBytes,proto3" json:"bitfieldBytes,omitempty"`
Namespace string `protobuf:"bytes,6,opt,name=namespace" json:"namespace,omitempty"`
// remoteBitfieldBytes contains the binary sets of pieces downloaded of
// all peers that the sender is currently connected to.
RemoteBitfieldBytes map[string][]byte `protobuf:"bytes,7,rep,name=remoteBitfieldBytes" json:"remoteBitfieldBytes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *BitfieldMessage) Reset() { *m = BitfieldMessage{} }
func (m *BitfieldMessage) String() string { return proto.CompactTextString(m) }
func (*BitfieldMessage) ProtoMessage() {}
func (*BitfieldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *BitfieldMessage) GetRemoteBitfieldBytes() map[string][]byte {
if m != nil {
return m.RemoteBitfieldBytes
}
return nil
}
// Requests a piece of the given index. Note: offset and length are unused fields
// and if set, will be rejected.
type PieceRequestMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
Offset int32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"`
Length int32 `protobuf:"varint,4,opt,name=length" json:"length,omitempty"`
}
func (m *PieceRequestMessage) Reset() { *m = PieceRequestMessage{} }
func (m *PieceRequestMessage) String() string { return proto.CompactTextString(m) }
func (*PieceRequestMessage) ProtoMessage() {}
func (*PieceRequestMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
// Provides binary payload response to a peer request. Always immediately followed
// by a binary blob sent over socket, so the receiver should be ready to treat the
// blob as a non-protobuf message.
type PiecePayloadMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
Offset int32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"`
Length int32 `protobuf:"varint,4,opt,name=length" json:"length,omitempty"`
Digest string `protobuf:"bytes,5,opt,name=digest" json:"digest,omitempty"`
}
func (m *PiecePayloadMessage) Reset() { *m = PiecePayloadMessage{} }
func (m *PiecePayloadMessage) String() string { return proto.CompactTextString(m) }
func (*PiecePayloadMessage) ProtoMessage() {}
func (*PiecePayloadMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
// Announces that a piece is available to other peers.
type AnnouncePieceMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
}
func (m *AnnouncePieceMessage) | () { *m = AnnouncePieceMessage{} }
func (m *AnnouncePieceMessage) String() string { return proto.CompactTextString(m) }
func (*AnnouncePieceMessage) ProtoMessage() {}
func (*AnnouncePieceMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// Unused.
type CancelPieceMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
}
func (m *CancelPieceMessage) Reset() { *m = CancelPieceMessage{} }
func (m *CancelPieceMessage) String() string { return proto.CompactTextString(m) }
func (*CancelPieceMessage) ProtoMessage() {}
func (*CancelPieceMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
// General purpose error message. Receivers may check the error code to determine
// the origin of the message.
type ErrorMessage struct {
Error string `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
Index int32 `protobuf:"varint,3,opt,name=index" json:"index,omitempty"`
Code ErrorMessage_ErrorCode `protobuf:"varint,4,opt,name=code,enum=p2p.ErrorMessage_ErrorCode" json:"code,omitempty"`
}
func (m *ErrorMessage) Reset() { *m = ErrorMessage{} }
func (m *ErrorMessage) String() string { return proto.CompactTextString(m) }
func (*ErrorMessage) ProtoMessage() {}
func (*ErrorMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
// Notifies other peers that the torrent has completed and all pieces are available.
type CompleteMessage struct {
}
func (m *CompleteMessage) Reset() { *m = CompleteMessage{} }
func (m *CompleteMessage) String() string { return proto.CompactTextString(m) }
func (*CompleteMessage) ProtoMessage() {}
func (*CompleteMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type Message struct {
Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
Type Message_Type `protobuf:"varint,2,opt,name=type,enum=p2p.Message_Type" json:"type,omitempty"`
Bitfield *BitfieldMessage `protobuf:"bytes,3,opt,name=bitfield" json:"bitfield,omitempty"`
PieceRequest *PieceRequestMessage `protobuf:"bytes,4,opt,name=pieceRequest" json:"pieceRequest,omitempty"`
PiecePayload *PiecePayloadMessage `protobuf:"bytes,5,opt,name=piecePayload" json:"piecePayload,omitempty"`
AnnouncePiece *AnnouncePieceMessage `protobuf:"bytes,6,opt,name=announcePiece" json:"announcePiece,omitempty"`
CancelPiece *CancelPieceMessage `protobuf:"bytes,7,opt,name=cancelPiece" json:"cancelPiece,omitempty"`
Error *ErrorMessage `protobuf:"bytes,8,opt,name=error" json:"error,omitempty"`
Complete *CompleteMessage `protobuf:"bytes,9,opt,name=complete" json:"complete,omitempty"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *Message) GetBitfield() *BitfieldMessage {
if m != nil {
return m.Bitfield
}
return nil
}
func (m *Message) GetPieceRequest() *PieceRequestMessage {
if m != nil {
return m.PieceRequest
}
return nil
}
func (m *Message) GetPiecePayload() *PiecePayloadMessage {
if m != nil {
return m.PiecePayload
}
return nil
}
func (m *Message) GetAnnouncePiece() *AnnouncePieceMessage {
if m != nil {
return m.AnnouncePiece
}
return nil
}
func (m *Message) GetCancelPiece() *CancelPieceMessage {
if m != nil {
return m.CancelPiece
}
return nil
}
func (m *Message) GetError() *ErrorMessage {
if m != nil {
return m.Error
}
return nil
}
func (m *Message) GetComplete() *CompleteMessage {
if m != nil {
return m.Complete
}
return nil
}
func init() {
proto.RegisterType((*BitfieldMessage)(nil), "p2p.BitfieldMessage")
proto.RegisterType((*PieceRequestMessage)(nil), "p2p.PieceRequestMessage")
proto.RegisterType((*PiecePayloadMessage)(nil), "p2p.PiecePayloadMessage")
proto.RegisterType((*AnnouncePieceMessage)(nil), "p2p.AnnouncePieceMessage")
proto.RegisterType((*CancelPieceMessage)(nil), "p2p.CancelPieceMessage")
proto.RegisterType((*ErrorMessage)(nil), "p2p.ErrorMessage")
proto.RegisterType((*CompleteMessage)(nil), "p2p.CompleteMessage")
proto.RegisterType((*Message)(nil), "p2p.Message")
proto.RegisterEnum("p2p.ErrorMessage_ErrorCode", ErrorMessage_ErrorCode_name, ErrorMessage_ErrorCode_value)
proto.RegisterEnum("p2p.Message_Type", Message_Type_name, Message_Type_value)
}
func init() { proto.RegisterFile("proto/p2p/p2p.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 647 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x6f, 0xd3, 0x4e,
0x10, 0x6d, 0x12, 0x3b, 0x7f, 0x26, 0x69, 0xeb, 0x6c, 0xa3, 0xdf, 0xcf, 0x14, 0x0e, 0x95, 0x45,
0x45, 0x85, 0xa0, 0xad, 0xcc, 0x05, 0x10, 0x12, 0x4a, 0x1c, 0x57, 0x44, 0x4a, 0x9b, 0xb0, 0xa4,
0x07, 0xc4, 0xa1, 0x72, 0x9d, 0x49, 0x6b, 0x91, 0x7a, 0x8d, 0xed, 0x56, 0xcd, 0xd7, 0xe0, 0x43,
0x21, 0xf1, 0xad, 0xd0, 0x4e, 0xec, 0xc4, 0x6e, 0x02, 0xe2, 0xc0, 0x21, 0x92, 0xdf, 0xf3, 0x7b,
0xb3, 0xb3, 0x33, 0x2f, 0x86, 0x9d, 0x20, 0x14, 0xb1, 0x38, 0x0a, 0xcc, 0x40, 0xfe, 0x0e, 0x09,
0xb1, 0x52, 0x60, 0x06, 0xc6, 0x8f, 0x22, 0x6c, 0x77, 0xbc, 0x78, 0xe2, 0xe1, 0x74, 0x7c, 0x8a,
0x51, 0xe4, 0x5c, 0x21, 0xdb, 0x85, 0xaa, 0xe7, 0x4f, 0xc4, 0x07, 0x27, 0xba, 0xd6, 0x8b, 0x7b,
0x85, 0x83, 0x1a, 0x5f, 0x60, 0xc6, 0x40, 0xf1, 0x9d, 0x1b, 0xd4, 0x4b, 0xc4, 0xd3, 0x33, 0xfb,
0x0f, 0xca, 0x01, 0x62, 0xd8, 0xeb, 0xea, 0x0a, 0xb1, 0x09, 0x62, 0x4f, 0x61, 0xf3, 0x32, 0x29,
0xdd, 0x99, 0xc5, 0x18, 0xe9, 0xea, 0x5e, 0xe1, 0xa0, 0xc1, 0xf3, 0x24, 0x7b, 0x02, 0x35, 0x59,
0x25, 0x0a, 0x1c, 0x17, 0xf5, 0x32, 0x15, 0x58, 0x12, 0xec, 0x02, 0x76, 0x42, 0xbc, 0x11, 0x31,
0x76, 0x72, 0x95, 0x2a, 0x7b, 0xa5, 0x83, 0xba, 0xf9, 0xf2, 0x50, 0xde, 0xe6, 0x41, 0xfb, 0x87,
0x7c, 0x55, 0x6f, 0xfb, 0x71, 0x38, 0xe3, 0xeb, 0x2a, 0xed, 0x9e, 0x80, 0xfe, 0x3b, 0x03, 0xd3,
0xa0, 0xf4, 0x15, 0x67, 0x7a, 0x81, 0x9a, 0x92, 0x8f, 0xac, 0x05, 0xea, 0x9d, 0x33, 0xbd, 0x45,
0x9a, 0x4b, 0x83, 0xcf, 0xc1, 0xdb, 0xe2, 0xeb, 0x82, 0xf1, 0x05, 0x76, 0x86, 0x1e, 0xba, 0xc8,
0xf1, 0xdb, 0x2d, 0x46, 0x71, 0x3a, 0xcb, 0x16, 0xa8, 0x9e, 0x3f, 0xc6, 0x7b, 0x32, 0xa8, 0x7c,
0x0e, 0xe4, 0xc4, 0xc4, 0x64, 0x12, 0x61, 0x4c, 0x73, 0x54, 0x79, 0x82, 0x24, 0x3f, 0x45, 0xff,
0x2a, 0xbe, 0xa6, 0x49, 0xaa, 0x3c, 0x41, 0x46, 0x94, 0x14, 0x1f, 0x3a, 0xb3, 0xa9, 0x70, 0xc6,
0xff, 0xb4, 0xb8, 0xe4, 0xc7, 0xde, 0x15, 0x46, 0x31, 0xed, 0xa7, 0xc6, 0x13, 0x64, 0xbc, 0x80,
0x56, 0xdb, 0xf7, 0xc5, 0xad, 0xef, 0x22, 0x1d, 0xfe, 0xc7, 0x53, 0x8d, 0xe7, 0xc0, 0x2c, 0xc7,
0x77, 0x71, 0xfa, 0x17, 0xda, 0xef, 0x05, 0x68, 0xd8, 0x61, 0x28, 0xc2, 0x8c, 0x0c, 0x25, 0x4e,
0xe2, 0x36, 0x07, 0x4b, 0x73, 0x29, 0x7b, 0xbd, 0x23, 0x50, 0x5c, 0x31, 0x46, 0xba, 0xc4, 0x96,
0xf9, 0x98, 0x22, 0x90, 0x2d, 0x36, 0x07, 0x96, 0x18, 0x23, 0x27, 0xa1, 0xb1, 0x0f, 0xb5, 0x05,
0xc5, 0x74, 0x68, 0x0d, 0x7b, 0xb6, 0x65, 0x5f, 0x70, 0xfb, 0xe3, 0xb9, 0xfd, 0x69, 0x74, 0x71,
0xd2, 0xee, 0xf5, 0xed, 0xae, 0xb6, 0x61, 0x34, 0x61, 0xdb, 0x12, 0x37, 0xc1, 0x14, 0xe3, 0xb4,
0x7b, 0xe3, 0xa7, 0x02, 0x95, 0xb4, 0x45, 0x1d, 0x2a, 0x77, 0x18, 0x46, 0x9e, 0xf0, 0x93, 0x3c,
0xa4, 0x90, 0xed, 0x83, 0x12, 0xcf, 0x82, 0x79, 0x24, 0xb6, 0xcc, 0x26, 0x35, 0x94, 0xf6, 0x32,
0x9a, 0x05, 0xc8, 0xe9, 0x35, 0x3b, 0x86, 0x6a, 0x1a, 0x7c, 0xba, 0x50, 0xdd, 0x6c, 0xad, 0x8b,
0x2f, 0x5f, 0xa8, 0xd8, 0x3b, 0x68, 0x04, 0x99, 0x48, 0xd1, 0x8d, 0xeb, 0xa6, 0x4e, 0xae, 0x35,
0x59, 0xe3, 0x39, 0xf5, 0xc2, 0x9d, 0x64, 0x86, 0x96, 0x9b, 0x73, 0xe7, 0xc3, 0xc4, 0x73, 0x6a,
0xf6, 0x1e, 0x36, 0x9d, 0xec, 0xf2, 0xe9, 0x9f, 0x59, 0x37, 0x1f, 0x91, 0x7d, 0x5d, 0x2c, 0x78,
0x5e, 0xcf, 0xde, 0x40, 0xdd, 0x5d, 0xe6, 0x41, 0xaf, 0x90, 0xfd, 0x7f, 0xb2, 0xaf, 0xe6, 0x84,
0x67, 0xb5, 0xec, 0x59, 0x9a, 0x86, 0x2a, 0x99, 0x9a, 0x2b, 0x2b, 0x4e, 0x03, 0x72, 0x0c, 0x55,
0x37, 0x59, 0x99, 0x5e, 0xcb, 0x8c, 0xf4, 0xc1, 0x1e, 0xf9, 0x42, 0x65, 0xdc, 0x83, 0x22, 0x57,
0xc2, 0x1a, 0x50, 0xed, 0xf4, 0x46, 0x27, 0x3d, 0xbb, 0xdf, 0xd5, 0x36, 0x58, 0x13, 0x36, 0x73,
0xa1, 0xd0, 0x0a, 0x4b, 0x6a, 0xd8, 0xfe, 0xdc, 0x1f, 0xb4, 0xbb, 0x5a, 0x51, 0x52, 0xed, 0xb3,
0xb3, 0xc1, 0xb9, 0x24, 0xe5, 0x2b, 0xad, 0xc4, 0x34, 0x68, 0x58, 0xed, 0x33, 0xcb, 0xee, 0x27,
0x8c, 0xc2, 0x6a, 0xa0, 0xda, 0x9c, 0x0f, 0xb8, 0xa6, 0xca, 0x33, 0xac, 0xc1, 0xe9, 0xb0, 0x6f,
0x8f, 0x6c, 0xad, 0x7c, 0x59, 0xa6, 0x8f, 0xee, 0xab, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd,
0x11, 0xd8, 0xe6, 0x8b, 0x05, 0x00, 0x00,
}
| Reset | identifier_name |
p2p.pb.go | // Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go.
// source: proto/p2p/p2p.proto
// DO NOT EDIT!
/*
Package p2p is a generated protocol buffer package.
It is generated from these files:
proto/p2p/p2p.proto
It has these top-level messages:
BitfieldMessage
PieceRequestMessage
PiecePayloadMessage
AnnouncePieceMessage
CancelPieceMessage
ErrorMessage
CompleteMessage
Message
*/
package p2p
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ErrorMessage_ErrorCode int32
const (
ErrorMessage_PIECE_REQUEST_FAILED ErrorMessage_ErrorCode = 0
)
var ErrorMessage_ErrorCode_name = map[int32]string{
0: "PIECE_REQUEST_FAILED",
} | func (x ErrorMessage_ErrorCode) String() string {
return proto.EnumName(ErrorMessage_ErrorCode_name, int32(x))
}
func (ErrorMessage_ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 0} }
type Message_Type int32
const (
Message_BITFIELD Message_Type = 0
Message_PIECE_REQUEST Message_Type = 1
Message_PIECE_PAYLOAD Message_Type = 2
Message_ANNOUCE_PIECE Message_Type = 3
Message_CANCEL_PIECE Message_Type = 4
Message_ERROR Message_Type = 5
Message_COMPLETE Message_Type = 6
)
var Message_Type_name = map[int32]string{
0: "BITFIELD",
1: "PIECE_REQUEST",
2: "PIECE_PAYLOAD",
3: "ANNOUCE_PIECE",
4: "CANCEL_PIECE",
5: "ERROR",
6: "COMPLETE",
}
var Message_Type_value = map[string]int32{
"BITFIELD": 0,
"PIECE_REQUEST": 1,
"PIECE_PAYLOAD": 2,
"ANNOUCE_PIECE": 3,
"CANCEL_PIECE": 4,
"ERROR": 5,
"COMPLETE": 6,
}
func (x Message_Type) String() string {
return proto.EnumName(Message_Type_name, int32(x))
}
func (Message_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} }
// Binary set of all pieces that peer has downloaded so far. Also serves as a
// handshaking message, which each peer sends once at the beginning of the
// connection to declare what their peer id is and what info hash they want to
// transmit.
type BitfieldMessage struct {
InfoHash string `protobuf:"bytes,2,opt,name=infoHash" json:"infoHash,omitempty"`
// TODO: Torrent name is the content hash. Current torrent storage is
// content addressable. Adding name as a part of handshake makes looking
// up torrents faster. If storage supports addressing torrent by infohash,
// this extra field should removed.
// XXX(codyg): We rely on this name field for announcing too, so tracker can
// look up origins that have this content.
// We currently treat infohash as verification of torrents.
Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
PeerID string `protobuf:"bytes,4,opt,name=peerID" json:"peerID,omitempty"`
BitfieldBytes []byte `protobuf:"bytes,5,opt,name=bitfieldBytes,proto3" json:"bitfieldBytes,omitempty"`
Namespace string `protobuf:"bytes,6,opt,name=namespace" json:"namespace,omitempty"`
// remoteBitfieldBytes contains the binary sets of pieces downloaded of
// all peers that the sender is currently connected to.
RemoteBitfieldBytes map[string][]byte `protobuf:"bytes,7,rep,name=remoteBitfieldBytes" json:"remoteBitfieldBytes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *BitfieldMessage) Reset() { *m = BitfieldMessage{} }
func (m *BitfieldMessage) String() string { return proto.CompactTextString(m) }
func (*BitfieldMessage) ProtoMessage() {}
func (*BitfieldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *BitfieldMessage) GetRemoteBitfieldBytes() map[string][]byte {
if m != nil {
return m.RemoteBitfieldBytes
}
return nil
}
// Requests a piece of the given index. Note: offset and length are unused fields
// and if set, will be rejected.
type PieceRequestMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
Offset int32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"`
Length int32 `protobuf:"varint,4,opt,name=length" json:"length,omitempty"`
}
func (m *PieceRequestMessage) Reset() { *m = PieceRequestMessage{} }
func (m *PieceRequestMessage) String() string { return proto.CompactTextString(m) }
func (*PieceRequestMessage) ProtoMessage() {}
func (*PieceRequestMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
// Provides binary payload response to a peer request. Always immediately followed
// by a binary blob sent over socket, so the receiver should be ready to treat the
// blob as a non-protobuf message.
type PiecePayloadMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
Offset int32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"`
Length int32 `protobuf:"varint,4,opt,name=length" json:"length,omitempty"`
Digest string `protobuf:"bytes,5,opt,name=digest" json:"digest,omitempty"`
}
func (m *PiecePayloadMessage) Reset() { *m = PiecePayloadMessage{} }
func (m *PiecePayloadMessage) String() string { return proto.CompactTextString(m) }
func (*PiecePayloadMessage) ProtoMessage() {}
func (*PiecePayloadMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
// Announces that a piece is available to other peers.
type AnnouncePieceMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
}
func (m *AnnouncePieceMessage) Reset() { *m = AnnouncePieceMessage{} }
func (m *AnnouncePieceMessage) String() string { return proto.CompactTextString(m) }
func (*AnnouncePieceMessage) ProtoMessage() {}
func (*AnnouncePieceMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// Unused.
type CancelPieceMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
}
func (m *CancelPieceMessage) Reset() { *m = CancelPieceMessage{} }
func (m *CancelPieceMessage) String() string { return proto.CompactTextString(m) }
func (*CancelPieceMessage) ProtoMessage() {}
func (*CancelPieceMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
// General purpose error message. Receivers may check the error code to determine
// the origin of the message.
type ErrorMessage struct {
Error string `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
Index int32 `protobuf:"varint,3,opt,name=index" json:"index,omitempty"`
Code ErrorMessage_ErrorCode `protobuf:"varint,4,opt,name=code,enum=p2p.ErrorMessage_ErrorCode" json:"code,omitempty"`
}
func (m *ErrorMessage) Reset() { *m = ErrorMessage{} }
func (m *ErrorMessage) String() string { return proto.CompactTextString(m) }
func (*ErrorMessage) ProtoMessage() {}
func (*ErrorMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
// Notifies other peers that the torrent has completed and all pieces are available.
type CompleteMessage struct {
}
func (m *CompleteMessage) Reset() { *m = CompleteMessage{} }
func (m *CompleteMessage) String() string { return proto.CompactTextString(m) }
func (*CompleteMessage) ProtoMessage() {}
func (*CompleteMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type Message struct {
Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
Type Message_Type `protobuf:"varint,2,opt,name=type,enum=p2p.Message_Type" json:"type,omitempty"`
Bitfield *BitfieldMessage `protobuf:"bytes,3,opt,name=bitfield" json:"bitfield,omitempty"`
PieceRequest *PieceRequestMessage `protobuf:"bytes,4,opt,name=pieceRequest" json:"pieceRequest,omitempty"`
PiecePayload *PiecePayloadMessage `protobuf:"bytes,5,opt,name=piecePayload" json:"piecePayload,omitempty"`
AnnouncePiece *AnnouncePieceMessage `protobuf:"bytes,6,opt,name=announcePiece" json:"announcePiece,omitempty"`
CancelPiece *CancelPieceMessage `protobuf:"bytes,7,opt,name=cancelPiece" json:"cancelPiece,omitempty"`
Error *ErrorMessage `protobuf:"bytes,8,opt,name=error" json:"error,omitempty"`
Complete *CompleteMessage `protobuf:"bytes,9,opt,name=complete" json:"complete,omitempty"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *Message) GetBitfield() *BitfieldMessage {
if m != nil {
return m.Bitfield
}
return nil
}
func (m *Message) GetPieceRequest() *PieceRequestMessage {
if m != nil {
return m.PieceRequest
}
return nil
}
func (m *Message) GetPiecePayload() *PiecePayloadMessage {
if m != nil {
return m.PiecePayload
}
return nil
}
func (m *Message) GetAnnouncePiece() *AnnouncePieceMessage {
if m != nil {
return m.AnnouncePiece
}
return nil
}
func (m *Message) GetCancelPiece() *CancelPieceMessage {
if m != nil {
return m.CancelPiece
}
return nil
}
func (m *Message) GetError() *ErrorMessage {
if m != nil {
return m.Error
}
return nil
}
func (m *Message) GetComplete() *CompleteMessage {
if m != nil {
return m.Complete
}
return nil
}
func init() {
proto.RegisterType((*BitfieldMessage)(nil), "p2p.BitfieldMessage")
proto.RegisterType((*PieceRequestMessage)(nil), "p2p.PieceRequestMessage")
proto.RegisterType((*PiecePayloadMessage)(nil), "p2p.PiecePayloadMessage")
proto.RegisterType((*AnnouncePieceMessage)(nil), "p2p.AnnouncePieceMessage")
proto.RegisterType((*CancelPieceMessage)(nil), "p2p.CancelPieceMessage")
proto.RegisterType((*ErrorMessage)(nil), "p2p.ErrorMessage")
proto.RegisterType((*CompleteMessage)(nil), "p2p.CompleteMessage")
proto.RegisterType((*Message)(nil), "p2p.Message")
proto.RegisterEnum("p2p.ErrorMessage_ErrorCode", ErrorMessage_ErrorCode_name, ErrorMessage_ErrorCode_value)
proto.RegisterEnum("p2p.Message_Type", Message_Type_name, Message_Type_value)
}
func init() { proto.RegisterFile("proto/p2p/p2p.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 647 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x6f, 0xd3, 0x4e,
0x10, 0x6d, 0x12, 0x3b, 0x7f, 0x26, 0x69, 0xeb, 0x6c, 0xa3, 0xdf, 0xcf, 0x14, 0x0e, 0x95, 0x45,
0x45, 0x85, 0xa0, 0xad, 0xcc, 0x05, 0x10, 0x12, 0x4a, 0x1c, 0x57, 0x44, 0x4a, 0x9b, 0xb0, 0xa4,
0x07, 0xc4, 0xa1, 0x72, 0x9d, 0x49, 0x6b, 0x91, 0x7a, 0x8d, 0xed, 0x56, 0xcd, 0xd7, 0xe0, 0x43,
0x21, 0xf1, 0xad, 0xd0, 0x4e, 0xec, 0xc4, 0x6e, 0x02, 0xe2, 0xc0, 0x21, 0x92, 0xdf, 0xf3, 0x7b,
0xb3, 0xb3, 0x33, 0x2f, 0x86, 0x9d, 0x20, 0x14, 0xb1, 0x38, 0x0a, 0xcc, 0x40, 0xfe, 0x0e, 0x09,
0xb1, 0x52, 0x60, 0x06, 0xc6, 0x8f, 0x22, 0x6c, 0x77, 0xbc, 0x78, 0xe2, 0xe1, 0x74, 0x7c, 0x8a,
0x51, 0xe4, 0x5c, 0x21, 0xdb, 0x85, 0xaa, 0xe7, 0x4f, 0xc4, 0x07, 0x27, 0xba, 0xd6, 0x8b, 0x7b,
0x85, 0x83, 0x1a, 0x5f, 0x60, 0xc6, 0x40, 0xf1, 0x9d, 0x1b, 0xd4, 0x4b, 0xc4, 0xd3, 0x33, 0xfb,
0x0f, 0xca, 0x01, 0x62, 0xd8, 0xeb, 0xea, 0x0a, 0xb1, 0x09, 0x62, 0x4f, 0x61, 0xf3, 0x32, 0x29,
0xdd, 0x99, 0xc5, 0x18, 0xe9, 0xea, 0x5e, 0xe1, 0xa0, 0xc1, 0xf3, 0x24, 0x7b, 0x02, 0x35, 0x59,
0x25, 0x0a, 0x1c, 0x17, 0xf5, 0x32, 0x15, 0x58, 0x12, 0xec, 0x02, 0x76, 0x42, 0xbc, 0x11, 0x31,
0x76, 0x72, 0x95, 0x2a, 0x7b, 0xa5, 0x83, 0xba, 0xf9, 0xf2, 0x50, 0xde, 0xe6, 0x41, 0xfb, 0x87,
0x7c, 0x55, 0x6f, 0xfb, 0x71, 0x38, 0xe3, 0xeb, 0x2a, 0xed, 0x9e, 0x80, 0xfe, 0x3b, 0x03, 0xd3,
0xa0, 0xf4, 0x15, 0x67, 0x7a, 0x81, 0x9a, 0x92, 0x8f, 0xac, 0x05, 0xea, 0x9d, 0x33, 0xbd, 0x45,
0x9a, 0x4b, 0x83, 0xcf, 0xc1, 0xdb, 0xe2, 0xeb, 0x82, 0xf1, 0x05, 0x76, 0x86, 0x1e, 0xba, 0xc8,
0xf1, 0xdb, 0x2d, 0x46, 0x71, 0x3a, 0xcb, 0x16, 0xa8, 0x9e, 0x3f, 0xc6, 0x7b, 0x32, 0xa8, 0x7c,
0x0e, 0xe4, 0xc4, 0xc4, 0x64, 0x12, 0x61, 0x4c, 0x73, 0x54, 0x79, 0x82, 0x24, 0x3f, 0x45, 0xff,
0x2a, 0xbe, 0xa6, 0x49, 0xaa, 0x3c, 0x41, 0x46, 0x94, 0x14, 0x1f, 0x3a, 0xb3, 0xa9, 0x70, 0xc6,
0xff, 0xb4, 0xb8, 0xe4, 0xc7, 0xde, 0x15, 0x46, 0x31, 0xed, 0xa7, 0xc6, 0x13, 0x64, 0xbc, 0x80,
0x56, 0xdb, 0xf7, 0xc5, 0xad, 0xef, 0x22, 0x1d, 0xfe, 0xc7, 0x53, 0x8d, 0xe7, 0xc0, 0x2c, 0xc7,
0x77, 0x71, 0xfa, 0x17, 0xda, 0xef, 0x05, 0x68, 0xd8, 0x61, 0x28, 0xc2, 0x8c, 0x0c, 0x25, 0x4e,
0xe2, 0x36, 0x07, 0x4b, 0x73, 0x29, 0x7b, 0xbd, 0x23, 0x50, 0x5c, 0x31, 0x46, 0xba, 0xc4, 0x96,
0xf9, 0x98, 0x22, 0x90, 0x2d, 0x36, 0x07, 0x96, 0x18, 0x23, 0x27, 0xa1, 0xb1, 0x0f, 0xb5, 0x05,
0xc5, 0x74, 0x68, 0x0d, 0x7b, 0xb6, 0x65, 0x5f, 0x70, 0xfb, 0xe3, 0xb9, 0xfd, 0x69, 0x74, 0x71,
0xd2, 0xee, 0xf5, 0xed, 0xae, 0xb6, 0x61, 0x34, 0x61, 0xdb, 0x12, 0x37, 0xc1, 0x14, 0xe3, 0xb4,
0x7b, 0xe3, 0xa7, 0x02, 0x95, 0xb4, 0x45, 0x1d, 0x2a, 0x77, 0x18, 0x46, 0x9e, 0xf0, 0x93, 0x3c,
0xa4, 0x90, 0xed, 0x83, 0x12, 0xcf, 0x82, 0x79, 0x24, 0xb6, 0xcc, 0x26, 0x35, 0x94, 0xf6, 0x32,
0x9a, 0x05, 0xc8, 0xe9, 0x35, 0x3b, 0x86, 0x6a, 0x1a, 0x7c, 0xba, 0x50, 0xdd, 0x6c, 0xad, 0x8b,
0x2f, 0x5f, 0xa8, 0xd8, 0x3b, 0x68, 0x04, 0x99, 0x48, 0xd1, 0x8d, 0xeb, 0xa6, 0x4e, 0xae, 0x35,
0x59, 0xe3, 0x39, 0xf5, 0xc2, 0x9d, 0x64, 0x86, 0x96, 0x9b, 0x73, 0xe7, 0xc3, 0xc4, 0x73, 0x6a,
0xf6, 0x1e, 0x36, 0x9d, 0xec, 0xf2, 0xe9, 0x9f, 0x59, 0x37, 0x1f, 0x91, 0x7d, 0x5d, 0x2c, 0x78,
0x5e, 0xcf, 0xde, 0x40, 0xdd, 0x5d, 0xe6, 0x41, 0xaf, 0x90, 0xfd, 0x7f, 0xb2, 0xaf, 0xe6, 0x84,
0x67, 0xb5, 0xec, 0x59, 0x9a, 0x86, 0x2a, 0x99, 0x9a, 0x2b, 0x2b, 0x4e, 0x03, 0x72, 0x0c, 0x55,
0x37, 0x59, 0x99, 0x5e, 0xcb, 0x8c, 0xf4, 0xc1, 0x1e, 0xf9, 0x42, 0x65, 0xdc, 0x83, 0x22, 0x57,
0xc2, 0x1a, 0x50, 0xed, 0xf4, 0x46, 0x27, 0x3d, 0xbb, 0xdf, 0xd5, 0x36, 0x58, 0x13, 0x36, 0x73,
0xa1, 0xd0, 0x0a, 0x4b, 0x6a, 0xd8, 0xfe, 0xdc, 0x1f, 0xb4, 0xbb, 0x5a, 0x51, 0x52, 0xed, 0xb3,
0xb3, 0xc1, 0xb9, 0x24, 0xe5, 0x2b, 0xad, 0xc4, 0x34, 0x68, 0x58, 0xed, 0x33, 0xcb, 0xee, 0x27,
0x8c, 0xc2, 0x6a, 0xa0, 0xda, 0x9c, 0x0f, 0xb8, 0xa6, 0xca, 0x33, 0xac, 0xc1, 0xe9, 0xb0, 0x6f,
0x8f, 0x6c, 0xad, 0x7c, 0x59, 0xa6, 0x8f, 0xee, 0xab, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd,
0x11, 0xd8, 0xe6, 0x8b, 0x05, 0x00, 0x00,
} | var ErrorMessage_ErrorCode_value = map[string]int32{
"PIECE_REQUEST_FAILED": 0,
}
| random_line_split |
p2p.pb.go | // Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go.
// source: proto/p2p/p2p.proto
// DO NOT EDIT!
/*
Package p2p is a generated protocol buffer package.
It is generated from these files:
proto/p2p/p2p.proto
It has these top-level messages:
BitfieldMessage
PieceRequestMessage
PiecePayloadMessage
AnnouncePieceMessage
CancelPieceMessage
ErrorMessage
CompleteMessage
Message
*/
package p2p
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ErrorMessage_ErrorCode int32
const (
ErrorMessage_PIECE_REQUEST_FAILED ErrorMessage_ErrorCode = 0
)
var ErrorMessage_ErrorCode_name = map[int32]string{
0: "PIECE_REQUEST_FAILED",
}
var ErrorMessage_ErrorCode_value = map[string]int32{
"PIECE_REQUEST_FAILED": 0,
}
func (x ErrorMessage_ErrorCode) String() string {
return proto.EnumName(ErrorMessage_ErrorCode_name, int32(x))
}
func (ErrorMessage_ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 0} }
type Message_Type int32
const (
Message_BITFIELD Message_Type = 0
Message_PIECE_REQUEST Message_Type = 1
Message_PIECE_PAYLOAD Message_Type = 2
Message_ANNOUCE_PIECE Message_Type = 3
Message_CANCEL_PIECE Message_Type = 4
Message_ERROR Message_Type = 5
Message_COMPLETE Message_Type = 6
)
var Message_Type_name = map[int32]string{
0: "BITFIELD",
1: "PIECE_REQUEST",
2: "PIECE_PAYLOAD",
3: "ANNOUCE_PIECE",
4: "CANCEL_PIECE",
5: "ERROR",
6: "COMPLETE",
}
var Message_Type_value = map[string]int32{
"BITFIELD": 0,
"PIECE_REQUEST": 1,
"PIECE_PAYLOAD": 2,
"ANNOUCE_PIECE": 3,
"CANCEL_PIECE": 4,
"ERROR": 5,
"COMPLETE": 6,
}
func (x Message_Type) String() string {
return proto.EnumName(Message_Type_name, int32(x))
}
func (Message_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} }
// Binary set of all pieces that peer has downloaded so far. Also serves as a
// handshaking message, which each peer sends once at the beginning of the
// connection to declare what their peer id is and what info hash they want to
// transmit.
type BitfieldMessage struct {
InfoHash string `protobuf:"bytes,2,opt,name=infoHash" json:"infoHash,omitempty"`
// TODO: Torrent name is the content hash. Current torrent storage is
// content addressable. Adding name as a part of handshake makes looking
// up torrents faster. If storage supports addressing torrent by infohash,
// this extra field should removed.
// XXX(codyg): We rely on this name field for announcing too, so tracker can
// look up origins that have this content.
// We currently treat infohash as verification of torrents.
Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
PeerID string `protobuf:"bytes,4,opt,name=peerID" json:"peerID,omitempty"`
BitfieldBytes []byte `protobuf:"bytes,5,opt,name=bitfieldBytes,proto3" json:"bitfieldBytes,omitempty"`
Namespace string `protobuf:"bytes,6,opt,name=namespace" json:"namespace,omitempty"`
// remoteBitfieldBytes contains the binary sets of pieces downloaded of
// all peers that the sender is currently connected to.
RemoteBitfieldBytes map[string][]byte `protobuf:"bytes,7,rep,name=remoteBitfieldBytes" json:"remoteBitfieldBytes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *BitfieldMessage) Reset() { *m = BitfieldMessage{} }
func (m *BitfieldMessage) String() string { return proto.CompactTextString(m) }
func (*BitfieldMessage) ProtoMessage() {}
func (*BitfieldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *BitfieldMessage) GetRemoteBitfieldBytes() map[string][]byte {
if m != nil {
return m.RemoteBitfieldBytes
}
return nil
}
// Requests a piece of the given index. Note: offset and length are unused fields
// and if set, will be rejected.
type PieceRequestMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
Offset int32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"`
Length int32 `protobuf:"varint,4,opt,name=length" json:"length,omitempty"`
}
func (m *PieceRequestMessage) Reset() { *m = PieceRequestMessage{} }
func (m *PieceRequestMessage) String() string { return proto.CompactTextString(m) }
func (*PieceRequestMessage) ProtoMessage() {}
func (*PieceRequestMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
// Provides binary payload response to a peer request. Always immediately followed
// by a binary blob sent over socket, so the receiver should be ready to treat the
// blob as a non-protobuf message.
type PiecePayloadMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
Offset int32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"`
Length int32 `protobuf:"varint,4,opt,name=length" json:"length,omitempty"`
Digest string `protobuf:"bytes,5,opt,name=digest" json:"digest,omitempty"`
}
func (m *PiecePayloadMessage) Reset() { *m = PiecePayloadMessage{} }
func (m *PiecePayloadMessage) String() string { return proto.CompactTextString(m) }
func (*PiecePayloadMessage) ProtoMessage() {}
func (*PiecePayloadMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
// Announces that a piece is available to other peers.
type AnnouncePieceMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
}
func (m *AnnouncePieceMessage) Reset() { *m = AnnouncePieceMessage{} }
func (m *AnnouncePieceMessage) String() string { return proto.CompactTextString(m) }
func (*AnnouncePieceMessage) ProtoMessage() {}
func (*AnnouncePieceMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// Unused.
type CancelPieceMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
}
func (m *CancelPieceMessage) Reset() { *m = CancelPieceMessage{} }
func (m *CancelPieceMessage) String() string { return proto.CompactTextString(m) }
func (*CancelPieceMessage) ProtoMessage() {}
func (*CancelPieceMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
// General purpose error message. Receivers may check the error code to determine
// the origin of the message.
type ErrorMessage struct {
Error string `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
Index int32 `protobuf:"varint,3,opt,name=index" json:"index,omitempty"`
Code ErrorMessage_ErrorCode `protobuf:"varint,4,opt,name=code,enum=p2p.ErrorMessage_ErrorCode" json:"code,omitempty"`
}
func (m *ErrorMessage) Reset() { *m = ErrorMessage{} }
func (m *ErrorMessage) String() string { return proto.CompactTextString(m) }
func (*ErrorMessage) ProtoMessage() {}
func (*ErrorMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
// Notifies other peers that the torrent has completed and all pieces are available.
type CompleteMessage struct {
}
func (m *CompleteMessage) Reset() { *m = CompleteMessage{} }
func (m *CompleteMessage) String() string { return proto.CompactTextString(m) }
func (*CompleteMessage) ProtoMessage() {}
func (*CompleteMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type Message struct {
Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
Type Message_Type `protobuf:"varint,2,opt,name=type,enum=p2p.Message_Type" json:"type,omitempty"`
Bitfield *BitfieldMessage `protobuf:"bytes,3,opt,name=bitfield" json:"bitfield,omitempty"`
PieceRequest *PieceRequestMessage `protobuf:"bytes,4,opt,name=pieceRequest" json:"pieceRequest,omitempty"`
PiecePayload *PiecePayloadMessage `protobuf:"bytes,5,opt,name=piecePayload" json:"piecePayload,omitempty"`
AnnouncePiece *AnnouncePieceMessage `protobuf:"bytes,6,opt,name=announcePiece" json:"announcePiece,omitempty"`
CancelPiece *CancelPieceMessage `protobuf:"bytes,7,opt,name=cancelPiece" json:"cancelPiece,omitempty"`
Error *ErrorMessage `protobuf:"bytes,8,opt,name=error" json:"error,omitempty"`
Complete *CompleteMessage `protobuf:"bytes,9,opt,name=complete" json:"complete,omitempty"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *Message) GetBitfield() *BitfieldMessage {
if m != nil {
return m.Bitfield
}
return nil
}
func (m *Message) GetPieceRequest() *PieceRequestMessage {
if m != nil {
return m.PieceRequest
}
return nil
}
func (m *Message) GetPiecePayload() *PiecePayloadMessage {
if m != nil {
return m.PiecePayload
}
return nil
}
func (m *Message) GetAnnouncePiece() *AnnouncePieceMessage {
if m != nil {
return m.AnnouncePiece
}
return nil
}
func (m *Message) GetCancelPiece() *CancelPieceMessage {
if m != nil {
return m.CancelPiece
}
return nil
}
func (m *Message) GetError() *ErrorMessage {
if m != nil {
return m.Error
}
return nil
}
func (m *Message) GetComplete() *CompleteMessage {
if m != nil {
return m.Complete
}
return nil
}
func init() {
proto.RegisterType((*BitfieldMessage)(nil), "p2p.BitfieldMessage")
proto.RegisterType((*PieceRequestMessage)(nil), "p2p.PieceRequestMessage")
proto.RegisterType((*PiecePayloadMessage)(nil), "p2p.PiecePayloadMessage")
proto.RegisterType((*AnnouncePieceMessage)(nil), "p2p.AnnouncePieceMessage")
proto.RegisterType((*CancelPieceMessage)(nil), "p2p.CancelPieceMessage")
proto.RegisterType((*ErrorMessage)(nil), "p2p.ErrorMessage")
proto.RegisterType((*CompleteMessage)(nil), "p2p.CompleteMessage")
proto.RegisterType((*Message)(nil), "p2p.Message")
proto.RegisterEnum("p2p.ErrorMessage_ErrorCode", ErrorMessage_ErrorCode_name, ErrorMessage_ErrorCode_value)
proto.RegisterEnum("p2p.Message_Type", Message_Type_name, Message_Type_value)
}
func init() |
var fileDescriptor0 = []byte{
// 647 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x6f, 0xd3, 0x4e,
0x10, 0x6d, 0x12, 0x3b, 0x7f, 0x26, 0x69, 0xeb, 0x6c, 0xa3, 0xdf, 0xcf, 0x14, 0x0e, 0x95, 0x45,
0x45, 0x85, 0xa0, 0xad, 0xcc, 0x05, 0x10, 0x12, 0x4a, 0x1c, 0x57, 0x44, 0x4a, 0x9b, 0xb0, 0xa4,
0x07, 0xc4, 0xa1, 0x72, 0x9d, 0x49, 0x6b, 0x91, 0x7a, 0x8d, 0xed, 0x56, 0xcd, 0xd7, 0xe0, 0x43,
0x21, 0xf1, 0xad, 0xd0, 0x4e, 0xec, 0xc4, 0x6e, 0x02, 0xe2, 0xc0, 0x21, 0x92, 0xdf, 0xf3, 0x7b,
0xb3, 0xb3, 0x33, 0x2f, 0x86, 0x9d, 0x20, 0x14, 0xb1, 0x38, 0x0a, 0xcc, 0x40, 0xfe, 0x0e, 0x09,
0xb1, 0x52, 0x60, 0x06, 0xc6, 0x8f, 0x22, 0x6c, 0x77, 0xbc, 0x78, 0xe2, 0xe1, 0x74, 0x7c, 0x8a,
0x51, 0xe4, 0x5c, 0x21, 0xdb, 0x85, 0xaa, 0xe7, 0x4f, 0xc4, 0x07, 0x27, 0xba, 0xd6, 0x8b, 0x7b,
0x85, 0x83, 0x1a, 0x5f, 0x60, 0xc6, 0x40, 0xf1, 0x9d, 0x1b, 0xd4, 0x4b, 0xc4, 0xd3, 0x33, 0xfb,
0x0f, 0xca, 0x01, 0x62, 0xd8, 0xeb, 0xea, 0x0a, 0xb1, 0x09, 0x62, 0x4f, 0x61, 0xf3, 0x32, 0x29,
0xdd, 0x99, 0xc5, 0x18, 0xe9, 0xea, 0x5e, 0xe1, 0xa0, 0xc1, 0xf3, 0x24, 0x7b, 0x02, 0x35, 0x59,
0x25, 0x0a, 0x1c, 0x17, 0xf5, 0x32, 0x15, 0x58, 0x12, 0xec, 0x02, 0x76, 0x42, 0xbc, 0x11, 0x31,
0x76, 0x72, 0x95, 0x2a, 0x7b, 0xa5, 0x83, 0xba, 0xf9, 0xf2, 0x50, 0xde, 0xe6, 0x41, 0xfb, 0x87,
0x7c, 0x55, 0x6f, 0xfb, 0x71, 0x38, 0xe3, 0xeb, 0x2a, 0xed, 0x9e, 0x80, 0xfe, 0x3b, 0x03, 0xd3,
0xa0, 0xf4, 0x15, 0x67, 0x7a, 0x81, 0x9a, 0x92, 0x8f, 0xac, 0x05, 0xea, 0x9d, 0x33, 0xbd, 0x45,
0x9a, 0x4b, 0x83, 0xcf, 0xc1, 0xdb, 0xe2, 0xeb, 0x82, 0xf1, 0x05, 0x76, 0x86, 0x1e, 0xba, 0xc8,
0xf1, 0xdb, 0x2d, 0x46, 0x71, 0x3a, 0xcb, 0x16, 0xa8, 0x9e, 0x3f, 0xc6, 0x7b, 0x32, 0xa8, 0x7c,
0x0e, 0xe4, 0xc4, 0xc4, 0x64, 0x12, 0x61, 0x4c, 0x73, 0x54, 0x79, 0x82, 0x24, 0x3f, 0x45, 0xff,
0x2a, 0xbe, 0xa6, 0x49, 0xaa, 0x3c, 0x41, 0x46, 0x94, 0x14, 0x1f, 0x3a, 0xb3, 0xa9, 0x70, 0xc6,
0xff, 0xb4, 0xb8, 0xe4, 0xc7, 0xde, 0x15, 0x46, 0x31, 0xed, 0xa7, 0xc6, 0x13, 0x64, 0xbc, 0x80,
0x56, 0xdb, 0xf7, 0xc5, 0xad, 0xef, 0x22, 0x1d, 0xfe, 0xc7, 0x53, 0x8d, 0xe7, 0xc0, 0x2c, 0xc7,
0x77, 0x71, 0xfa, 0x17, 0xda, 0xef, 0x05, 0x68, 0xd8, 0x61, 0x28, 0xc2, 0x8c, 0x0c, 0x25, 0x4e,
0xe2, 0x36, 0x07, 0x4b, 0x73, 0x29, 0x7b, 0xbd, 0x23, 0x50, 0x5c, 0x31, 0x46, 0xba, 0xc4, 0x96,
0xf9, 0x98, 0x22, 0x90, 0x2d, 0x36, 0x07, 0x96, 0x18, 0x23, 0x27, 0xa1, 0xb1, 0x0f, 0xb5, 0x05,
0xc5, 0x74, 0x68, 0x0d, 0x7b, 0xb6, 0x65, 0x5f, 0x70, 0xfb, 0xe3, 0xb9, 0xfd, 0x69, 0x74, 0x71,
0xd2, 0xee, 0xf5, 0xed, 0xae, 0xb6, 0x61, 0x34, 0x61, 0xdb, 0x12, 0x37, 0xc1, 0x14, 0xe3, 0xb4,
0x7b, 0xe3, 0xa7, 0x02, 0x95, 0xb4, 0x45, 0x1d, 0x2a, 0x77, 0x18, 0x46, 0x9e, 0xf0, 0x93, 0x3c,
0xa4, 0x90, 0xed, 0x83, 0x12, 0xcf, 0x82, 0x79, 0x24, 0xb6, 0xcc, 0x26, 0x35, 0x94, 0xf6, 0x32,
0x9a, 0x05, 0xc8, 0xe9, 0x35, 0x3b, 0x86, 0x6a, 0x1a, 0x7c, 0xba, 0x50, 0xdd, 0x6c, 0xad, 0x8b,
0x2f, 0x5f, 0xa8, 0xd8, 0x3b, 0x68, 0x04, 0x99, 0x48, 0xd1, 0x8d, 0xeb, 0xa6, 0x4e, 0xae, 0x35,
0x59, 0xe3, 0x39, 0xf5, 0xc2, 0x9d, 0x64, 0x86, 0x96, 0x9b, 0x73, 0xe7, 0xc3, 0xc4, 0x73, 0x6a,
0xf6, 0x1e, 0x36, 0x9d, 0xec, 0xf2, 0xe9, 0x9f, 0x59, 0x37, 0x1f, 0x91, 0x7d, 0x5d, 0x2c, 0x78,
0x5e, 0xcf, 0xde, 0x40, 0xdd, 0x5d, 0xe6, 0x41, 0xaf, 0x90, 0xfd, 0x7f, 0xb2, 0xaf, 0xe6, 0x84,
0x67, 0xb5, 0xec, 0x59, 0x9a, 0x86, 0x2a, 0x99, 0x9a, 0x2b, 0x2b, 0x4e, 0x03, 0x72, 0x0c, 0x55,
0x37, 0x59, 0x99, 0x5e, 0xcb, 0x8c, 0xf4, 0xc1, 0x1e, 0xf9, 0x42, 0x65, 0xdc, 0x83, 0x22, 0x57,
0xc2, 0x1a, 0x50, 0xed, 0xf4, 0x46, 0x27, 0x3d, 0xbb, 0xdf, 0xd5, 0x36, 0x58, 0x13, 0x36, 0x73,
0xa1, 0xd0, 0x0a, 0x4b, 0x6a, 0xd8, 0xfe, 0xdc, 0x1f, 0xb4, 0xbb, 0x5a, 0x51, 0x52, 0xed, 0xb3,
0xb3, 0xc1, 0xb9, 0x24, 0xe5, 0x2b, 0xad, 0xc4, 0x34, 0x68, 0x58, 0xed, 0x33, 0xcb, 0xee, 0x27,
0x8c, 0xc2, 0x6a, 0xa0, 0xda, 0x9c, 0x0f, 0xb8, 0xa6, 0xca, 0x33, 0xac, 0xc1, 0xe9, 0xb0, 0x6f,
0x8f, 0x6c, 0xad, 0x7c, 0x59, 0xa6, 0x8f, 0xee, 0xab, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd,
0x11, 0xd8, 0xe6, 0x8b, 0x05, 0x00, 0x00,
}
| { proto.RegisterFile("proto/p2p/p2p.proto", fileDescriptor0) } | identifier_body |
p2p.pb.go | // Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go.
// source: proto/p2p/p2p.proto
// DO NOT EDIT!
/*
Package p2p is a generated protocol buffer package.
It is generated from these files:
proto/p2p/p2p.proto
It has these top-level messages:
BitfieldMessage
PieceRequestMessage
PiecePayloadMessage
AnnouncePieceMessage
CancelPieceMessage
ErrorMessage
CompleteMessage
Message
*/
package p2p
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ErrorMessage_ErrorCode int32
const (
ErrorMessage_PIECE_REQUEST_FAILED ErrorMessage_ErrorCode = 0
)
var ErrorMessage_ErrorCode_name = map[int32]string{
0: "PIECE_REQUEST_FAILED",
}
var ErrorMessage_ErrorCode_value = map[string]int32{
"PIECE_REQUEST_FAILED": 0,
}
func (x ErrorMessage_ErrorCode) String() string {
return proto.EnumName(ErrorMessage_ErrorCode_name, int32(x))
}
func (ErrorMessage_ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 0} }
type Message_Type int32
const (
Message_BITFIELD Message_Type = 0
Message_PIECE_REQUEST Message_Type = 1
Message_PIECE_PAYLOAD Message_Type = 2
Message_ANNOUCE_PIECE Message_Type = 3
Message_CANCEL_PIECE Message_Type = 4
Message_ERROR Message_Type = 5
Message_COMPLETE Message_Type = 6
)
var Message_Type_name = map[int32]string{
0: "BITFIELD",
1: "PIECE_REQUEST",
2: "PIECE_PAYLOAD",
3: "ANNOUCE_PIECE",
4: "CANCEL_PIECE",
5: "ERROR",
6: "COMPLETE",
}
var Message_Type_value = map[string]int32{
"BITFIELD": 0,
"PIECE_REQUEST": 1,
"PIECE_PAYLOAD": 2,
"ANNOUCE_PIECE": 3,
"CANCEL_PIECE": 4,
"ERROR": 5,
"COMPLETE": 6,
}
func (x Message_Type) String() string {
return proto.EnumName(Message_Type_name, int32(x))
}
func (Message_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} }
// Binary set of all pieces that peer has downloaded so far. Also serves as a
// handshaking message, which each peer sends once at the beginning of the
// connection to declare what their peer id is and what info hash they want to
// transmit.
type BitfieldMessage struct {
InfoHash string `protobuf:"bytes,2,opt,name=infoHash" json:"infoHash,omitempty"`
// TODO: Torrent name is the content hash. Current torrent storage is
// content addressable. Adding name as a part of handshake makes looking
// up torrents faster. If storage supports addressing torrent by infohash,
// this extra field should removed.
// XXX(codyg): We rely on this name field for announcing too, so tracker can
// look up origins that have this content.
// We currently treat infohash as verification of torrents.
Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
PeerID string `protobuf:"bytes,4,opt,name=peerID" json:"peerID,omitempty"`
BitfieldBytes []byte `protobuf:"bytes,5,opt,name=bitfieldBytes,proto3" json:"bitfieldBytes,omitempty"`
Namespace string `protobuf:"bytes,6,opt,name=namespace" json:"namespace,omitempty"`
// remoteBitfieldBytes contains the binary sets of pieces downloaded of
// all peers that the sender is currently connected to.
RemoteBitfieldBytes map[string][]byte `protobuf:"bytes,7,rep,name=remoteBitfieldBytes" json:"remoteBitfieldBytes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *BitfieldMessage) Reset() { *m = BitfieldMessage{} }
func (m *BitfieldMessage) String() string { return proto.CompactTextString(m) }
func (*BitfieldMessage) ProtoMessage() {}
func (*BitfieldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *BitfieldMessage) GetRemoteBitfieldBytes() map[string][]byte {
if m != nil |
return nil
}
// Requests a piece of the given index. Note: offset and length are unused fields
// and if set, will be rejected.
type PieceRequestMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
Offset int32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"`
Length int32 `protobuf:"varint,4,opt,name=length" json:"length,omitempty"`
}
func (m *PieceRequestMessage) Reset() { *m = PieceRequestMessage{} }
func (m *PieceRequestMessage) String() string { return proto.CompactTextString(m) }
func (*PieceRequestMessage) ProtoMessage() {}
func (*PieceRequestMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
// Provides binary payload response to a peer request. Always immediately followed
// by a binary blob sent over socket, so the receiver should be ready to treat the
// blob as a non-protobuf message.
type PiecePayloadMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
Offset int32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"`
Length int32 `protobuf:"varint,4,opt,name=length" json:"length,omitempty"`
Digest string `protobuf:"bytes,5,opt,name=digest" json:"digest,omitempty"`
}
func (m *PiecePayloadMessage) Reset() { *m = PiecePayloadMessage{} }
func (m *PiecePayloadMessage) String() string { return proto.CompactTextString(m) }
func (*PiecePayloadMessage) ProtoMessage() {}
func (*PiecePayloadMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
// Announces that a piece is available to other peers.
type AnnouncePieceMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
}
func (m *AnnouncePieceMessage) Reset() { *m = AnnouncePieceMessage{} }
func (m *AnnouncePieceMessage) String() string { return proto.CompactTextString(m) }
func (*AnnouncePieceMessage) ProtoMessage() {}
func (*AnnouncePieceMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// Unused.
type CancelPieceMessage struct {
Index int32 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"`
}
func (m *CancelPieceMessage) Reset() { *m = CancelPieceMessage{} }
func (m *CancelPieceMessage) String() string { return proto.CompactTextString(m) }
func (*CancelPieceMessage) ProtoMessage() {}
func (*CancelPieceMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
// General purpose error message. Receivers may check the error code to determine
// the origin of the message.
type ErrorMessage struct {
Error string `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
Index int32 `protobuf:"varint,3,opt,name=index" json:"index,omitempty"`
Code ErrorMessage_ErrorCode `protobuf:"varint,4,opt,name=code,enum=p2p.ErrorMessage_ErrorCode" json:"code,omitempty"`
}
func (m *ErrorMessage) Reset() { *m = ErrorMessage{} }
func (m *ErrorMessage) String() string { return proto.CompactTextString(m) }
func (*ErrorMessage) ProtoMessage() {}
func (*ErrorMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
// Notifies other peers that the torrent has completed and all pieces are available.
type CompleteMessage struct {
}
func (m *CompleteMessage) Reset() { *m = CompleteMessage{} }
func (m *CompleteMessage) String() string { return proto.CompactTextString(m) }
func (*CompleteMessage) ProtoMessage() {}
func (*CompleteMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type Message struct {
Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
Type Message_Type `protobuf:"varint,2,opt,name=type,enum=p2p.Message_Type" json:"type,omitempty"`
Bitfield *BitfieldMessage `protobuf:"bytes,3,opt,name=bitfield" json:"bitfield,omitempty"`
PieceRequest *PieceRequestMessage `protobuf:"bytes,4,opt,name=pieceRequest" json:"pieceRequest,omitempty"`
PiecePayload *PiecePayloadMessage `protobuf:"bytes,5,opt,name=piecePayload" json:"piecePayload,omitempty"`
AnnouncePiece *AnnouncePieceMessage `protobuf:"bytes,6,opt,name=announcePiece" json:"announcePiece,omitempty"`
CancelPiece *CancelPieceMessage `protobuf:"bytes,7,opt,name=cancelPiece" json:"cancelPiece,omitempty"`
Error *ErrorMessage `protobuf:"bytes,8,opt,name=error" json:"error,omitempty"`
Complete *CompleteMessage `protobuf:"bytes,9,opt,name=complete" json:"complete,omitempty"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *Message) GetBitfield() *BitfieldMessage {
if m != nil {
return m.Bitfield
}
return nil
}
func (m *Message) GetPieceRequest() *PieceRequestMessage {
if m != nil {
return m.PieceRequest
}
return nil
}
func (m *Message) GetPiecePayload() *PiecePayloadMessage {
if m != nil {
return m.PiecePayload
}
return nil
}
func (m *Message) GetAnnouncePiece() *AnnouncePieceMessage {
if m != nil {
return m.AnnouncePiece
}
return nil
}
func (m *Message) GetCancelPiece() *CancelPieceMessage {
if m != nil {
return m.CancelPiece
}
return nil
}
func (m *Message) GetError() *ErrorMessage {
if m != nil {
return m.Error
}
return nil
}
func (m *Message) GetComplete() *CompleteMessage {
if m != nil {
return m.Complete
}
return nil
}
func init() {
proto.RegisterType((*BitfieldMessage)(nil), "p2p.BitfieldMessage")
proto.RegisterType((*PieceRequestMessage)(nil), "p2p.PieceRequestMessage")
proto.RegisterType((*PiecePayloadMessage)(nil), "p2p.PiecePayloadMessage")
proto.RegisterType((*AnnouncePieceMessage)(nil), "p2p.AnnouncePieceMessage")
proto.RegisterType((*CancelPieceMessage)(nil), "p2p.CancelPieceMessage")
proto.RegisterType((*ErrorMessage)(nil), "p2p.ErrorMessage")
proto.RegisterType((*CompleteMessage)(nil), "p2p.CompleteMessage")
proto.RegisterType((*Message)(nil), "p2p.Message")
proto.RegisterEnum("p2p.ErrorMessage_ErrorCode", ErrorMessage_ErrorCode_name, ErrorMessage_ErrorCode_value)
proto.RegisterEnum("p2p.Message_Type", Message_Type_name, Message_Type_value)
}
func init() { proto.RegisterFile("proto/p2p/p2p.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 647 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x6f, 0xd3, 0x4e,
0x10, 0x6d, 0x12, 0x3b, 0x7f, 0x26, 0x69, 0xeb, 0x6c, 0xa3, 0xdf, 0xcf, 0x14, 0x0e, 0x95, 0x45,
0x45, 0x85, 0xa0, 0xad, 0xcc, 0x05, 0x10, 0x12, 0x4a, 0x1c, 0x57, 0x44, 0x4a, 0x9b, 0xb0, 0xa4,
0x07, 0xc4, 0xa1, 0x72, 0x9d, 0x49, 0x6b, 0x91, 0x7a, 0x8d, 0xed, 0x56, 0xcd, 0xd7, 0xe0, 0x43,
0x21, 0xf1, 0xad, 0xd0, 0x4e, 0xec, 0xc4, 0x6e, 0x02, 0xe2, 0xc0, 0x21, 0x92, 0xdf, 0xf3, 0x7b,
0xb3, 0xb3, 0x33, 0x2f, 0x86, 0x9d, 0x20, 0x14, 0xb1, 0x38, 0x0a, 0xcc, 0x40, 0xfe, 0x0e, 0x09,
0xb1, 0x52, 0x60, 0x06, 0xc6, 0x8f, 0x22, 0x6c, 0x77, 0xbc, 0x78, 0xe2, 0xe1, 0x74, 0x7c, 0x8a,
0x51, 0xe4, 0x5c, 0x21, 0xdb, 0x85, 0xaa, 0xe7, 0x4f, 0xc4, 0x07, 0x27, 0xba, 0xd6, 0x8b, 0x7b,
0x85, 0x83, 0x1a, 0x5f, 0x60, 0xc6, 0x40, 0xf1, 0x9d, 0x1b, 0xd4, 0x4b, 0xc4, 0xd3, 0x33, 0xfb,
0x0f, 0xca, 0x01, 0x62, 0xd8, 0xeb, 0xea, 0x0a, 0xb1, 0x09, 0x62, 0x4f, 0x61, 0xf3, 0x32, 0x29,
0xdd, 0x99, 0xc5, 0x18, 0xe9, 0xea, 0x5e, 0xe1, 0xa0, 0xc1, 0xf3, 0x24, 0x7b, 0x02, 0x35, 0x59,
0x25, 0x0a, 0x1c, 0x17, 0xf5, 0x32, 0x15, 0x58, 0x12, 0xec, 0x02, 0x76, 0x42, 0xbc, 0x11, 0x31,
0x76, 0x72, 0x95, 0x2a, 0x7b, 0xa5, 0x83, 0xba, 0xf9, 0xf2, 0x50, 0xde, 0xe6, 0x41, 0xfb, 0x87,
0x7c, 0x55, 0x6f, 0xfb, 0x71, 0x38, 0xe3, 0xeb, 0x2a, 0xed, 0x9e, 0x80, 0xfe, 0x3b, 0x03, 0xd3,
0xa0, 0xf4, 0x15, 0x67, 0x7a, 0x81, 0x9a, 0x92, 0x8f, 0xac, 0x05, 0xea, 0x9d, 0x33, 0xbd, 0x45,
0x9a, 0x4b, 0x83, 0xcf, 0xc1, 0xdb, 0xe2, 0xeb, 0x82, 0xf1, 0x05, 0x76, 0x86, 0x1e, 0xba, 0xc8,
0xf1, 0xdb, 0x2d, 0x46, 0x71, 0x3a, 0xcb, 0x16, 0xa8, 0x9e, 0x3f, 0xc6, 0x7b, 0x32, 0xa8, 0x7c,
0x0e, 0xe4, 0xc4, 0xc4, 0x64, 0x12, 0x61, 0x4c, 0x73, 0x54, 0x79, 0x82, 0x24, 0x3f, 0x45, 0xff,
0x2a, 0xbe, 0xa6, 0x49, 0xaa, 0x3c, 0x41, 0x46, 0x94, 0x14, 0x1f, 0x3a, 0xb3, 0xa9, 0x70, 0xc6,
0xff, 0xb4, 0xb8, 0xe4, 0xc7, 0xde, 0x15, 0x46, 0x31, 0xed, 0xa7, 0xc6, 0x13, 0x64, 0xbc, 0x80,
0x56, 0xdb, 0xf7, 0xc5, 0xad, 0xef, 0x22, 0x1d, 0xfe, 0xc7, 0x53, 0x8d, 0xe7, 0xc0, 0x2c, 0xc7,
0x77, 0x71, 0xfa, 0x17, 0xda, 0xef, 0x05, 0x68, 0xd8, 0x61, 0x28, 0xc2, 0x8c, 0x0c, 0x25, 0x4e,
0xe2, 0x36, 0x07, 0x4b, 0x73, 0x29, 0x7b, 0xbd, 0x23, 0x50, 0x5c, 0x31, 0x46, 0xba, 0xc4, 0x96,
0xf9, 0x98, 0x22, 0x90, 0x2d, 0x36, 0x07, 0x96, 0x18, 0x23, 0x27, 0xa1, 0xb1, 0x0f, 0xb5, 0x05,
0xc5, 0x74, 0x68, 0x0d, 0x7b, 0xb6, 0x65, 0x5f, 0x70, 0xfb, 0xe3, 0xb9, 0xfd, 0x69, 0x74, 0x71,
0xd2, 0xee, 0xf5, 0xed, 0xae, 0xb6, 0x61, 0x34, 0x61, 0xdb, 0x12, 0x37, 0xc1, 0x14, 0xe3, 0xb4,
0x7b, 0xe3, 0xa7, 0x02, 0x95, 0xb4, 0x45, 0x1d, 0x2a, 0x77, 0x18, 0x46, 0x9e, 0xf0, 0x93, 0x3c,
0xa4, 0x90, 0xed, 0x83, 0x12, 0xcf, 0x82, 0x79, 0x24, 0xb6, 0xcc, 0x26, 0x35, 0x94, 0xf6, 0x32,
0x9a, 0x05, 0xc8, 0xe9, 0x35, 0x3b, 0x86, 0x6a, 0x1a, 0x7c, 0xba, 0x50, 0xdd, 0x6c, 0xad, 0x8b,
0x2f, 0x5f, 0xa8, 0xd8, 0x3b, 0x68, 0x04, 0x99, 0x48, 0xd1, 0x8d, 0xeb, 0xa6, 0x4e, 0xae, 0x35,
0x59, 0xe3, 0x39, 0xf5, 0xc2, 0x9d, 0x64, 0x86, 0x96, 0x9b, 0x73, 0xe7, 0xc3, 0xc4, 0x73, 0x6a,
0xf6, 0x1e, 0x36, 0x9d, 0xec, 0xf2, 0xe9, 0x9f, 0x59, 0x37, 0x1f, 0x91, 0x7d, 0x5d, 0x2c, 0x78,
0x5e, 0xcf, 0xde, 0x40, 0xdd, 0x5d, 0xe6, 0x41, 0xaf, 0x90, 0xfd, 0x7f, 0xb2, 0xaf, 0xe6, 0x84,
0x67, 0xb5, 0xec, 0x59, 0x9a, 0x86, 0x2a, 0x99, 0x9a, 0x2b, 0x2b, 0x4e, 0x03, 0x72, 0x0c, 0x55,
0x37, 0x59, 0x99, 0x5e, 0xcb, 0x8c, 0xf4, 0xc1, 0x1e, 0xf9, 0x42, 0x65, 0xdc, 0x83, 0x22, 0x57,
0xc2, 0x1a, 0x50, 0xed, 0xf4, 0x46, 0x27, 0x3d, 0xbb, 0xdf, 0xd5, 0x36, 0x58, 0x13, 0x36, 0x73,
0xa1, 0xd0, 0x0a, 0x4b, 0x6a, 0xd8, 0xfe, 0xdc, 0x1f, 0xb4, 0xbb, 0x5a, 0x51, 0x52, 0xed, 0xb3,
0xb3, 0xc1, 0xb9, 0x24, 0xe5, 0x2b, 0xad, 0xc4, 0x34, 0x68, 0x58, 0xed, 0x33, 0xcb, 0xee, 0x27,
0x8c, 0xc2, 0x6a, 0xa0, 0xda, 0x9c, 0x0f, 0xb8, 0xa6, 0xca, 0x33, 0xac, 0xc1, 0xe9, 0xb0, 0x6f,
0x8f, 0x6c, 0xad, 0x7c, 0x59, 0xa6, 0x8f, 0xee, 0xab, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd,
0x11, 0xd8, 0xe6, 0x8b, 0x05, 0x00, 0x00,
}
| {
return m.RemoteBitfieldBytes
} | conditional_block |
forest.go | // Package forest defines the Forest type.
package forest
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
api "github.com/kubernetes-sigs/multi-tenancy/incubator/hnc/api/v1alpha1"
)
var (
// OutOfSync is used to report a precondition failure. It's not (currently) returned from this
// package but is used externally.
OutOfSync = errors.New("The forest is out of sync with itself")
)
// TypeSyncer syncs objects of a specific type. Reconcilers implement the interface so that they can be
// called by the HierarchyReconciler if the hierarchy changes.
type TypeSyncer interface {
// SyncNamespace syncs objects of a namespace for a specific type.
SyncNamespace(context.Context, logr.Logger, string) error
// Provides the GVK that is handled by the reconciler who implements the interface.
GetGVK() schema.GroupVersionKind
// SetMode sets the propagation mode of objects that are handled by the reconciler who implements the interface.
// The method also syncs objects in the cluster for the type handled by the reconciler if necessary.
SetMode(context.Context, api.SynchronizationMode, logr.Logger) error
// GetMode gets the propagation mode of objects that are handled by the reconciler who implements the interface.
GetMode() api.SynchronizationMode
// GetNumPropagatedObjects returns the number of propagated objects on the apiserver.
GetNumPropagatedObjects() int
}
// NumObjectsSyncer syncs the number of propagated and source objects. ConfigReconciler implements the
// interface so that it can be called by an ObjectReconciler if the number of propagated or source objects is changed.
type NumObjectsSyncer interface {
SyncNumObjects(logr.Logger)
}
// Forest defines a forest of namespaces - that is, a set of trees. It includes methods to mutate
// the forest legally (ie, prevent cycles).
//
// The forest should always be locked/unlocked (via the `Lock` and `Unlock` methods) while it's
// being mutated to avoid different controllers from making inconsistent changes.
type Forest struct {
lock sync.Mutex
namespaces namedNamespaces
// types is a list of other reconcilers that HierarchyReconciler can call if the hierarchy
// changes. This will force all objects to be re-propagated.
//
// This is probably wildly inefficient, and we can probably make better use of things like
// owner references to make this better. But for a PoC, it works just fine.
//
// We put the list in the forest because the access to the list is guarded by the forest lock.
// We can also move the lock out of the forest and pass it to all reconcilers that need the lock.
// In that way, we don't need to put the list in the forest.
types []TypeSyncer
// ObjectsStatusSyncer is the ConfigReconciler that an object reconciler can call if the status of the HNCConfiguration
// object needs to be updated.
ObjectsStatusSyncer NumObjectsSyncer
}
func NewForest() *Forest {
return &Forest{
namespaces: namedNamespaces{},
types: []TypeSyncer{},
}
}
func (f *Forest) Lock() {
f.lock.Lock()
}
func (f *Forest) Unlock() {
f.lock.Unlock()
}
// AddTypeSyncer adds a reconciler to the types list.
func (f *Forest) AddTypeSyncer(nss TypeSyncer) {
f.types = append(f.types, nss)
}
// GetTypeSyncer returns the reconciler for the given GVK or nil if the reconciler
// does not exist.
func (f *Forest) GetTypeSyncer(gvk schema.GroupVersionKind) TypeSyncer {
for _, t := range f.types {
if t.GetGVK() == gvk {
return t
}
}
return nil
}
// GetTypeSyncers returns the types list.
// Retuns a copy here so that the caller does not need to hold the mutex while accessing the returned value and can modify the
// returned value without fear of corrupting the original types list.
func (f *Forest) GetTypeSyncers() []TypeSyncer {
types := make([]TypeSyncer, len(f.types))
copy(types, f.types)
return types
}
// Get returns a `Namespace` object representing a namespace in K8s.
func (f *Forest) Get(nm string) *Namespace {
if nm == "" {
// Useful in cases where "no parent" is represented by an empty string, e.g. in the HC's
// .spec.parent field.
return nil
}
ns, ok := f.namespaces[nm]
if ok {
return ns
}
ns = &Namespace{
forest: f,
name: nm,
children: namedNamespaces{},
conditions: conditions{},
originalObjects: objects{},
}
f.namespaces[nm] = ns
return ns
}
// GetNamespaceNames returns names of all namespaces in the cluster.
func (f *Forest) GetNamespaceNames() []string {
names := []string{}
for nm := range f.namespaces {
names = append(names, nm)
}
return names
}
type namedNamespaces map[string]*Namespace
// While storing the V in GVK is not strictly necessary to match what's in the HNC type configuration,
// as a client of the API server, HNC will be to be reading and writing versions of the API to communicate
// with the API server. Since we need the V to work with the API server anyways anyways, we will choose to
// use the GVK as the key in this map.
type objects map[schema.GroupVersionKind]map[string]*unstructured.Unstructured
// conditions stores the conditions for a single namespace, in the form obj -> code -> msg. Note
// that only one message can be stored per obj and code.
type conditions map[api.AffectedObject]map[api.Code]string
// Namespace represents a namespace in a forest. Other than its structure, it contains some
// properties useful to the reconcilers.
type Namespace struct {
forest *Forest
name string
parent *Namespace
children namedNamespaces
exists bool
allowCascadingDelete bool
// originalObjects store the objects created by users, identified by GVK and name.
// It serves as the source of truth for object controllers to propagate objects.
originalObjects objects
// conditions store conditions so that object propagation can be disabled if there's a problem
// on this namespace.
conditions conditions
// IsSub indicates that this namespace is being or was created solely to live as a
// subnamespace of the specified parent.
IsSub bool
// Anchors store a list of anchors in the namespace.
Anchors []string
}
// Exists returns true if the namespace exists.
func (ns *Namespace) Exists() bool {
return ns.exists
}
// SetExists marks this namespace as existing, returning true if didn't previously exist.
func (ns *Namespace) SetExists() bool {
changed := !ns.exists
ns.exists = true
return changed
}
// UnsetExists marks this namespace as missing, returning true if it previously existed. It also
// removes it from its parent, if any, since a nonexistent namespace can't have a parent.
func (ns *Namespace) UnsetExists() bool {
changed := ns.exists
ns.SetParent(nil) // Unreconciled namespaces can't specify parents
ns.exists = false
ns.clean() // clean up if this is a useless data structure
return changed
}
// clean garbage collects this namespace if it has a zero value.
func (ns *Namespace) clean() {
// Don't clean up something that either exists or is otherwise referenced.
if ns.exists || len(ns.children) > 0 {
return
}
// Remove from the forest.
delete(ns.forest.namespaces, ns.name)
}
// UpdateAllowCascadingDelete updates if this namespace allows cascading deletion.
func (ns *Namespace) UpdateAllowCascadingDelete(acd bool) {
ns.allowCascadingDelete = acd
}
// AllowsCascadingDelete returns if the namespace's or any of the owner ancestors'
// allowCascadingDelete field is set to true.
func (ns *Namespace) AllowsCascadingDelete() bool |
// SetParent modifies the namespace's parent, including updating the list of children. It may result
// in a cycle being created; this can be prevented by calling CanSetParent before, or seeing if it
// happened by calling CycleNames afterwards.
func (ns *Namespace) SetParent(p *Namespace) {
// Remove old parent and cleans it up.
if ns.parent != nil {
delete(ns.parent.children, ns.name)
if len(ns.parent.children) == 0 {
ns.parent.clean()
}
}
// Update new parent.
ns.parent = p
if p != nil {
p.children[ns.name] = ns
}
}
// CanSetParent returns the empty string if the assignment is currently legal, or a non-empty string
// indicating the reason if it cannot be done.
func (ns *Namespace) CanSetParent(p *Namespace) string {
if p == nil {
return ""
}
// Simple case
if p == ns {
return fmt.Sprintf("%q cannot be set as its own parent", p.name)
}
// Check for cycles; see if the current namespace (the proposed child) is already an ancestor of
// the proposed parent. Start at the end of the ancestry (e.g. at the proposed parent) and work
// our way up to the root.
ancestors := p.AncestryNames()
cycle := []string{}
found := false
for i := len(ancestors) - 1; !found && i >= 0; i-- {
cycle = append(cycle, ancestors[i])
found = (ancestors[i] == ns.name)
}
if found {
return fmt.Sprintf("cycle when making %q the parent of %q: current ancestry is %s",
p.name, ns.name, strings.Join(cycle, " -> "))
}
return ""
}
func (ns *Namespace) Name() string {
if ns == nil {
return "<none>"
}
return ns.name
}
func (ns *Namespace) Parent() *Namespace {
return ns.parent
}
// ChildNames returns a sorted list of names or nil if there are no children.
func (ns *Namespace) ChildNames() []string {
if len(ns.children) == 0 {
return nil
}
nms := []string{}
for k := range ns.children {
nms = append(nms, k)
}
sort.Strings(nms)
return nms
}
// RelativesNames returns the children and parent.
func (ns *Namespace) RelativesNames() []string {
a := []string{}
if ns.parent != nil {
a = append(a, ns.parent.name)
}
for k := range ns.children {
a = append(a, k)
}
return a
}
// AncestryNames returns all ancestors of this namespace. The namespace itself is the last element
// of the returned slice, with the root at the beginning of the list.
//
// This method is cycle-safe, and can be used to detect and recover from cycles. If there's a cycle,
// the first ancestor that's a member of the cycle we encounter is repeated at the beginning of the
// list.
func (ns *Namespace) AncestryNames() []string {
if ns == nil {
return nil
}
cycleCheck := map[string]bool{ns.name: true}
ancestors := []string{ns.name}
anc := ns.parent
for anc != nil {
ancestors = append([]string{anc.name}, ancestors...)
if cycleCheck[anc.name] {
return ancestors
}
cycleCheck[anc.name] = true
anc = anc.parent
}
return ancestors
}
// CycleNames returns nil if the namespace is not in a cycle, or a list of names in the cycle if
// it is. All namespaces in the cycle return the same list, which is the same as calling
// ns.AncestryNames() on the namespaces with the lexicographically smallest name.
func (ns *Namespace) CycleNames() []string {
// If this namespaces is *in* a cycle, it will be the first repeated element encountered by
// AncestryNames(), and therefore will be both the first and the last element.
ancestors := ns.AncestryNames()
if len(ancestors) == 1 || ancestors[0] != ns.name {
return nil
}
ancestors = ancestors[1:] // don't need the repeated element
// Find the smallest name and where it is
sidx := 0
snm := ancestors[0]
for idx, nm := range ancestors {
if nm < snm {
sidx = idx
snm = nm
}
}
// Rotate the slice, and then duplicate the smallest element
ancestors = append(ancestors[sidx:], ancestors[:sidx]...)
return append(ancestors, snm)
}
// SetAnchors updates the anchors and returns a difference between the new/old list.
func (ns *Namespace) SetAnchors(anchors []string) (diff []string) {
add := make(map[string]bool)
for _, nm := range anchors {
add[nm] = true
}
for _, nm := range ns.Anchors {
if add[nm] {
delete(add, nm)
} else {
// This old anchor is not in the new anchor list.
diff = append(diff, nm)
}
}
for nm, _ := range add {
// This new anchor is not in the old anchor list.
diff = append(diff, nm)
}
ns.Anchors = anchors
return
}
// SetOriginalObject updates or creates the original object in the namespace in the forest.
func (ns *Namespace) SetOriginalObject(obj *unstructured.Unstructured) {
gvk := obj.GroupVersionKind()
name := obj.GetName()
_, ok := ns.originalObjects[gvk]
if !ok {
ns.originalObjects[gvk] = map[string]*unstructured.Unstructured{}
}
ns.originalObjects[gvk][name] = obj
}
// GetOriginalObject gets an original object by name. It returns nil, if the object doesn't exist.
func (ns *Namespace) GetOriginalObject(gvk schema.GroupVersionKind, nm string) *unstructured.Unstructured {
return ns.originalObjects[gvk][nm]
}
// HasOriginalObject returns if the namespace has an original object.
func (ns *Namespace) HasOriginalObject(gvk schema.GroupVersionKind, oo string) bool {
return ns.GetOriginalObject(gvk, oo) != nil
}
// DeleteOriginalObject deletes an original object by name.
func (ns *Namespace) DeleteOriginalObject(gvk schema.GroupVersionKind, nm string) {
delete(ns.originalObjects[gvk], nm)
// Garbage collection
if len(ns.originalObjects[gvk]) == 0 {
delete(ns.originalObjects, gvk)
}
}
// GetOriginalObjects returns all original objects in the namespace.
func (ns *Namespace) GetOriginalObjects(gvk schema.GroupVersionKind) []*unstructured.Unstructured {
o := []*unstructured.Unstructured{}
for _, obj := range ns.originalObjects[gvk] {
o = append(o, obj)
}
return o
}
// GetNumOriginalObjects returns the total number of original objects of a specific GVK in the namespace.
func (ns *Namespace) GetNumOriginalObjects(gvk schema.GroupVersionKind) int {
return len(ns.originalObjects[gvk])
}
// GetPropagatedObjects returns all original copies in the ancestors.
func (ns *Namespace) GetPropagatedObjects(gvk schema.GroupVersionKind) []*unstructured.Unstructured {
o := []*unstructured.Unstructured{}
ans := ns.AncestryNames()
for _, n := range ans {
// Exclude the original objects in this namespace
if n == ns.name {
continue
}
o = append(o, ns.forest.Get(n).GetOriginalObjects(gvk)...)
}
return o
}
// GetSource returns the original copy in the ancestors if it exists.
// Otherwise, return nil.
func (ns *Namespace) GetSource(gvk schema.GroupVersionKind, name string) *unstructured.Unstructured {
pos := ns.GetPropagatedObjects(gvk)
for _, po := range pos {
if po.GetName() == name {
return po
}
}
return nil
}
// IsAncestor is *not* cycle-safe, so should only be called from namespace trees that are known not
// to have cycles.
func (ns *Namespace) IsAncestor(other *Namespace) bool {
if ns.parent == other {
return true
}
if ns.parent == nil {
return false
}
return ns.parent.IsAncestor(other)
}
// HasLocalCritCondition returns if the namespace itself has any local critical conditions, ignoring
// its ancestors. Any code with the "Crit" prefix is a critical condition.
func (ns *Namespace) HasLocalCritCondition() bool {
for code, _ := range ns.conditions[api.AffectedObject{}] {
codeNm := (string)(code)
if strings.HasPrefix(codeNm, "Crit") {
return true
}
}
return false
}
// GetCritAncestor returns the name of the first ancestor with a critical condition, or the empty
// string if there are no such ancestors. It *can* return the name of the current namespace.
func (ns *Namespace) GetCritAncestor() string {
if ns.HasLocalCritCondition() {
return ns.name
}
if ns.Parent() == nil {
return ""
}
return ns.Parent().GetCritAncestor()
}
// HasCondition returns true if there's a condition with the given object and code. If code is the
// empty string, it returns true if there's _any_ condition for the given object.
func (ns *Namespace) HasCondition(obj api.AffectedObject, code api.Code) bool {
if _, exists := ns.conditions[obj]; !exists {
// Nothing for this obj
return false
}
if code == "" {
// Something exists for this obj; we don't care what
return true
}
_, exists := ns.conditions[obj][code]
return exists
}
// ClearCondition clears conditions in the namespace for a single object. If `code` is non-empty, it
// only clears conditions with that code, otherwise it clears all conditions for that object. It
// should only be called by the code that also *sets* the conditions.
//
// It returns true if it made any changes, false otherwise.
func (ns *Namespace) ClearCondition(obj api.AffectedObject, code api.Code) bool {
if !ns.HasCondition(obj, code) {
return false
}
if code == "" {
delete(ns.conditions, obj)
} else {
delete(ns.conditions[obj], code)
}
return true
}
// ClearLocalConditions clears the condition(s) on this namespace.
func (ns *Namespace) ClearLocalConditions() bool {
return ns.ClearCondition(api.AffectedObject{}, "")
}
func (ns *Namespace) ClearObsoleteConditions(log logr.Logger) {
// Load ancestors to check CCCAncestors
isAnc := map[string]bool{}
for _, anc := range ns.AncestryNames() {
// The definition of CCCAncestor doesn't include the namespace itself
if anc != ns.name {
isAnc[anc] = true
}
}
// Load the subtree to check CCCSubtree, including the namespace itself.
isSubtree := map[string]bool{ns.name: true}
for _, dsc := range ns.DescendantNames() {
isSubtree[dsc] = true
}
// For each affected object, remove its condition if that object is no longer relevant.
for obj, codes := range ns.conditions {
for code, _ := range codes {
switch api.ClearConditionCriteria[code] {
case api.CCCManual:
// nop - cleared manually
case api.CCCAncestor:
if !isAnc[obj.Namespace] {
log.Info("Cleared obsolete condition from old ancestor", "obj", obj, "code", code)
ns.ClearCondition(obj, code)
}
case api.CCCSubtree:
if !isSubtree[obj.Namespace] {
log.Info("Cleared obsolete condition from old descendant", "obj", obj, "code", code)
ns.ClearCondition(obj, code)
}
default:
err := errors.New("no ClearConditionCriterion")
log.Error(err, "In clearObsoleteConditions", "code", code, "obj", obj)
}
}
}
}
// SetCondition sets a condition for the specified object and code, returning true if it does not
// exist previously or if the message has changed.
//
// Returns true if the condition wasn't previously set
func (ns *Namespace) SetCondition(obj api.AffectedObject, code api.Code, msg string) bool {
changed := false
if _, existed := ns.conditions[obj]; !existed {
changed = true
ns.conditions[obj] = map[api.Code]string{}
}
if oldMsg, existed := ns.conditions[obj][code]; !existed || msg != oldMsg {
changed = true
ns.conditions[obj][code] = msg
}
return changed
}
// SetLocalCondition sets a condition that applies to the current namespace.
func (ns *Namespace) SetLocalCondition(code api.Code, msg string) bool {
return ns.SetCondition(api.AffectedObject{}, code, msg)
}
// Conditions returns a list of conditions in the namespace in the format expected by the API.
func (ns *Namespace) Conditions() []api.Condition {
// Treat the code/msg combination as a combined key.
type codeMsg struct {
code api.Code
msg string
}
// Reorder so that the objects are grouped by code and message
byCM := map[codeMsg][]api.AffectedObject{}
for obj, codes := range ns.conditions {
for code, msg := range codes {
cm := codeMsg{code: code, msg: msg}
byCM[cm] = append(byCM[cm], obj)
}
}
// Flatten into a list of conditions
conds := []api.Condition{}
for cm, objs := range byCM {
// If the only affected object is unnamed (e.g., it refers to the current namespace), omit it.
c := api.Condition{Code: cm.code, Msg: cm.msg}
if len(objs) > 0 || objs[0].Name != "" {
api.SortAffectedObjects(objs)
c.Affects = objs
}
conds = append(conds, c)
}
sort.Slice(conds, func(i, j int) bool {
if conds[i].Code != conds[j].Code {
return conds[i].Code < conds[j].Code
}
return conds[i].Msg < conds[j].Msg
})
if len(conds) == 0 {
conds = nil // prevent anything from appearing in the status
}
return conds
}
// DescendantNames returns a slice of strings like ["achild", "agrandchild", "bchild", ...] of names
// of all namespaces in its subtree, or nil if the namespace has no descendents. The names are
// returned in alphabetical order (as defined by `sort.Strings()`), *not* depth-first,
// breadth-first, etc.
//
// This method is cycle-safe. If there are cycles, each namespace is only listed once.
func (ns *Namespace) DescendantNames() []string {
ds := map[string]bool{}
ns.populateDescendants(ds)
if len(ds) == 0 {
return nil
}
d := []string{}
for k, _ := range ds {
d = append(d, k)
}
sort.Strings(d)
return d
}
// populateDescendants is a cycle-safe way of finding all descendants of a namespace. If any
// namespace turns out to be its own descendant, it's skipped on subsequent encounters.
func (ns *Namespace) populateDescendants(d map[string]bool) {
for _, c := range ns.ChildNames() {
if d[c] {
continue
}
d[c] = true
cns := ns.forest.Get(c)
cns.populateDescendants(d)
}
}
| {
if ns.allowCascadingDelete == true {
return true
}
if !ns.IsSub {
return false
}
// This is a subnamespace so it must have a non-nil parent. If the parent is missing, it will
// return the default false.
//
// Subnamespaces can never be involved in cycles, since those can only occur at the "top" of a
// tree and subnamespaces cannot be roots by definition. So this line can't cause a stack
// overflow.
return ns.parent.AllowsCascadingDelete()
} | identifier_body |
forest.go | // Package forest defines the Forest type.
package forest
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
api "github.com/kubernetes-sigs/multi-tenancy/incubator/hnc/api/v1alpha1"
)
var (
// OutOfSync is used to report a precondition failure. It's not (currently) returned from this
// package but is used externally.
OutOfSync = errors.New("The forest is out of sync with itself")
)
// TypeSyncer syncs objects of a specific type. Reconcilers implement the interface so that they can be
// called by the HierarchyReconciler if the hierarchy changes.
type TypeSyncer interface {
// SyncNamespace syncs objects of a namespace for a specific type.
SyncNamespace(context.Context, logr.Logger, string) error
// Provides the GVK that is handled by the reconciler who implements the interface.
GetGVK() schema.GroupVersionKind
// SetMode sets the propagation mode of objects that are handled by the reconciler who implements the interface.
// The method also syncs objects in the cluster for the type handled by the reconciler if necessary.
SetMode(context.Context, api.SynchronizationMode, logr.Logger) error
// GetMode gets the propagation mode of objects that are handled by the reconciler who implements the interface.
GetMode() api.SynchronizationMode
// GetNumPropagatedObjects returns the number of propagated objects on the apiserver.
GetNumPropagatedObjects() int
}
// NumObjectsSyncer syncs the number of propagated and source objects. ConfigReconciler implements the
// interface so that it can be called by an ObjectReconciler if the number of propagated or source objects is changed.
type NumObjectsSyncer interface {
SyncNumObjects(logr.Logger)
}
// Forest defines a forest of namespaces - that is, a set of trees. It includes methods to mutate
// the forest legally (ie, prevent cycles).
//
// The forest should always be locked/unlocked (via the `Lock` and `Unlock` methods) while it's
// being mutated to avoid different controllers from making inconsistent changes.
type Forest struct {
lock sync.Mutex
namespaces namedNamespaces
// types is a list of other reconcilers that HierarchyReconciler can call if the hierarchy
// changes. This will force all objects to be re-propagated.
//
// This is probably wildly inefficient, and we can probably make better use of things like
// owner references to make this better. But for a PoC, it works just fine.
//
// We put the list in the forest because the access to the list is guarded by the forest lock.
// We can also move the lock out of the forest and pass it to all reconcilers that need the lock.
// In that way, we don't need to put the list in the forest.
types []TypeSyncer
// ObjectsStatusSyncer is the ConfigReconciler that an object reconciler can call if the status of the HNCConfiguration
// object needs to be updated.
ObjectsStatusSyncer NumObjectsSyncer
}
func NewForest() *Forest {
return &Forest{
namespaces: namedNamespaces{},
types: []TypeSyncer{},
}
}
func (f *Forest) Lock() {
f.lock.Lock()
}
func (f *Forest) Unlock() {
f.lock.Unlock()
}
// AddTypeSyncer adds a reconciler to the types list.
func (f *Forest) AddTypeSyncer(nss TypeSyncer) {
f.types = append(f.types, nss)
}
// GetTypeSyncer returns the reconciler for the given GVK or nil if the reconciler
// does not exist.
func (f *Forest) GetTypeSyncer(gvk schema.GroupVersionKind) TypeSyncer {
for _, t := range f.types {
if t.GetGVK() == gvk {
return t
}
}
return nil
}
// GetTypeSyncers returns the types list.
// Retuns a copy here so that the caller does not need to hold the mutex while accessing the returned value and can modify the
// returned value without fear of corrupting the original types list.
func (f *Forest) GetTypeSyncers() []TypeSyncer {
types := make([]TypeSyncer, len(f.types))
copy(types, f.types)
return types
}
// Get returns a `Namespace` object representing a namespace in K8s.
func (f *Forest) Get(nm string) *Namespace {
if nm == "" {
// Useful in cases where "no parent" is represented by an empty string, e.g. in the HC's
// .spec.parent field.
return nil
}
ns, ok := f.namespaces[nm]
if ok {
return ns
}
ns = &Namespace{
forest: f,
name: nm,
children: namedNamespaces{},
conditions: conditions{},
originalObjects: objects{},
}
f.namespaces[nm] = ns
return ns
}
// GetNamespaceNames returns names of all namespaces in the cluster.
func (f *Forest) GetNamespaceNames() []string {
names := []string{}
for nm := range f.namespaces {
names = append(names, nm)
}
return names
}
type namedNamespaces map[string]*Namespace
// While storing the V in GVK is not strictly necessary to match what's in the HNC type configuration,
// as a client of the API server, HNC will be to be reading and writing versions of the API to communicate
// with the API server. Since we need the V to work with the API server anyways anyways, we will choose to
// use the GVK as the key in this map.
type objects map[schema.GroupVersionKind]map[string]*unstructured.Unstructured
// conditions stores the conditions for a single namespace, in the form obj -> code -> msg. Note
// that only one message can be stored per obj and code.
type conditions map[api.AffectedObject]map[api.Code]string
// Namespace represents a namespace in a forest. Other than its structure, it contains some
// properties useful to the reconcilers.
type Namespace struct {
forest *Forest
name string
parent *Namespace
children namedNamespaces
exists bool
allowCascadingDelete bool
// originalObjects store the objects created by users, identified by GVK and name.
// It serves as the source of truth for object controllers to propagate objects.
originalObjects objects
// conditions store conditions so that object propagation can be disabled if there's a problem
// on this namespace.
conditions conditions
// IsSub indicates that this namespace is being or was created solely to live as a
// subnamespace of the specified parent.
IsSub bool
// Anchors store a list of anchors in the namespace.
Anchors []string
}
// Exists returns true if the namespace exists.
func (ns *Namespace) Exists() bool {
return ns.exists
}
// SetExists marks this namespace as existing, returning true if didn't previously exist.
func (ns *Namespace) SetExists() bool {
changed := !ns.exists
ns.exists = true
return changed
}
// UnsetExists marks this namespace as missing, returning true if it previously existed. It also
// removes it from its parent, if any, since a nonexistent namespace can't have a parent.
func (ns *Namespace) UnsetExists() bool {
changed := ns.exists
ns.SetParent(nil) // Unreconciled namespaces can't specify parents
ns.exists = false
ns.clean() // clean up if this is a useless data structure
return changed
}
// clean garbage collects this namespace if it has a zero value.
func (ns *Namespace) clean() {
// Don't clean up something that either exists or is otherwise referenced.
if ns.exists || len(ns.children) > 0 {
return
}
// Remove from the forest.
delete(ns.forest.namespaces, ns.name)
}
// UpdateAllowCascadingDelete updates if this namespace allows cascading deletion.
func (ns *Namespace) UpdateAllowCascadingDelete(acd bool) {
ns.allowCascadingDelete = acd
}
// AllowsCascadingDelete returns if the namespace's or any of the owner ancestors'
// allowCascadingDelete field is set to true.
func (ns *Namespace) AllowsCascadingDelete() bool {
if ns.allowCascadingDelete == true {
return true
}
if !ns.IsSub {
return false
}
// This is a subnamespace so it must have a non-nil parent. If the parent is missing, it will
// return the default false.
//
// Subnamespaces can never be involved in cycles, since those can only occur at the "top" of a
// tree and subnamespaces cannot be roots by definition. So this line can't cause a stack
// overflow.
return ns.parent.AllowsCascadingDelete()
}
// SetParent modifies the namespace's parent, including updating the list of children. It may result
// in a cycle being created; this can be prevented by calling CanSetParent before, or seeing if it
// happened by calling CycleNames afterwards.
func (ns *Namespace) SetParent(p *Namespace) {
// Remove old parent and cleans it up.
if ns.parent != nil {
delete(ns.parent.children, ns.name)
if len(ns.parent.children) == 0 {
ns.parent.clean()
}
}
// Update new parent.
ns.parent = p
if p != nil {
p.children[ns.name] = ns
}
}
// CanSetParent returns the empty string if the assignment is currently legal, or a non-empty string
// indicating the reason if it cannot be done.
func (ns *Namespace) | (p *Namespace) string {
if p == nil {
return ""
}
// Simple case
if p == ns {
return fmt.Sprintf("%q cannot be set as its own parent", p.name)
}
// Check for cycles; see if the current namespace (the proposed child) is already an ancestor of
// the proposed parent. Start at the end of the ancestry (e.g. at the proposed parent) and work
// our way up to the root.
ancestors := p.AncestryNames()
cycle := []string{}
found := false
for i := len(ancestors) - 1; !found && i >= 0; i-- {
cycle = append(cycle, ancestors[i])
found = (ancestors[i] == ns.name)
}
if found {
return fmt.Sprintf("cycle when making %q the parent of %q: current ancestry is %s",
p.name, ns.name, strings.Join(cycle, " -> "))
}
return ""
}
func (ns *Namespace) Name() string {
if ns == nil {
return "<none>"
}
return ns.name
}
func (ns *Namespace) Parent() *Namespace {
return ns.parent
}
// ChildNames returns a sorted list of names or nil if there are no children.
func (ns *Namespace) ChildNames() []string {
if len(ns.children) == 0 {
return nil
}
nms := []string{}
for k := range ns.children {
nms = append(nms, k)
}
sort.Strings(nms)
return nms
}
// RelativesNames returns the children and parent.
func (ns *Namespace) RelativesNames() []string {
a := []string{}
if ns.parent != nil {
a = append(a, ns.parent.name)
}
for k := range ns.children {
a = append(a, k)
}
return a
}
// AncestryNames returns all ancestors of this namespace. The namespace itself is the last element
// of the returned slice, with the root at the beginning of the list.
//
// This method is cycle-safe, and can be used to detect and recover from cycles. If there's a cycle,
// the first ancestor that's a member of the cycle we encounter is repeated at the beginning of the
// list.
func (ns *Namespace) AncestryNames() []string {
if ns == nil {
return nil
}
cycleCheck := map[string]bool{ns.name: true}
ancestors := []string{ns.name}
anc := ns.parent
for anc != nil {
ancestors = append([]string{anc.name}, ancestors...)
if cycleCheck[anc.name] {
return ancestors
}
cycleCheck[anc.name] = true
anc = anc.parent
}
return ancestors
}
// CycleNames returns nil if the namespace is not in a cycle, or a list of names in the cycle if
// it is. All namespaces in the cycle return the same list, which is the same as calling
// ns.AncestryNames() on the namespaces with the lexicographically smallest name.
func (ns *Namespace) CycleNames() []string {
// If this namespaces is *in* a cycle, it will be the first repeated element encountered by
// AncestryNames(), and therefore will be both the first and the last element.
ancestors := ns.AncestryNames()
if len(ancestors) == 1 || ancestors[0] != ns.name {
return nil
}
ancestors = ancestors[1:] // don't need the repeated element
// Find the smallest name and where it is
sidx := 0
snm := ancestors[0]
for idx, nm := range ancestors {
if nm < snm {
sidx = idx
snm = nm
}
}
// Rotate the slice, and then duplicate the smallest element
ancestors = append(ancestors[sidx:], ancestors[:sidx]...)
return append(ancestors, snm)
}
// SetAnchors updates the anchors and returns a difference between the new/old list.
func (ns *Namespace) SetAnchors(anchors []string) (diff []string) {
add := make(map[string]bool)
for _, nm := range anchors {
add[nm] = true
}
for _, nm := range ns.Anchors {
if add[nm] {
delete(add, nm)
} else {
// This old anchor is not in the new anchor list.
diff = append(diff, nm)
}
}
for nm, _ := range add {
// This new anchor is not in the old anchor list.
diff = append(diff, nm)
}
ns.Anchors = anchors
return
}
// SetOriginalObject updates or creates the original object in the namespace in the forest.
func (ns *Namespace) SetOriginalObject(obj *unstructured.Unstructured) {
gvk := obj.GroupVersionKind()
name := obj.GetName()
_, ok := ns.originalObjects[gvk]
if !ok {
ns.originalObjects[gvk] = map[string]*unstructured.Unstructured{}
}
ns.originalObjects[gvk][name] = obj
}
// GetOriginalObject gets an original object by name. It returns nil, if the object doesn't exist.
func (ns *Namespace) GetOriginalObject(gvk schema.GroupVersionKind, nm string) *unstructured.Unstructured {
return ns.originalObjects[gvk][nm]
}
// HasOriginalObject returns if the namespace has an original object.
func (ns *Namespace) HasOriginalObject(gvk schema.GroupVersionKind, oo string) bool {
return ns.GetOriginalObject(gvk, oo) != nil
}
// DeleteOriginalObject deletes an original object by name.
func (ns *Namespace) DeleteOriginalObject(gvk schema.GroupVersionKind, nm string) {
delete(ns.originalObjects[gvk], nm)
// Garbage collection
if len(ns.originalObjects[gvk]) == 0 {
delete(ns.originalObjects, gvk)
}
}
// GetOriginalObjects returns all original objects in the namespace.
func (ns *Namespace) GetOriginalObjects(gvk schema.GroupVersionKind) []*unstructured.Unstructured {
o := []*unstructured.Unstructured{}
for _, obj := range ns.originalObjects[gvk] {
o = append(o, obj)
}
return o
}
// GetNumOriginalObjects returns the total number of original objects of a specific GVK in the namespace.
func (ns *Namespace) GetNumOriginalObjects(gvk schema.GroupVersionKind) int {
return len(ns.originalObjects[gvk])
}
// GetPropagatedObjects returns all original copies in the ancestors.
func (ns *Namespace) GetPropagatedObjects(gvk schema.GroupVersionKind) []*unstructured.Unstructured {
o := []*unstructured.Unstructured{}
ans := ns.AncestryNames()
for _, n := range ans {
// Exclude the original objects in this namespace
if n == ns.name {
continue
}
o = append(o, ns.forest.Get(n).GetOriginalObjects(gvk)...)
}
return o
}
// GetSource returns the original copy in the ancestors if it exists.
// Otherwise, return nil.
func (ns *Namespace) GetSource(gvk schema.GroupVersionKind, name string) *unstructured.Unstructured {
pos := ns.GetPropagatedObjects(gvk)
for _, po := range pos {
if po.GetName() == name {
return po
}
}
return nil
}
// IsAncestor is *not* cycle-safe, so should only be called from namespace trees that are known not
// to have cycles.
func (ns *Namespace) IsAncestor(other *Namespace) bool {
if ns.parent == other {
return true
}
if ns.parent == nil {
return false
}
return ns.parent.IsAncestor(other)
}
// HasLocalCritCondition returns if the namespace itself has any local critical conditions, ignoring
// its ancestors. Any code with the "Crit" prefix is a critical condition.
func (ns *Namespace) HasLocalCritCondition() bool {
for code, _ := range ns.conditions[api.AffectedObject{}] {
codeNm := (string)(code)
if strings.HasPrefix(codeNm, "Crit") {
return true
}
}
return false
}
// GetCritAncestor returns the name of the first ancestor with a critical condition, or the empty
// string if there are no such ancestors. It *can* return the name of the current namespace.
func (ns *Namespace) GetCritAncestor() string {
if ns.HasLocalCritCondition() {
return ns.name
}
if ns.Parent() == nil {
return ""
}
return ns.Parent().GetCritAncestor()
}
// HasCondition returns true if there's a condition with the given object and code. If code is the
// empty string, it returns true if there's _any_ condition for the given object.
func (ns *Namespace) HasCondition(obj api.AffectedObject, code api.Code) bool {
if _, exists := ns.conditions[obj]; !exists {
// Nothing for this obj
return false
}
if code == "" {
// Something exists for this obj; we don't care what
return true
}
_, exists := ns.conditions[obj][code]
return exists
}
// ClearCondition clears conditions in the namespace for a single object. If `code` is non-empty, it
// only clears conditions with that code, otherwise it clears all conditions for that object. It
// should only be called by the code that also *sets* the conditions.
//
// It returns true if it made any changes, false otherwise.
func (ns *Namespace) ClearCondition(obj api.AffectedObject, code api.Code) bool {
if !ns.HasCondition(obj, code) {
return false
}
if code == "" {
delete(ns.conditions, obj)
} else {
delete(ns.conditions[obj], code)
}
return true
}
// ClearLocalConditions clears the condition(s) on this namespace.
func (ns *Namespace) ClearLocalConditions() bool {
return ns.ClearCondition(api.AffectedObject{}, "")
}
func (ns *Namespace) ClearObsoleteConditions(log logr.Logger) {
// Load ancestors to check CCCAncestors
isAnc := map[string]bool{}
for _, anc := range ns.AncestryNames() {
// The definition of CCCAncestor doesn't include the namespace itself
if anc != ns.name {
isAnc[anc] = true
}
}
// Load the subtree to check CCCSubtree, including the namespace itself.
isSubtree := map[string]bool{ns.name: true}
for _, dsc := range ns.DescendantNames() {
isSubtree[dsc] = true
}
// For each affected object, remove its condition if that object is no longer relevant.
for obj, codes := range ns.conditions {
for code, _ := range codes {
switch api.ClearConditionCriteria[code] {
case api.CCCManual:
// nop - cleared manually
case api.CCCAncestor:
if !isAnc[obj.Namespace] {
log.Info("Cleared obsolete condition from old ancestor", "obj", obj, "code", code)
ns.ClearCondition(obj, code)
}
case api.CCCSubtree:
if !isSubtree[obj.Namespace] {
log.Info("Cleared obsolete condition from old descendant", "obj", obj, "code", code)
ns.ClearCondition(obj, code)
}
default:
err := errors.New("no ClearConditionCriterion")
log.Error(err, "In clearObsoleteConditions", "code", code, "obj", obj)
}
}
}
}
// SetCondition sets a condition for the specified object and code, returning true if it does not
// exist previously or if the message has changed.
//
// Returns true if the condition wasn't previously set
func (ns *Namespace) SetCondition(obj api.AffectedObject, code api.Code, msg string) bool {
changed := false
if _, existed := ns.conditions[obj]; !existed {
changed = true
ns.conditions[obj] = map[api.Code]string{}
}
if oldMsg, existed := ns.conditions[obj][code]; !existed || msg != oldMsg {
changed = true
ns.conditions[obj][code] = msg
}
return changed
}
// SetLocalCondition sets a condition that applies to the current namespace.
func (ns *Namespace) SetLocalCondition(code api.Code, msg string) bool {
return ns.SetCondition(api.AffectedObject{}, code, msg)
}
// Conditions returns a list of conditions in the namespace in the format expected by the API.
func (ns *Namespace) Conditions() []api.Condition {
// Treat the code/msg combination as a combined key.
type codeMsg struct {
code api.Code
msg string
}
// Reorder so that the objects are grouped by code and message
byCM := map[codeMsg][]api.AffectedObject{}
for obj, codes := range ns.conditions {
for code, msg := range codes {
cm := codeMsg{code: code, msg: msg}
byCM[cm] = append(byCM[cm], obj)
}
}
// Flatten into a list of conditions
conds := []api.Condition{}
for cm, objs := range byCM {
// If the only affected object is unnamed (e.g., it refers to the current namespace), omit it.
c := api.Condition{Code: cm.code, Msg: cm.msg}
if len(objs) > 0 || objs[0].Name != "" {
api.SortAffectedObjects(objs)
c.Affects = objs
}
conds = append(conds, c)
}
sort.Slice(conds, func(i, j int) bool {
if conds[i].Code != conds[j].Code {
return conds[i].Code < conds[j].Code
}
return conds[i].Msg < conds[j].Msg
})
if len(conds) == 0 {
conds = nil // prevent anything from appearing in the status
}
return conds
}
// DescendantNames returns a slice of strings like ["achild", "agrandchild", "bchild", ...] of names
// of all namespaces in its subtree, or nil if the namespace has no descendents. The names are
// returned in alphabetical order (as defined by `sort.Strings()`), *not* depth-first,
// breadth-first, etc.
//
// This method is cycle-safe. If there are cycles, each namespace is only listed once.
func (ns *Namespace) DescendantNames() []string {
ds := map[string]bool{}
ns.populateDescendants(ds)
if len(ds) == 0 {
return nil
}
d := []string{}
for k, _ := range ds {
d = append(d, k)
}
sort.Strings(d)
return d
}
// populateDescendants is a cycle-safe way of finding all descendants of a namespace. If any
// namespace turns out to be its own descendant, it's skipped on subsequent encounters.
func (ns *Namespace) populateDescendants(d map[string]bool) {
for _, c := range ns.ChildNames() {
if d[c] {
continue
}
d[c] = true
cns := ns.forest.Get(c)
cns.populateDescendants(d)
}
}
| CanSetParent | identifier_name |
forest.go | // Package forest defines the Forest type.
package forest
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
api "github.com/kubernetes-sigs/multi-tenancy/incubator/hnc/api/v1alpha1"
)
var (
// OutOfSync is used to report a precondition failure. It's not (currently) returned from this
// package but is used externally.
OutOfSync = errors.New("The forest is out of sync with itself")
)
// TypeSyncer syncs objects of a specific type. Reconcilers implement the interface so that they can be
// called by the HierarchyReconciler if the hierarchy changes.
type TypeSyncer interface {
// SyncNamespace syncs objects of a namespace for a specific type.
SyncNamespace(context.Context, logr.Logger, string) error
// Provides the GVK that is handled by the reconciler who implements the interface.
GetGVK() schema.GroupVersionKind
// SetMode sets the propagation mode of objects that are handled by the reconciler who implements the interface.
// The method also syncs objects in the cluster for the type handled by the reconciler if necessary.
SetMode(context.Context, api.SynchronizationMode, logr.Logger) error
// GetMode gets the propagation mode of objects that are handled by the reconciler who implements the interface.
GetMode() api.SynchronizationMode
// GetNumPropagatedObjects returns the number of propagated objects on the apiserver.
GetNumPropagatedObjects() int
}
// NumObjectsSyncer syncs the number of propagated and source objects. ConfigReconciler implements the
// interface so that it can be called by an ObjectReconciler if the number of propagated or source objects is changed.
type NumObjectsSyncer interface {
SyncNumObjects(logr.Logger)
}
// Forest defines a forest of namespaces - that is, a set of trees. It includes methods to mutate
// the forest legally (ie, prevent cycles).
//
// The forest should always be locked/unlocked (via the `Lock` and `Unlock` methods) while it's
// being mutated to avoid different controllers from making inconsistent changes.
type Forest struct {
lock sync.Mutex
namespaces namedNamespaces
// types is a list of other reconcilers that HierarchyReconciler can call if the hierarchy
// changes. This will force all objects to be re-propagated.
//
// This is probably wildly inefficient, and we can probably make better use of things like
// owner references to make this better. But for a PoC, it works just fine.
//
// We put the list in the forest because the access to the list is guarded by the forest lock.
// We can also move the lock out of the forest and pass it to all reconcilers that need the lock.
// In that way, we don't need to put the list in the forest.
types []TypeSyncer
// ObjectsStatusSyncer is the ConfigReconciler that an object reconciler can call if the status of the HNCConfiguration
// object needs to be updated.
ObjectsStatusSyncer NumObjectsSyncer
}
func NewForest() *Forest {
return &Forest{
namespaces: namedNamespaces{},
types: []TypeSyncer{},
}
}
func (f *Forest) Lock() {
f.lock.Lock()
}
func (f *Forest) Unlock() {
f.lock.Unlock()
}
// AddTypeSyncer adds a reconciler to the types list.
func (f *Forest) AddTypeSyncer(nss TypeSyncer) {
f.types = append(f.types, nss)
}
// GetTypeSyncer returns the reconciler for the given GVK or nil if the reconciler
// does not exist.
func (f *Forest) GetTypeSyncer(gvk schema.GroupVersionKind) TypeSyncer {
for _, t := range f.types {
if t.GetGVK() == gvk {
return t
}
}
return nil
}
// GetTypeSyncers returns the types list.
// Retuns a copy here so that the caller does not need to hold the mutex while accessing the returned value and can modify the
// returned value without fear of corrupting the original types list.
func (f *Forest) GetTypeSyncers() []TypeSyncer {
types := make([]TypeSyncer, len(f.types))
copy(types, f.types)
return types
}
// Get returns a `Namespace` object representing a namespace in K8s.
func (f *Forest) Get(nm string) *Namespace {
if nm == "" {
// Useful in cases where "no parent" is represented by an empty string, e.g. in the HC's
// .spec.parent field.
return nil
}
ns, ok := f.namespaces[nm]
if ok {
return ns
}
ns = &Namespace{
forest: f,
name: nm,
children: namedNamespaces{},
conditions: conditions{},
originalObjects: objects{},
}
f.namespaces[nm] = ns
return ns
}
// GetNamespaceNames returns names of all namespaces in the cluster.
func (f *Forest) GetNamespaceNames() []string {
names := []string{}
for nm := range f.namespaces {
names = append(names, nm)
}
return names
}
type namedNamespaces map[string]*Namespace
// While storing the V in GVK is not strictly necessary to match what's in the HNC type configuration,
// as a client of the API server, HNC will be to be reading and writing versions of the API to communicate
// with the API server. Since we need the V to work with the API server anyways anyways, we will choose to
// use the GVK as the key in this map.
type objects map[schema.GroupVersionKind]map[string]*unstructured.Unstructured
// conditions stores the conditions for a single namespace, in the form obj -> code -> msg. Note
// that only one message can be stored per obj and code.
type conditions map[api.AffectedObject]map[api.Code]string
// Namespace represents a namespace in a forest. Other than its structure, it contains some
// properties useful to the reconcilers.
type Namespace struct {
forest *Forest
name string
parent *Namespace
children namedNamespaces
exists bool
allowCascadingDelete bool
// originalObjects store the objects created by users, identified by GVK and name.
// It serves as the source of truth for object controllers to propagate objects.
originalObjects objects
// conditions store conditions so that object propagation can be disabled if there's a problem
// on this namespace.
conditions conditions
// IsSub indicates that this namespace is being or was created solely to live as a
// subnamespace of the specified parent.
IsSub bool
// Anchors store a list of anchors in the namespace.
Anchors []string
}
// Exists returns true if the namespace exists.
func (ns *Namespace) Exists() bool {
return ns.exists
}
// SetExists marks this namespace as existing, returning true if didn't previously exist.
func (ns *Namespace) SetExists() bool {
changed := !ns.exists
ns.exists = true
return changed
}
// UnsetExists marks this namespace as missing, returning true if it previously existed. It also
// removes it from its parent, if any, since a nonexistent namespace can't have a parent.
func (ns *Namespace) UnsetExists() bool {
changed := ns.exists
ns.SetParent(nil) // Unreconciled namespaces can't specify parents
ns.exists = false
ns.clean() // clean up if this is a useless data structure
return changed
}
// clean garbage collects this namespace if it has a zero value.
func (ns *Namespace) clean() {
// Don't clean up something that either exists or is otherwise referenced.
if ns.exists || len(ns.children) > 0 {
return
}
// Remove from the forest.
delete(ns.forest.namespaces, ns.name)
}
// UpdateAllowCascadingDelete updates if this namespace allows cascading deletion.
func (ns *Namespace) UpdateAllowCascadingDelete(acd bool) {
ns.allowCascadingDelete = acd
}
// AllowsCascadingDelete returns if the namespace's or any of the owner ancestors'
// allowCascadingDelete field is set to true.
func (ns *Namespace) AllowsCascadingDelete() bool {
if ns.allowCascadingDelete == true {
return true
}
if !ns.IsSub {
return false
}
// This is a subnamespace so it must have a non-nil parent. If the parent is missing, it will
// return the default false.
//
// Subnamespaces can never be involved in cycles, since those can only occur at the "top" of a
// tree and subnamespaces cannot be roots by definition. So this line can't cause a stack
// overflow.
return ns.parent.AllowsCascadingDelete()
}
// SetParent modifies the namespace's parent, including updating the list of children. It may result
// in a cycle being created; this can be prevented by calling CanSetParent before, or seeing if it
// happened by calling CycleNames afterwards.
func (ns *Namespace) SetParent(p *Namespace) {
// Remove old parent and cleans it up.
if ns.parent != nil {
delete(ns.parent.children, ns.name)
if len(ns.parent.children) == 0 {
ns.parent.clean()
}
}
// Update new parent.
ns.parent = p
if p != nil {
p.children[ns.name] = ns
}
}
// CanSetParent returns the empty string if the assignment is currently legal, or a non-empty string
// indicating the reason if it cannot be done.
func (ns *Namespace) CanSetParent(p *Namespace) string {
if p == nil {
return ""
}
// Simple case
if p == ns {
return fmt.Sprintf("%q cannot be set as its own parent", p.name)
}
// Check for cycles; see if the current namespace (the proposed child) is already an ancestor of
// the proposed parent. Start at the end of the ancestry (e.g. at the proposed parent) and work
// our way up to the root.
ancestors := p.AncestryNames()
cycle := []string{}
found := false
for i := len(ancestors) - 1; !found && i >= 0; i-- {
cycle = append(cycle, ancestors[i])
found = (ancestors[i] == ns.name)
}
if found {
return fmt.Sprintf("cycle when making %q the parent of %q: current ancestry is %s",
p.name, ns.name, strings.Join(cycle, " -> "))
}
return ""
}
func (ns *Namespace) Name() string {
if ns == nil {
return "<none>"
}
return ns.name
}
func (ns *Namespace) Parent() *Namespace {
return ns.parent
}
// ChildNames returns a sorted list of names or nil if there are no children.
func (ns *Namespace) ChildNames() []string {
if len(ns.children) == 0 {
return nil
}
nms := []string{}
for k := range ns.children {
nms = append(nms, k)
}
sort.Strings(nms)
return nms
}
// RelativesNames returns the children and parent.
func (ns *Namespace) RelativesNames() []string {
a := []string{}
if ns.parent != nil {
a = append(a, ns.parent.name)
}
for k := range ns.children {
a = append(a, k)
}
return a
}
// AncestryNames returns all ancestors of this namespace. The namespace itself is the last element
// of the returned slice, with the root at the beginning of the list.
//
// This method is cycle-safe, and can be used to detect and recover from cycles. If there's a cycle,
// the first ancestor that's a member of the cycle we encounter is repeated at the beginning of the
// list.
func (ns *Namespace) AncestryNames() []string {
if ns == nil {
return nil
}
cycleCheck := map[string]bool{ns.name: true}
ancestors := []string{ns.name}
anc := ns.parent
for anc != nil {
ancestors = append([]string{anc.name}, ancestors...)
if cycleCheck[anc.name] {
return ancestors
}
cycleCheck[anc.name] = true
anc = anc.parent
}
return ancestors
}
// CycleNames returns nil if the namespace is not in a cycle, or a list of names in the cycle if
// it is. All namespaces in the cycle return the same list, which is the same as calling
// ns.AncestryNames() on the namespaces with the lexicographically smallest name.
func (ns *Namespace) CycleNames() []string {
// If this namespaces is *in* a cycle, it will be the first repeated element encountered by
// AncestryNames(), and therefore will be both the first and the last element.
ancestors := ns.AncestryNames()
if len(ancestors) == 1 || ancestors[0] != ns.name {
return nil
}
ancestors = ancestors[1:] // don't need the repeated element
// Find the smallest name and where it is
sidx := 0
snm := ancestors[0]
for idx, nm := range ancestors {
if nm < snm {
sidx = idx
snm = nm
}
}
// Rotate the slice, and then duplicate the smallest element
ancestors = append(ancestors[sidx:], ancestors[:sidx]...)
return append(ancestors, snm)
}
// SetAnchors updates the anchors and returns a difference between the new/old list.
func (ns *Namespace) SetAnchors(anchors []string) (diff []string) {
add := make(map[string]bool)
for _, nm := range anchors {
add[nm] = true
}
for _, nm := range ns.Anchors {
if add[nm] {
delete(add, nm)
} else {
// This old anchor is not in the new anchor list.
diff = append(diff, nm)
}
}
for nm, _ := range add {
// This new anchor is not in the old anchor list.
diff = append(diff, nm)
}
ns.Anchors = anchors
return
}
// SetOriginalObject updates or creates the original object in the namespace in the forest.
func (ns *Namespace) SetOriginalObject(obj *unstructured.Unstructured) {
gvk := obj.GroupVersionKind()
name := obj.GetName()
_, ok := ns.originalObjects[gvk]
if !ok {
ns.originalObjects[gvk] = map[string]*unstructured.Unstructured{}
}
ns.originalObjects[gvk][name] = obj
}
// GetOriginalObject gets an original object by name. It returns nil, if the object doesn't exist.
func (ns *Namespace) GetOriginalObject(gvk schema.GroupVersionKind, nm string) *unstructured.Unstructured {
return ns.originalObjects[gvk][nm]
}
// HasOriginalObject returns if the namespace has an original object.
func (ns *Namespace) HasOriginalObject(gvk schema.GroupVersionKind, oo string) bool {
return ns.GetOriginalObject(gvk, oo) != nil
}
// DeleteOriginalObject deletes an original object by name.
func (ns *Namespace) DeleteOriginalObject(gvk schema.GroupVersionKind, nm string) {
delete(ns.originalObjects[gvk], nm)
// Garbage collection
if len(ns.originalObjects[gvk]) == 0 {
delete(ns.originalObjects, gvk)
}
}
// GetOriginalObjects returns all original objects in the namespace.
func (ns *Namespace) GetOriginalObjects(gvk schema.GroupVersionKind) []*unstructured.Unstructured {
o := []*unstructured.Unstructured{}
for _, obj := range ns.originalObjects[gvk] {
o = append(o, obj)
}
return o
}
// GetNumOriginalObjects returns the total number of original objects of a specific GVK in the namespace.
func (ns *Namespace) GetNumOriginalObjects(gvk schema.GroupVersionKind) int {
return len(ns.originalObjects[gvk])
}
// GetPropagatedObjects returns all original copies in the ancestors.
func (ns *Namespace) GetPropagatedObjects(gvk schema.GroupVersionKind) []*unstructured.Unstructured {
o := []*unstructured.Unstructured{}
ans := ns.AncestryNames()
for _, n := range ans {
// Exclude the original objects in this namespace
if n == ns.name {
continue
}
o = append(o, ns.forest.Get(n).GetOriginalObjects(gvk)...)
}
return o
}
// GetSource returns the original copy in the ancestors if it exists. | pos := ns.GetPropagatedObjects(gvk)
for _, po := range pos {
if po.GetName() == name {
return po
}
}
return nil
}
// IsAncestor is *not* cycle-safe, so should only be called from namespace trees that are known not
// to have cycles.
func (ns *Namespace) IsAncestor(other *Namespace) bool {
if ns.parent == other {
return true
}
if ns.parent == nil {
return false
}
return ns.parent.IsAncestor(other)
}
// HasLocalCritCondition returns if the namespace itself has any local critical conditions, ignoring
// its ancestors. Any code with the "Crit" prefix is a critical condition.
func (ns *Namespace) HasLocalCritCondition() bool {
for code, _ := range ns.conditions[api.AffectedObject{}] {
codeNm := (string)(code)
if strings.HasPrefix(codeNm, "Crit") {
return true
}
}
return false
}
// GetCritAncestor returns the name of the first ancestor with a critical condition, or the empty
// string if there are no such ancestors. It *can* return the name of the current namespace.
func (ns *Namespace) GetCritAncestor() string {
if ns.HasLocalCritCondition() {
return ns.name
}
if ns.Parent() == nil {
return ""
}
return ns.Parent().GetCritAncestor()
}
// HasCondition returns true if there's a condition with the given object and code. If code is the
// empty string, it returns true if there's _any_ condition for the given object.
func (ns *Namespace) HasCondition(obj api.AffectedObject, code api.Code) bool {
if _, exists := ns.conditions[obj]; !exists {
// Nothing for this obj
return false
}
if code == "" {
// Something exists for this obj; we don't care what
return true
}
_, exists := ns.conditions[obj][code]
return exists
}
// ClearCondition clears conditions in the namespace for a single object. If `code` is non-empty, it
// only clears conditions with that code, otherwise it clears all conditions for that object. It
// should only be called by the code that also *sets* the conditions.
//
// It returns true if it made any changes, false otherwise.
func (ns *Namespace) ClearCondition(obj api.AffectedObject, code api.Code) bool {
if !ns.HasCondition(obj, code) {
return false
}
if code == "" {
delete(ns.conditions, obj)
} else {
delete(ns.conditions[obj], code)
}
return true
}
// ClearLocalConditions clears the condition(s) on this namespace.
func (ns *Namespace) ClearLocalConditions() bool {
return ns.ClearCondition(api.AffectedObject{}, "")
}
func (ns *Namespace) ClearObsoleteConditions(log logr.Logger) {
// Load ancestors to check CCCAncestors
isAnc := map[string]bool{}
for _, anc := range ns.AncestryNames() {
// The definition of CCCAncestor doesn't include the namespace itself
if anc != ns.name {
isAnc[anc] = true
}
}
// Load the subtree to check CCCSubtree, including the namespace itself.
isSubtree := map[string]bool{ns.name: true}
for _, dsc := range ns.DescendantNames() {
isSubtree[dsc] = true
}
// For each affected object, remove its condition if that object is no longer relevant.
for obj, codes := range ns.conditions {
for code, _ := range codes {
switch api.ClearConditionCriteria[code] {
case api.CCCManual:
// nop - cleared manually
case api.CCCAncestor:
if !isAnc[obj.Namespace] {
log.Info("Cleared obsolete condition from old ancestor", "obj", obj, "code", code)
ns.ClearCondition(obj, code)
}
case api.CCCSubtree:
if !isSubtree[obj.Namespace] {
log.Info("Cleared obsolete condition from old descendant", "obj", obj, "code", code)
ns.ClearCondition(obj, code)
}
default:
err := errors.New("no ClearConditionCriterion")
log.Error(err, "In clearObsoleteConditions", "code", code, "obj", obj)
}
}
}
}
// SetCondition sets a condition for the specified object and code, returning true if it does not
// exist previously or if the message has changed.
//
// Returns true if the condition wasn't previously set
func (ns *Namespace) SetCondition(obj api.AffectedObject, code api.Code, msg string) bool {
changed := false
if _, existed := ns.conditions[obj]; !existed {
changed = true
ns.conditions[obj] = map[api.Code]string{}
}
if oldMsg, existed := ns.conditions[obj][code]; !existed || msg != oldMsg {
changed = true
ns.conditions[obj][code] = msg
}
return changed
}
// SetLocalCondition sets a condition that applies to the current namespace.
func (ns *Namespace) SetLocalCondition(code api.Code, msg string) bool {
return ns.SetCondition(api.AffectedObject{}, code, msg)
}
// Conditions returns a list of conditions in the namespace in the format expected by the API.
func (ns *Namespace) Conditions() []api.Condition {
// Treat the code/msg combination as a combined key.
type codeMsg struct {
code api.Code
msg string
}
// Reorder so that the objects are grouped by code and message
byCM := map[codeMsg][]api.AffectedObject{}
for obj, codes := range ns.conditions {
for code, msg := range codes {
cm := codeMsg{code: code, msg: msg}
byCM[cm] = append(byCM[cm], obj)
}
}
// Flatten into a list of conditions
conds := []api.Condition{}
for cm, objs := range byCM {
// If the only affected object is unnamed (e.g., it refers to the current namespace), omit it.
c := api.Condition{Code: cm.code, Msg: cm.msg}
if len(objs) > 0 || objs[0].Name != "" {
api.SortAffectedObjects(objs)
c.Affects = objs
}
conds = append(conds, c)
}
sort.Slice(conds, func(i, j int) bool {
if conds[i].Code != conds[j].Code {
return conds[i].Code < conds[j].Code
}
return conds[i].Msg < conds[j].Msg
})
if len(conds) == 0 {
conds = nil // prevent anything from appearing in the status
}
return conds
}
// DescendantNames returns a slice of strings like ["achild", "agrandchild", "bchild", ...] of names
// of all namespaces in its subtree, or nil if the namespace has no descendents. The names are
// returned in alphabetical order (as defined by `sort.Strings()`), *not* depth-first,
// breadth-first, etc.
//
// This method is cycle-safe. If there are cycles, each namespace is only listed once.
func (ns *Namespace) DescendantNames() []string {
ds := map[string]bool{}
ns.populateDescendants(ds)
if len(ds) == 0 {
return nil
}
d := []string{}
for k, _ := range ds {
d = append(d, k)
}
sort.Strings(d)
return d
}
// populateDescendants is a cycle-safe way of finding all descendants of a namespace. If any
// namespace turns out to be its own descendant, it's skipped on subsequent encounters.
func (ns *Namespace) populateDescendants(d map[string]bool) {
for _, c := range ns.ChildNames() {
if d[c] {
continue
}
d[c] = true
cns := ns.forest.Get(c)
cns.populateDescendants(d)
}
} | // Otherwise, return nil.
func (ns *Namespace) GetSource(gvk schema.GroupVersionKind, name string) *unstructured.Unstructured { | random_line_split |
forest.go | // Package forest defines the Forest type.
package forest
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
api "github.com/kubernetes-sigs/multi-tenancy/incubator/hnc/api/v1alpha1"
)
var (
// OutOfSync is used to report a precondition failure. It's not (currently) returned from this
// package but is used externally.
OutOfSync = errors.New("The forest is out of sync with itself")
)
// TypeSyncer syncs objects of a specific type. Reconcilers implement the interface so that they can be
// called by the HierarchyReconciler if the hierarchy changes.
type TypeSyncer interface {
// SyncNamespace syncs objects of a namespace for a specific type.
SyncNamespace(context.Context, logr.Logger, string) error
// Provides the GVK that is handled by the reconciler who implements the interface.
GetGVK() schema.GroupVersionKind
// SetMode sets the propagation mode of objects that are handled by the reconciler who implements the interface.
// The method also syncs objects in the cluster for the type handled by the reconciler if necessary.
SetMode(context.Context, api.SynchronizationMode, logr.Logger) error
// GetMode gets the propagation mode of objects that are handled by the reconciler who implements the interface.
GetMode() api.SynchronizationMode
// GetNumPropagatedObjects returns the number of propagated objects on the apiserver.
GetNumPropagatedObjects() int
}
// NumObjectsSyncer syncs the number of propagated and source objects. ConfigReconciler implements the
// interface so that it can be called by an ObjectReconciler if the number of propagated or source objects is changed.
type NumObjectsSyncer interface {
SyncNumObjects(logr.Logger)
}
// Forest defines a forest of namespaces - that is, a set of trees. It includes methods to mutate
// the forest legally (ie, prevent cycles).
//
// The forest should always be locked/unlocked (via the `Lock` and `Unlock` methods) while it's
// being mutated to avoid different controllers from making inconsistent changes.
type Forest struct {
lock sync.Mutex
namespaces namedNamespaces
// types is a list of other reconcilers that HierarchyReconciler can call if the hierarchy
// changes. This will force all objects to be re-propagated.
//
// This is probably wildly inefficient, and we can probably make better use of things like
// owner references to make this better. But for a PoC, it works just fine.
//
// We put the list in the forest because the access to the list is guarded by the forest lock.
// We can also move the lock out of the forest and pass it to all reconcilers that need the lock.
// In that way, we don't need to put the list in the forest.
types []TypeSyncer
// ObjectsStatusSyncer is the ConfigReconciler that an object reconciler can call if the status of the HNCConfiguration
// object needs to be updated.
ObjectsStatusSyncer NumObjectsSyncer
}
// NewForest returns an empty forest: no namespaces and no registered
// type syncers.
func NewForest() *Forest {
	f := &Forest{}
	f.namespaces = namedNamespaces{}
	f.types = make([]TypeSyncer, 0)
	return f
}
func (f *Forest) Lock() {
f.lock.Lock()
}
func (f *Forest) Unlock() {
f.lock.Unlock()
}
// AddTypeSyncer registers a type-specific reconciler so that the hierarchy
// reconciler can re-sync its objects when the hierarchy changes. Per the
// Forest struct comment, access to this list is guarded by the forest lock,
// so callers must hold it.
func (f *Forest) AddTypeSyncer(nss TypeSyncer) {
	f.types = append(f.types, nss)
}
// GetTypeSyncer returns the reconciler for the given GVK or nil if the reconciler
// does not exist.
func (f *Forest) GetTypeSyncer(gvk schema.GroupVersionKind) TypeSyncer {
	var found TypeSyncer
	for _, ts := range f.types {
		if ts.GetGVK() == gvk {
			found = ts
			break
		}
	}
	return found
}
// GetTypeSyncers returns a copy of the types list so that the caller neither
// needs to hold the forest lock while reading it, nor risks corrupting the
// original list by modifying the returned slice.
func (f *Forest) GetTypeSyncers() []TypeSyncer {
	snapshot := append(make([]TypeSyncer, 0, len(f.types)), f.types...)
	return snapshot
}
// Get returns a `Namespace` object representing a namespace in K8s.
//
// The forest materializes namespaces lazily: if nm has no entry yet, an empty
// placeholder (exists == false) is created, cached, and returned, so Get never
// returns nil for a non-empty name.
func (f *Forest) Get(nm string) *Namespace {
	if nm == "" {
		// Useful in cases where "no parent" is represented by an empty string, e.g. in the HC's
		// .spec.parent field.
		return nil
	}
	ns, ok := f.namespaces[nm]
	if ok {
		return ns
	}
	// Not cached yet -- create a placeholder with empty child/condition/object maps.
	ns = &Namespace{
		forest: f,
		name: nm,
		children: namedNamespaces{},
		conditions: conditions{},
		originalObjects: objects{},
	}
	f.namespaces[nm] = ns
	return ns
}
// GetNamespaceNames returns names of all namespaces in the cluster.
func (f *Forest) GetNamespaceNames() []string {
	names := make([]string, 0, len(f.namespaces))
	for nm := range f.namespaces {
		names = append(names, nm)
	}
	return names
}
type namedNamespaces map[string]*Namespace
// While storing the V in GVK is not strictly necessary to match what's in the HNC type configuration,
// as a client of the API server, HNC will be to be reading and writing versions of the API to communicate
// with the API server. Since we need the V to work with the API server anyways anyways, we will choose to
// use the GVK as the key in this map.
type objects map[schema.GroupVersionKind]map[string]*unstructured.Unstructured
// conditions stores the conditions for a single namespace, in the form obj -> code -> msg. Note
// that only one message can be stored per obj and code.
type conditions map[api.AffectedObject]map[api.Code]string
// Namespace represents a namespace in a forest. Other than its structure, it contains some
// properties useful to the reconcilers.
type Namespace struct {
forest *Forest
name string
parent *Namespace
children namedNamespaces
exists bool
allowCascadingDelete bool
// originalObjects store the objects created by users, identified by GVK and name.
// It serves as the source of truth for object controllers to propagate objects.
originalObjects objects
// conditions store conditions so that object propagation can be disabled if there's a problem
// on this namespace.
conditions conditions
// IsSub indicates that this namespace is being or was created solely to live as a
// subnamespace of the specified parent.
IsSub bool
// Anchors store a list of anchors in the namespace.
Anchors []string
}
// Exists returns true if the namespace exists.
func (ns *Namespace) Exists() bool {
return ns.exists
}
// SetExists marks this namespace as existing, returning true if didn't previously exist.
func (ns *Namespace) SetExists() bool {
changed := !ns.exists
ns.exists = true
return changed
}
// UnsetExists marks this namespace as missing, returning true if it previously existed. It also
// removes it from its parent, if any, since a nonexistent namespace can't have a parent.
func (ns *Namespace) UnsetExists() bool {
changed := ns.exists
ns.SetParent(nil) // Unreconciled namespaces can't specify parents
ns.exists = false
ns.clean() // clean up if this is a useless data structure
return changed
}
// clean garbage collects this namespace if it has a zero value.
func (ns *Namespace) clean() {
	// A namespace that exists, or that is still referenced by children, must
	// be kept around.
	if ns.exists {
		return
	}
	if len(ns.children) > 0 {
		return
	}
	// Nothing references it anymore -- drop it from the forest.
	delete(ns.forest.namespaces, ns.name)
}
// UpdateAllowCascadingDelete updates if this namespace allows cascading deletion.
func (ns *Namespace) UpdateAllowCascadingDelete(acd bool) {
ns.allowCascadingDelete = acd
}
// AllowsCascadingDelete returns if the namespace's or any of the owner ancestors'
// allowCascadingDelete field is set to true.
func (ns *Namespace) AllowsCascadingDelete() bool {
	// Idiomatic boolean test (was `== true`).
	if ns.allowCascadingDelete {
		return true
	}
	if !ns.IsSub {
		return false
	}
	// This is a subnamespace so it must have a non-nil parent. If the parent is missing, it will
	// return the default false.
	//
	// Subnamespaces can never be involved in cycles, since those can only occur at the "top" of a
	// tree and subnamespaces cannot be roots by definition. So this line can't cause a stack
	// overflow.
	return ns.parent.AllowsCascadingDelete()
}
// SetParent modifies the namespace's parent, including updating the list of children. It may result
// in a cycle being created; this can be prevented by calling CanSetParent before, or seeing if it
// happened by calling CycleNames afterwards.
//
// Passing nil detaches the namespace from its current parent.
func (ns *Namespace) SetParent(p *Namespace) {
	// Remove old parent and cleans it up.
	if ns.parent != nil {
		delete(ns.parent.children, ns.name)
		// The old parent may now be an unreferenced placeholder; garbage-collect it.
		if len(ns.parent.children) == 0 {
			ns.parent.clean()
		}
	}
	// Update new parent.
	ns.parent = p
	if p != nil {
		p.children[ns.name] = ns
	}
}
// CanSetParent returns the empty string if the assignment is currently legal, or a non-empty string
// indicating the reason if it cannot be done.
func (ns *Namespace) CanSetParent(p *Namespace) string {
	// A nil parent (detach) is always legal.
	if p == nil {
		return ""
	}
	// Simple case: a namespace cannot be its own parent.
	if p == ns {
		return fmt.Sprintf("%q cannot be set as its own parent", p.name)
	}
	// Check for cycles; see if the current namespace (the proposed child) is already an ancestor of
	// the proposed parent. Start at the end of the ancestry (e.g. at the proposed parent) and work
	// our way up to the root.
	ancestors := p.AncestryNames()
	cycle := []string{}
	found := false
	for i := len(ancestors) - 1; !found && i >= 0; i-- {
		cycle = append(cycle, ancestors[i])
		found = (ancestors[i] == ns.name)
	}
	if found {
		// `cycle` now holds the path from the proposed parent up to ns, for the error message.
		return fmt.Sprintf("cycle when making %q the parent of %q: current ancestry is %s",
			p.name, ns.name, strings.Join(cycle, " -> "))
	}
	return ""
}
func (ns *Namespace) Name() string {
if ns == nil {
return "<none>"
}
return ns.name
}
func (ns *Namespace) Parent() *Namespace {
return ns.parent
}
// ChildNames returns a sorted list of names or nil if there are no children.
func (ns *Namespace) ChildNames() []string {
if len(ns.children) == 0 {
return nil
}
nms := []string{}
for k := range ns.children {
nms = append(nms, k)
}
sort.Strings(nms)
return nms
}
// RelativesNames returns the children and parent.
func (ns *Namespace) RelativesNames() []string {
a := []string{}
if ns.parent != nil {
a = append(a, ns.parent.name)
}
for k := range ns.children {
a = append(a, k)
}
return a
}
// AncestryNames returns all ancestors of this namespace. The namespace itself is the last element
// of the returned slice, with the root at the beginning of the list.
//
// This method is cycle-safe, and can be used to detect and recover from cycles. If there's a cycle,
// the first ancestor that's a member of the cycle we encounter is repeated at the beginning of the
// list.
func (ns *Namespace) AncestryNames() []string {
	// Nil receiver: no ancestry.
	if ns == nil {
		return nil
	}
	// cycleCheck records every name already emitted; hitting one again means a cycle.
	cycleCheck := map[string]bool{ns.name: true}
	ancestors := []string{ns.name}
	anc := ns.parent
	for anc != nil {
		// Prepend so the root ends up first.
		ancestors = append([]string{anc.name}, ancestors...)
		if cycleCheck[anc.name] {
			// Cycle detected: stop, leaving the repeated member at index 0.
			return ancestors
		}
		cycleCheck[anc.name] = true
		anc = anc.parent
	}
	return ancestors
}
// CycleNames returns nil if the namespace is not in a cycle, or a list of names in the cycle if
// it is. All namespaces in the cycle return the same list, which is the same as calling
// ns.AncestryNames() on the namespaces with the lexicographically smallest name.
func (ns *Namespace) CycleNames() []string {
// If this namespaces is *in* a cycle, it will be the first repeated element encountered by
// AncestryNames(), and therefore will be both the first and the last element.
ancestors := ns.AncestryNames()
if len(ancestors) == 1 || ancestors[0] != ns.name |
ancestors = ancestors[1:] // don't need the repeated element
// Find the smallest name and where it is
sidx := 0
snm := ancestors[0]
for idx, nm := range ancestors {
if nm < snm {
sidx = idx
snm = nm
}
}
// Rotate the slice, and then duplicate the smallest element
ancestors = append(ancestors[sidx:], ancestors[:sidx]...)
return append(ancestors, snm)
}
// SetAnchors updates the anchors and returns a difference between the new/old list.
//
// The returned diff contains every name present in exactly one of the two
// lists (symmetric difference): old anchors that were removed plus new anchors
// that were added.
func (ns *Namespace) SetAnchors(anchors []string) (diff []string) {
	add := make(map[string]bool)
	for _, nm := range anchors {
		add[nm] = true
	}
	for _, nm := range ns.Anchors {
		if add[nm] {
			delete(add, nm)
		} else {
			// This old anchor is not in the new anchor list.
			diff = append(diff, nm)
		}
	}
	// Idiomatic key-only range (was `for nm, _ := range add`).
	for nm := range add {
		// This new anchor is not in the old anchor list.
		diff = append(diff, nm)
	}
	ns.Anchors = anchors
	return
}
// SetOriginalObject updates or creates the original object in the namespace in the forest.
func (ns *Namespace) SetOriginalObject(obj *unstructured.Unstructured) {
	gvk := obj.GroupVersionKind()
	byName, exists := ns.originalObjects[gvk]
	if !exists {
		// First object of this GVK: create the per-GVK name map.
		byName = map[string]*unstructured.Unstructured{}
		ns.originalObjects[gvk] = byName
	}
	byName[obj.GetName()] = obj
}
// GetOriginalObject gets an original object by name. It returns nil, if the object doesn't exist.
func (ns *Namespace) GetOriginalObject(gvk schema.GroupVersionKind, nm string) *unstructured.Unstructured {
return ns.originalObjects[gvk][nm]
}
// HasOriginalObject returns if the namespace has an original object.
func (ns *Namespace) HasOriginalObject(gvk schema.GroupVersionKind, oo string) bool {
return ns.GetOriginalObject(gvk, oo) != nil
}
// DeleteOriginalObject deletes an original object by name.
func (ns *Namespace) DeleteOriginalObject(gvk schema.GroupVersionKind, nm string) {
delete(ns.originalObjects[gvk], nm)
// Garbage collection
if len(ns.originalObjects[gvk]) == 0 {
delete(ns.originalObjects, gvk)
}
}
// GetOriginalObjects returns all original objects in the namespace.
func (ns *Namespace) GetOriginalObjects(gvk schema.GroupVersionKind) []*unstructured.Unstructured {
	byName := ns.originalObjects[gvk]
	objs := make([]*unstructured.Unstructured, 0, len(byName))
	for _, obj := range byName {
		objs = append(objs, obj)
	}
	return objs
}
// GetNumOriginalObjects returns the total number of original objects of a specific GVK in the namespace.
func (ns *Namespace) GetNumOriginalObjects(gvk schema.GroupVersionKind) int {
return len(ns.originalObjects[gvk])
}
// GetPropagatedObjects returns all original copies in the ancestors.
func (ns *Namespace) GetPropagatedObjects(gvk schema.GroupVersionKind) []*unstructured.Unstructured {
o := []*unstructured.Unstructured{}
ans := ns.AncestryNames()
for _, n := range ans {
// Exclude the original objects in this namespace
if n == ns.name {
continue
}
o = append(o, ns.forest.Get(n).GetOriginalObjects(gvk)...)
}
return o
}
// GetSource returns the original copy in the ancestors if it exists.
// Otherwise, return nil.
func (ns *Namespace) GetSource(gvk schema.GroupVersionKind, name string) *unstructured.Unstructured {
pos := ns.GetPropagatedObjects(gvk)
for _, po := range pos {
if po.GetName() == name {
return po
}
}
return nil
}
// IsAncestor is *not* cycle-safe, so should only be called from namespace trees that are known not
// to have cycles.
func (ns *Namespace) IsAncestor(other *Namespace) bool {
if ns.parent == other {
return true
}
if ns.parent == nil {
return false
}
return ns.parent.IsAncestor(other)
}
// HasLocalCritCondition returns if the namespace itself has any local critical conditions, ignoring
// its ancestors. Any code with the "Crit" prefix is a critical condition.
func (ns *Namespace) HasLocalCritCondition() bool {
	// Local (namespace-wide) conditions are stored under the zero-valued AffectedObject key.
	// Idiomatic key-only range (was `for code, _ := range`) and conversion (was `(string)(code)`).
	for code := range ns.conditions[api.AffectedObject{}] {
		if strings.HasPrefix(string(code), "Crit") {
			return true
		}
	}
	return false
}
// GetCritAncestor returns the name of the first ancestor with a critical condition, or the empty
// string if there are no such ancestors. It *can* return the name of the current namespace.
func (ns *Namespace) GetCritAncestor() string {
if ns.HasLocalCritCondition() {
return ns.name
}
if ns.Parent() == nil {
return ""
}
return ns.Parent().GetCritAncestor()
}
// HasCondition returns true if there's a condition with the given object and code. If code is the
// empty string, it returns true if there's _any_ condition for the given object.
func (ns *Namespace) HasCondition(obj api.AffectedObject, code api.Code) bool {
	if _, exists := ns.conditions[obj]; !exists {
		// Nothing for this obj
		return false
	}
	if code == "" {
		// Something exists for this obj; we don't care what
		return true
	}
	// Check the specific code within this object's condition map.
	_, exists := ns.conditions[obj][code]
	return exists
}
// ClearCondition clears conditions in the namespace for a single object. If `code` is non-empty, it
// only clears conditions with that code, otherwise it clears all conditions for that object. It
// should only be called by the code that also *sets* the conditions.
//
// It returns true if it made any changes, false otherwise.
func (ns *Namespace) ClearCondition(obj api.AffectedObject, code api.Code) bool {
if !ns.HasCondition(obj, code) {
return false
}
if code == "" {
delete(ns.conditions, obj)
} else {
delete(ns.conditions[obj], code)
}
return true
}
// ClearLocalConditions clears the condition(s) on this namespace.
func (ns *Namespace) ClearLocalConditions() bool {
return ns.ClearCondition(api.AffectedObject{}, "")
}
// ClearObsoleteConditions removes conditions whose affected object is no longer
// relevant to this namespace, according to each code's ClearConditionCriterion:
// CCCManual conditions are never auto-cleared, CCCAncestor conditions must come
// from a current ancestor, and CCCSubtree conditions from the current subtree.
func (ns *Namespace) ClearObsoleteConditions(log logr.Logger) {
	// Load ancestors to check CCCAncestors
	isAnc := map[string]bool{}
	for _, anc := range ns.AncestryNames() {
		// The definition of CCCAncestor doesn't include the namespace itself
		if anc != ns.name {
			isAnc[anc] = true
		}
	}

	// Load the subtree to check CCCSubtree, including the namespace itself.
	isSubtree := map[string]bool{ns.name: true}
	for _, dsc := range ns.DescendantNames() {
		isSubtree[dsc] = true
	}

	// For each affected object, remove its condition if that object is no longer relevant.
	// (Deleting map entries while ranging over the map is safe in Go.)
	for obj, codes := range ns.conditions {
		// Idiomatic key-only range (was `for code, _ := range codes`).
		for code := range codes {
			switch api.ClearConditionCriteria[code] {
			case api.CCCManual:
				// nop - cleared manually
			case api.CCCAncestor:
				if !isAnc[obj.Namespace] {
					log.Info("Cleared obsolete condition from old ancestor", "obj", obj, "code", code)
					ns.ClearCondition(obj, code)
				}
			case api.CCCSubtree:
				if !isSubtree[obj.Namespace] {
					log.Info("Cleared obsolete condition from old descendant", "obj", obj, "code", code)
					ns.ClearCondition(obj, code)
				}
			default:
				// A code with no registered criterion is a programming error; log it loudly.
				err := errors.New("no ClearConditionCriterion")
				log.Error(err, "In clearObsoleteConditions", "code", code, "obj", obj)
			}
		}
	}
}
// SetCondition sets a condition for the specified object and code, returning true if it does not
// exist previously or if the message has changed.
//
// Returns true if the condition wasn't previously set
func (ns *Namespace) SetCondition(obj api.AffectedObject, code api.Code, msg string) bool {
	objConds, hadObj := ns.conditions[obj]
	if !hadObj {
		objConds = map[api.Code]string{}
		ns.conditions[obj] = objConds
	}
	oldMsg, hadCode := objConds[code]
	objConds[code] = msg
	// Changed if the object map was new, the code was new, or the message differs.
	return !hadObj || !hadCode || oldMsg != msg
}
// SetLocalCondition sets a condition that applies to the current namespace.
func (ns *Namespace) SetLocalCondition(code api.Code, msg string) bool {
return ns.SetCondition(api.AffectedObject{}, code, msg)
}
// Conditions returns a list of conditions in the namespace in the format expected by the API.
//
// Conditions sharing the same code and message are merged into one entry whose
// Affects lists all their objects; the result is sorted by code then message,
// and nil is returned when there are no conditions at all.
func (ns *Namespace) Conditions() []api.Condition {
	// Treat the code/msg combination as a combined key.
	type codeMsg struct {
		code api.Code
		msg string
	}

	// Reorder so that the objects are grouped by code and message
	byCM := map[codeMsg][]api.AffectedObject{}
	for obj, codes := range ns.conditions {
		for code, msg := range codes {
			cm := codeMsg{code: code, msg: msg}
			byCM[cm] = append(byCM[cm], obj)
		}
	}

	// Flatten into a list of conditions
	conds := []api.Condition{}
	for cm, objs := range byCM {
		c := api.Condition{Code: cm.code, Msg: cm.msg}
		// If the only affected object is unnamed (e.g., it refers to the current namespace), omit it.
		//
		// BUGFIX: this was `len(objs) > 0 || ...`, which is always true here (every
		// byCM entry has at least one object), so the omission never happened and an
		// empty objs would have panicked on objs[0]. The correct test is `> 1`.
		if len(objs) > 1 || objs[0].Name != "" {
			api.SortAffectedObjects(objs)
			c.Affects = objs
		}
		conds = append(conds, c)
	}

	// Sort for deterministic output: by code, then by message.
	sort.Slice(conds, func(i, j int) bool {
		if conds[i].Code != conds[j].Code {
			return conds[i].Code < conds[j].Code
		}
		return conds[i].Msg < conds[j].Msg
	})

	if len(conds) == 0 {
		conds = nil // prevent anything from appearing in the status
	}
	return conds
}
// DescendantNames returns a slice of strings like ["achild", "agrandchild", "bchild", ...] of names
// of all namespaces in its subtree, or nil if the namespace has no descendents. The names are
// returned in alphabetical order (as defined by `sort.Strings()`), *not* depth-first,
// breadth-first, etc.
//
// This method is cycle-safe. If there are cycles, each namespace is only listed once.
func (ns *Namespace) DescendantNames() []string {
	ds := map[string]bool{}
	ns.populateDescendants(ds)
	if len(ds) == 0 {
		return nil
	}
	// Idiomatic key-only range (was `for k, _ := range`), with the exact capacity preallocated.
	d := make([]string, 0, len(ds))
	for k := range ds {
		d = append(d, k)
	}
	sort.Strings(d)
	return d
}
// populateDescendants is a cycle-safe way of finding all descendants of a namespace. If any
// namespace turns out to be its own descendant, it's skipped on subsequent encounters.
//
// The accumulator d doubles as the visited set: a child already present in d
// has been (or is being) expanded, so it is not recursed into again.
func (ns *Namespace) populateDescendants(d map[string]bool) {
	for _, c := range ns.ChildNames() {
		if d[c] {
			continue
		}
		d[c] = true
		cns := ns.forest.Get(c)
		cns.populateDescendants(d)
	}
}
| {
return nil
} | conditional_block |
marker.js | /**
*
* 图片标记器
* Author: tianchungang,wangfeng
* e-mail: wfeng007@163.com
* Date: 14-1-12
* Time: 下午8:22
* version: 0.2
*/
(function ($)
{
// Module-private registry of marker instances, keyed by element id.
// (Translated from the original Chinese note "这三个是?" -- "what are these three?")
var MarkerManager = {
    container:{}
};
// Register a marker instance under the given id.
MarkerManager.setMarker = function(id,marker){
    this.container[id] = marker;
};
// Look up a previously registered marker instance; undefined if absent.
MarkerManager.getMarker = function(id){
    return this.container[id];
};
//
// jQuery plugin entry point: turns the selected element into a marker board.
// Merges the caller's options into the defaults below and delegates to $.marker.init.
$.fn.marker = function (options)
{
    this.options = {
        // --- basic options ---
        picUrl:"", // URL of the background image
        markId:"", // id of the element holding the marker-template images, e.g. a <ul> whose <li>s contain icons
        markerUrl:"", // remote URL (POST) returning existing marker data (x/y, payload); loadData() only runs when set
        picWidth:null, // display width of the image (used by createHtml)
        picHeight:null, // display height of the image (used by createHtml)
        isEdit:false, // true: double-clicking a marker opens its property editor (edit mode vs. view mode)
        // --- edit-mode options ---
        popWidth:200, // popup dialog width when a marker is clicked in edit mode
        popHeight:200, // popup dialog content height
        frame:false, // true: popup content is an iframe; false: content is fetched via POST
        url:"", // popup content URL (iframe src when frame is true, POST target otherwise)
        popTitle:"标注坐标点", // popup title (runtime default, left untranslated)
        hasSelfCur:false, // true: use a custom cursor derived from the chosen template image's src
        okCallBack:null, // callback for the popup's OK button (e.g. to persist marker data)
        cancelCallBack:null, // callback for the popup's Cancel button
        // --- view-mode options ---
        viewClick:null, // single-click callback for markers in view mode; when null the data is alert()ed
        // --- unused / legacy options ---
        data:null, // local render data (coords + icon URL) -- appears unused; TODO confirm
        viwUrl:"" // remote view-data URL -- appears unused; TODO confirm
    };
    $.extend(this.options,options); // merge user options into the defaults above
    return $.marker.init(this); // initialize and return the marker object
};
// Return this element's id attribute.
// BUGFIX: the expression's value was computed but never returned, so every
// call yielded undefined.
// NOTE(review): given MarkerManager above, the intent may have been
// MarkerManager.getMarker($(this).attr("id")) -- confirm with callers.
$.fn.getMarker = function(){
    return $(this).attr("id");
};
//marker对象
//?这种写法是否一个页面就只有一个可以标注的的图片了?是否改用prototype或闭包写法。
$.marker = {
init:function(obj){
var p = this.options = obj.options;
var g = this; //marker对象
g.setObj(obj); //jq对象
g.setFlag(-1); //设置标注模板对象选中状态,当状态大于0时,可以单击标注板
g.createHtml(); //render界面
g.initCursor(); //
g.initEvent(); //增加交互用的事件
//
if( p.markerUrl){ //
g.loadData();
}
return this;
},
//创建界面的html元素
//包括一个外部相对布局的外框,内部的底层图片、位于图片上层的标注所在的蒙版(透明并覆盖提成图片。markerlayer)。
//图片显示大小由插件参数picWidth、picHeight决定。
// TODO 建议提供针对已有img对象的包装。直接获取img对象的长宽作为蒙版div的长宽。
createHtml:function(){
var p = this.options;
var g = this;
//
//如果目标jquerydiv中包含img元素则直接使用该img作为图片源。
var $img=this.getObj().children("img").first();
var img=$img.get(0);//dom
if(img){
p.picUrl=$img.attr("src");
p.picWidth=$img.width(); //当前展示的大小
p.picHeight=$img.height(); //当前展示的大小
}
//
var htmlArr = [];
var width = p.picWidth?'width:'+ p.picWidth+'px;':'';
var height = p.picHeight?'height:'+ p.picHeight+'px;':'';
htmlArr.push('<div style="position: relative;'+width+ height+'">');//FIXME 是不是少了结束标志?
htmlArr.push('<img class="baseDrawing" src="'+ p.picUrl+'" style="'+width+height+'" />');
htmlArr.push('<div class="modal" style="position: absolute;z-index: 99;'+width+height+';left:0;float: left;top: 0;"></div>');
htmlArr.push('</div>');
g.getObj().html(htmlArr.join(""));
//补齐p.picWidth p.picHeight,marks绘图使用p的这两个参数作为大小缓存参考使用。FIXME 之后应该直接根据标记板大小作为计算依据。
var $img= g.getObj().find("img.baseDrawing").first();
p.picWidth=$img.width(); //当前展示的大小
p.picHeight=$img.height(); //当前展示的大小
},
// addMarker creates a marker wrapper <div> at (x,y) inside the modal overlay
// and appends the marker image (picObj) into it. In edit mode the wrapper is
// additionally made drag-and-droppable, constrained to the board's bounds.
// Returns the wrapper element.
addMarker:function(x,y,picObj){
    var p = this.options;
    var g = this;
    var $picture = g.getObj();
    var $modal = $picture.find(".modal");// locate the transparent overlay
    var wrapper = $("<div style='position: absolute;' class='marker'/>");
    // Clicking a marker records it as the currently selected one.
    wrapper.bind("click",function(){
        g.selectedMarker = wrapper; // remember the clicked marker
    }) ;
    wrapper.css({left: x,top: y});
    picObj.appendTo(wrapper);
    $modal.append(wrapper);
    if(p.isEdit){
        // Edit mode: make the marker draggable, clamped to the board area.
        new Dragdrop({
            target : wrapper[0] ,
            area:[0,$picture.width()-wrapper.width(),0,$picture.height()-wrapper.height()]
        });
    }
    return wrapper;
},
//
// 为界面元素增加操作事件功能
//
initEvent:function(){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $markObj = $("#"+p.markId);
var $modal = $picture.find(".modal");
//jq对象增加单击事件,用来增加标注对象
$picture.click(function(e){
if(g.getFlag()>0&& p.isEdit){ //编辑模式时才添加具体标注对象 //flag?难道是点击markid对象后的状态?
var target = $($markObj.find("img")[g.getFlag()-1]).clone(); //获取点击markObjdom对象(img)并复制一份。
//计算需要标注的位置
var left = e.clientX- $modal.parent()[0].offsetLeft;//offsetLeft?
var top = e.clientY-$modal.parent()[0].offsetTop;
//生成标注对象并包装标注图标
var wrapper = g.addMarker(left,top,target);
g.selectedMarker = wrapper; //刚刚新增的元素直接为选中元素。
g.setFlag( -1);
$picture.css({cursor:"pointer"}); //鼠标样式复原
//标注对象模板的src作为事件的参数,为标注对象增加双击事件
var url = target.attr("src");
var params = {x:left,y:top,url:url};
g.addDblEvent(wrapper,params); //为标注对象增加事件 (编辑模式时)
}
});
},
// 初始化标注对象模板工具
initCursor:function(){
var p = this.options;
var g = this;
var $picture = g.getObj(); //jq对象 标注板
var $markObj = $("#"+p.markId);
//增加标注对象模板的单击事件。设置状态,让鼠标可以在蒙版上增加标注。即鼠标选择的标注模板,准备在模板标注。
$markObj.find("img").click(function(e){
e.stopPropagation();
g.setFlag($markObj.find("img").index($(this))+1) ;
var src = $(this).attr("src");
src = src.substring(0,src.lastIndexOf(".")+1)+"cur";
$picture.css({cursor:"crosshair"}); //一旦点选标注模板则鼠标状态为十字样式
if(p.hasSelfCur){ //使用用户设定的鼠标样式
$picture.css({cursor:"url("+src+"),auto"});
}
});
},
//
//获取数据
loadData:function(){
var p = this.options;
var g = this;
$.ajax({
type:"post",
url: p.markerUrl,
dataType:"json",
success:function(data){
var marks=data.marks;
//读取数据后根据获取的数据直接构造标记
for(var i= 0;i<marks.length;i++){
var mark = marks[i];
var $pic = $("<img src='"+mark.url+"'/>") ;
//使用比例计算而不是使用点状设置
// var x=mark.x
// var y=mark.y
var x=Math.round(mark.xp*p.picWidth);
var y=Math.round(mark.yp*p.picHeight);
//
// alert(""+x+""+y);
var $wrap = g.addMarker(x,y,$pic);
if(!p.isEdit){ //当不是编辑模式时,提供一个viewClick函数回调,在非编辑模式中提供单击事件扩展。
$wrap.click(function(){
if(p.viewClick){
p.viewClick.call(this,$wrap,mark);
}
});
}else{ //如果是编辑模式
g.addDblEvent($wrap,mark) ;
}
}
}
});
},
//
//可为标注对象增加双击事件
//
addDblEvent:function(wrapper,params){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $modal = $picture.find(".modal");
//标注对象的双击事件
//该部分做了扩展框架,主要是引用artDialog或iframe提供编辑框。并提供回调函数支持。
wrapper.dblclick(function(){
params.x = parseInt(wrapper.css("left"));
params.y = parseInt(wrapper.css("top"));
if(p.dblclick){ //自定义双击后的click。该部分更可以替换之后的hasDialoy的实现。内嵌弹出窗口是集成的默认实现。
p.dblclick.call(this,wrapper,params) ;
return;
}
if(! wrapper.attr("hasDialog")){
var dialog= $.artDialog({
title: p.popTitle,
width :p.popWidth,
height :p.popHeight,
left:parseInt($modal.parent()[0].offsetLeft)+(parseInt($picture.find("div").eq(0).width())-p.popWidth)/2,
top:parseInt($modal.parent()[0].offsetTop)+(parseInt($picture.find("div").eq(0).height())-p.popHeight)/2,
ok: function(){
if(p.okCallBack){
var $return = p.okCallBack.call(this);
if($return){
wrapper.removeAttr("hasDialog");
return true;
}
}
wrapper.removeAttr("hasDialog");
},cancel:function(){
wrapper.removeAttr("hasDialog");
if(p.cancelCallBack){
var $return = p.cancelCallBack.call(this);
if($return){
wrapper.removeAttr("hasDialog");
return true;
}
}
wrapper.removeAttr("hasDialog");
}
});
wrapper.attr("hasDialog",true);
//
var picPropertyUrl = g.getUrl();
for(var key in params){
picPropertyUrl +="&"+key+"="+params[key];
}
if(!p.frame){
$.ajax({
type:"post",
data:params,
url: g.getUrl(),
success:function(data){
dialog.content(data);
}
});
}else{
dialog.content("<iframe height='100%' width='100%' style='overflow:hidden' frameborder=0 src='"+picPropertyUrl+"'>");
}
}
})
},
//获取url的地址
getUrl:function(){
var p = this.options;
var url = p.url;
if(url.indexOf("?")>0){
return url;
}else{
return url +"?";
}
},
//用来获取设置标记板的状态,该状态是指用户点击标记对象模板后与一般状态的区别。-1标识一般状态 >0表示可以在标记板上单击增加标注。
setFlag:function(value){
this.flag = value;
},
getFlag:function(){
return this.flag;
},
//用来获取设置jq对象,即标记板主体。
setObj:function(obj){
this.obj = obj;
},
getObj:function(){
return this.obj;
},
/**
* 获取标注信息 ,返回JSON数组
* @return {Array}
*/
getMarkerInfo:function(){
var reObj={};
var ret = [];
var g = this;
var p = this.options;
var $picture = g.getObj();
var $modal = $picture.find(".modal");
//标注板信息
var markBoard={};
markBoard.picUrl=p.picUrl;
markBoard.width=p.picWidth;
markBoard.height=p.picHeight;
reObj.markBoard=markBoard;
//标注集合信息
var $markers = $(".marker",$modal);
var info;
var $marker;
for(var i=0;i<$markers.length;i++){
info = {};
$marker = $($markers[i]);
info.x = parseInt($marker.css("left"));
info.y = parseInt($marker.css("top"));
//增加百分比记录 FIXME临时使用option中的参数,其实应该提供一个缓存。
info.xp = info.x / p.picWidth;
info.yp = info.y / p.picHeight;
//
info.url = $marker.find("img").attr("src");
ret.push(info);
}
//复杂结构
reObj.marks=ret;
return reObj;
},
//删除所选标注对象
deleteSelectedMarker:function(){
var g = this;
if(g.selectedMarker){
g.selectedMarker.remove();
g.selectedMarker = null;
}
}
};
//可拖拽dom对象包装
Dragdrop = function(window){
var doc = window.document;
var E = {
on : function(el, type, fn){
el.addEventListener ?
el.addEventListener(type, fn, false) :
el.attachEvent ?
el.attachEvent("on" + type, fn) :
el['on'+type] = fn;
},
un : function(el,type,fn){
el.removeEventListener ?
el.removeEventListener(type, fn, false) :
el.detachEvent ?
el.detachEvent("on" + type, fn) :
el['on'+type] = null;
},
evt : function(e){
return e || window.event;
}
};
return function(opt){
var conf = null, defaultConf, diffX, diffY;
function Config(opt){
this.target = opt.target;
this.bridge = opt.bridge;
this.dragable = opt.dragable != false;
this.dragX = opt.dragX != false;
this.dragY = opt.dragY != false;
this.area = opt.area;
this.callback = opt.callback;
}
function Dragdrop(opt){
if(!opt){return;}
conf = new Config(opt);
defaultConf = new Config(opt);
conf.bridge ?
E.on(conf.bridge,'mousedown',mousedown) :
E.on(conf.target,'mousedown',mousedown);
}
Dragdrop.prototype = {
dragX : function(){
conf.dragX = true;
conf.dragY = false;
},
dragY : function(b){
conf.dragY = true;
conf.dragX = false;
},
dragAll : function(){
conf.dragX = true;
conf.dragY = true;
},
setArea : function(a){
conf.area = a;
},
setBridge : function(b){
conf.bridge = b;
},
setDragable : function(b){
conf.dragable = b;
},
reStore : function(){
conf = new Config(defaultConf);
conf.target.style.top = '0px';
conf.target.style.left = '0px';
},
getDragX : function(){
return conf.dragX;
},
getDragY : function(){
return conf.dragY;
}
};
function mousedown(e){
e = E.evt(e);
var el = conf.target;
el.style.position = 'absolute';
el.style.cursor = 'move';
if(el.setCapture){ //IE
E.on(el, "losecapture", mouseup);
el.setCapture();
e.cancelBubble = true;
}else if(window.captureEvents){ //标准DOM
e.stopPropagation();
E.on(window, "blur", mouseup);
e.preventDefault();
}
diffX = e.clientX - el.offsetLeft;
diffY = e.clientY - el.offsetTop;
E.on(doc,'mousemove',mousemove);
E.on(doc,'mouseup',mouseup);
}
function mousemove(e){
var el = conf.target, e = E.evt(e), moveX = e.clientX - diffX, moveY = e.clientY - diffY;
var minX, maxX, minY, maxY;
if(conf.area){
minX = conf.area[0];
maxX = conf.area[1];
minY = conf.area[2];
maxY = conf.area[3];
moveX < minX && (moveX = minX); // left 最小值
moveX > maxX && (moveX = maxX); // left 最大值
moveY < minY && (moveY = minY); // top 最小值
moveY > maxY && (moveY = maxY); // top 最大值
}
if(conf.dragable){
conf.dragX && (el.style.left = moveX + 'px');
conf.dragY && (el.style.top = moveY + 'px');
if(conf.callback){
var obj = {moveX:moveX,moveY:moveY};
conf.callback.call(conf,obj);
}
}
}
function mouseup(e){
var el = conf.target;
el.style.cursor = 'default';
E.un(doc,'mousemove',mousemove);
E.un(doc,'mouseup',mouseup);
if(el.releaseCapture){ //IE
E.un(el, "losecapture", mouseup);
el.releaseCapture();
}
if(window.releaseEvents){ //标准DOM
E.un(window, "blur", mouseup);
}
}
return new Dragdrop(opt);
}
}(this);
})(jQuery); | identifier_body | ||
marker.js | /**
*
* 图片标记器
* Author: tianchungang,wangfeng
* e-mail: wfeng007@163.com
* Date: 14-1-12
* Time: 下午8:22
* version: 0.2
*/
(function ($)
{
//这三个是?
var MarkerManager = {
container:{}
};
MarkerManager.setMarker = function(id,marker){
this.container[id] = marker;
};
MarkerManager.getMarker = function(id){
return this.container[id];
};
//
//jquery对象增加方法marker用来生成
$.fn.marker = function (options)
{
this.options = {
//基本参数
picUrl:"", //底层图片的地址
markId:"", //指定用来标注模板对象集合的ID //如:ul li组合。ul id="markerTag",li包含标注图片。
markerUrl:"", //标注点 数据远程url //获取标注信息(如x、y,数据内容等)的地址,使用post获取。该部分不为null才initjq对象时才会调用内部的loadData函数。
picWidth:null, //图片展示宽度 //createHtml函数中使用
picHeight:null, //图片展示高度 //createHtml函数中使用
isEdit:false, //设置为true,双击标注图标能进行属性编辑 //确定标记板当前为何种模式
//edit mode 编辑模式扩展参数
popWidth:200, //弹出窗口宽度 //编辑模式时点击标注弹出框的大小
popHeight:200, //弹出窗口内容高度 //编辑模式时点击标注弹出框的大小
frame:false, //编辑模式时点击标注弹出框是否是iframe方式
url:"", //远程数据url //这个其实是编辑模式中,当用户点击标注时,在对话框中显示内容的url。frame为true时为frame引用的路径。frame为false时是用post获取该路径的数据。
popTitle:"标注坐标点", //弹窗标题 //编辑模式时点击标注弹出框的大小
hasSelfCur:false, //是否采用自定义cursor //点击标记对象模板后自定义的标记样式。是标记对象模板的src属性制定。
okCallBack:null, //弹窗确定按钮的回调函数,可添加保存数据功能 //编辑模式时点击标注弹出框提供了默认的ok按钮。按钮点击后的回调。(编辑模式下的frame为true时。)
cancelCallBack:null, //编辑模式时点击标注弹出框提供了默认的cancel按钮。按钮点击后的回调。(编辑模式下的frame为true时。)
//view mode 展示模式扩展参数
viewClick:null, //在展示编辑模式下为标注提供的单击扩展回调,如果为null则直接为alert显示标注数据。
//未使用的参数或遗留参数
data:null, //本地渲染数据(坐标与标记图标的地址) //TODO 似乎暂时没有用到
viwUrl:"" //远程展示数据url // TODO 似乎暂时没有用到。
};
$.extend(this.options,options); //参数合并到this.options参数对象
return $.marker.init(this); //返回初始化的marker对象。
};
//获取当前id的值?
$.fn.getMarker = function(){
$(this).attr("id");
};
//marker对象
//?这种写法是否一个页面就只有一个可以标注的的图片了?是否改用prototype或闭包写法。
$.marker = {
init:function(obj){
var p = this.options = obj.options;
var g = this; //marker对象
g.setObj(obj); //jq对象
g.setFlag(-1); //设置标注模板对象选中状态,当状态大于0时,可以单击标注板
g.createHtml(); //render界面
g.initCursor(); //
g.initEvent(); //增加交互用的事件
//
if( p.markerUrl){ //
g.loadData();
}
return this;
},
//创建界面的html元素
//包括一个外部相对布局的外框,内部的底层图片、位于图片上层的标注所在的蒙版(透明并覆盖提成图片。markerlayer)。
//图片显示大小由插件参数picWidth、picHeight决定。
// TODO 建议提供针对已有img对象的包装。直接获取img对象的长宽作为蒙版div的长宽。
createHtml:function(){
var p = this.options;
var g = this;
//
//如果目标jquerydiv中包含img元素则直接使用该img作为图片源。
var $img=this.getObj().children("img").first();
var img=$img.get(0);//dom
if(img){
p.picUrl=$img.attr("src");
p.picWidth=$img.width(); //当前展示的大小
p.picHeight=$img.height(); //当前展示的大小
}
//
var htmlArr = [];
var width = p.picWidth?'width:'+ p.picWidth+'px;':'';
var height = p.picHeight?'height:'+ p.picHeight+'px;':'';
htmlArr.push('<div style="position: relative;'+width+ height+'">');//FIXME 是不是少了结束标志?
htmlArr.push('<img class="baseDrawing" src="'+ p.picUrl+'" style="'+width+height+'" />');
htmlArr.push('<div class="modal" style="position: absolute;z-index: 99;'+width+height+';left:0;float: left;top: 0;"></div>');
htmlArr.push('</div>');
g.getObj().html(htmlArr.join(""));
//补齐p.picWidth p.picHeight,marks绘图使用p的这两个参数作为大小缓存参考使用。FIXME 之后应该直接根据标记板大小作为计算依据。
var $img= g.getObj().find("img.baseDrawing").first();
p.picWidth=$img.width(); //当前展示的大小
p.picHeight=$img.height(); //当前展示的大小
},
//创建蒙版中的标注。(wrapper)生成一个div并放入标注的img对象picObj。
//返回该标注对象
addMarker:function(x,y,picObj){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $modal = $picture.find(".modal");//找到蒙版
var wrapper = $("<div style='position: absolute;' class='marker'/>");
//addMarker的时候增加单击事件
wrapper.bind("click",function(){//单击事件
g.selectedMarker = wrapper; //单击选中
}) ;
wrapper.css({left: x,top: y});
picObj.appendTo(wrapper);
$modal.append(wrapper);
if(p.isEdit){
//编辑状态下包装成可dd的dom
new Dragdrop({
target : wrapper[0] ,
area:[0,$picture.width()-wrapper.width(),0,$picture.height()-wrapper.height()]
});
}
return wrapper;
},
//
// 为界面元素增加操作事件功能
//
initEvent:function(){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $markObj = $("#"+p.markId);
var $modal = $picture.find(".modal");
//jq对象增加单击事件,用来增加标注对象
$picture.click(function(e){
if(g.getFlag()>0&& p.isEdit){ //编辑模式时才添加具体标注对象 //flag?难道是点击markid对象后的状态?
var target = $($markObj.find("img")[g.getFlag()-1]).clone(); //获取点击markObjdom对象(img)并复制一份。
//计算需要标注的位置
var left = e.clientX- $modal.parent()[0].offsetLeft;//offsetLeft?
var top = e.clientY-$modal.parent()[0].offsetTop;
//生成标注对象并包装标注图标
var wrapper = g.addMarker(left,top,target);
g.selectedMarker = wrapper; //刚刚新增的元素直接为选中元素。
g.setFlag( -1);
$picture.css({cursor:"pointer"}); //鼠标样式复原
//标注对象模板的src作为事件的参数,为标注对象增加双击事件
var url = target.attr("src");
var params = {x:left,y:top,url:url};
g.addDblEvent(wrapper,params); //为标注对象增加事件 (编辑模式时)
}
});
},
// 初始化标注对象模板工具
initCursor:function(){
var p = this.options;
var g = this;
var $picture = g.getObj(); //jq对象 标注板
var $markObj = $("#"+p.markId);
//增加标注对象模板的单击事件。设置状态,让鼠标可以在蒙版上增加标注。即鼠标选择的标注模板,准备在模板标注。
$markObj.find("img").click(function(e){
e.stopPropagation();
g.setFlag($markObj.find("img").index($(this))+1) ;
var src = $(this).attr("src");
src = src.substring(0,src.lastIndexOf(".")+1)+"cur";
$picture.css({cursor:"crosshair"}); //一旦点选标注模板则鼠标状态为十字样式
if(p.hasSelfCur){ //使用用户设定的鼠标样式
$picture.css({cursor:"url("+src+"),auto"});
}
});
},
//
//获取数据
loadData:function(){
var p = this.options;
var g = this;
$.ajax({
type:"post",
url: p.markerUrl,
dataType:"json",
success:function(data){
var marks=data.marks;
//读取数据后根据获取的数据直接构造标记
for(var i= 0;i<marks.length;i++){
var mark = marks[i];
var $pic = $("<img src='"+mark.url+"'/>") ;
//使用比例计算而不是使用点状设置
// var x=mark.x
// var y=mark.y
var x=Math.round(mark.xp*p.picWidth);
var y=Math.round(mark.yp*p.picHeight);
//
// alert(""+x+""+y);
var $wrap = g.addMarker(x,y,$pic);
if(!p.isEdit){ //当不是编辑模式时,提供一个viewClick函数回调,在非编辑模式中提供单击事件扩展。
$wrap.click(function(){
if(p.viewClick){
p.viewClick.call(this,$wrap,mark);
}
});
}else{ //如果是编辑模式
g.addDblEvent($wrap,mark) ;
}
}
}
});
},
//
//可为标注对象增加双击事件
//
addDblEvent:function(wrapper,params){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $modal = $picture.find(".modal");
//标注对象的双击事件
//该部分做了扩展框架,主要是引用artDialog或iframe提供编辑框。并提供回调函数支持。
wrapper.dblclick(function(){
params.x = parseInt(wrapper.css("left"));
params.y = parseInt(wrapper.css("top"));
if(p.dblclick){ //自定义双击后的click。该部分更可以替换之后的hasDialoy的实现。内嵌弹出窗口是集成的默认实现。
p.dblclick.call(this,wrapper,params) ;
return;
}
if(! wrapper.attr("hasDialog")){
var dialog= $.artDialog({
title: p.popTitle,
width :p.popWidth,
height :p.popHeight,
left:parseInt($modal.parent()[0].offsetLeft)+(parseInt($picture.find("div").eq(0).width())-p.popWidth)/2,
top:parseInt($modal.parent()[0].offsetTop)+(parseInt($picture.find("div").eq(0).height())-p.popHeight)/2,
ok: function(){
if(p.okCallBack){
var $return = p.okCallBack.call(this);
if($return){
wrapper.removeAttr("hasDialog");
return true;
}
}
wrapper.removeAttr("hasDialog");
},cancel:function(){
wrapper.removeAttr("hasDialog");
if(p.cancelCallBack){
var $return = p.cancelCallBack.call(this);
if($return){
wrapper.removeAttr("hasDialog");
return true;
}
}
wrapper.removeAttr("hasDialog");
}
});
wrapper.attr("hasDialog",true);
//
var picPropertyUrl = g.getUrl();
for(var key in params){
picPropertyUrl +="&"+key+"="+params[key];
}
if(!p.frame){
$.ajax({
type:"post",
data:params,
url: g.getUrl(),
success:function(data){
dialog.content(data);
}
});
}else{
dialog.content("<iframe height='100%' width='100%' style='overflow:hidden' frameborder=0 src='"+picPropertyUrl+"'>");
}
}
})
},
//获取url的地址
getUrl:function(){
var p = this.options;
var url = p.url;
if(url.indexOf("?")>0){
return url;
}else{
return url +"?";
}
},
//用来获取设置标记板的状态,该状态是指用户点击标记对象模板后与一般状态的区别。-1标识一般状态 >0表示可以在标记板上单击增加标注。
setFlag:function(value){
this.flag = value;
},
getFlag:function(){
return this.flag;
},
//用来获取设置jq对象,即标记板主体。
setObj:function(obj){
this.obj = obj;
},
getObj:function(){
return this.obj;
},
/**
* 获取标注信息 ,返回JSON数组
* @return {Array}
*/
getMarkerInfo:function(){
var reObj={};
var ret = [];
var g = this;
var p = this.options;
var $picture = g.getObj();
var $modal = $picture.find(".modal");
//标注板信息
var markBoard={};
markBoard.picUrl=p.picUrl;
markBoard.width=p.picWidth;
markBoard.height=p.picHeight;
reObj.markBoard=markBoard;
//标注集合信息
var $markers = $(".marker",$modal);
var info;
var $marker;
for(var i=0;i<$markers.length;i++){
info = {};
$marker = $($markers[i]);
info.x = parseInt($marker.css("left"));
info.y = parseInt($marker.css("top"));
//增加百分比记录 FIXME临时使用option中的参数,其实应该提供一个缓存。
info.xp = info.x / p.picWidth;
info.yp = info.y / p.picHeight;
//
info.url = $marker.find("img").attr("src");
ret.push(info);
}
//复杂结构
reObj.marks=ret;
return reObj;
},
//删除所选标注对象
deleteSelectedMarker:function(){
var g = this;
if(g.selectedMarker){
g.selectedMarker.remove();
g.selectedMarker = null;
}
}
};
//可拖拽dom对象包装
Dragdrop = function(window){
var doc = window.document;
var E = {
on : function(el, type, fn){
el.addEventListener ?
el.addEventListener(type, fn, false) :
el.attachEvent ?
el.attachEvent("on" + type, fn) :
el['on'+type] = fn;
},
un : function(el,type,fn){
el.removeEventListener ?
el.removeEventListener(type, fn, false) :
el.detachEvent ?
el.detachEvent("on" + type, fn) :
el['on'+type] = null;
},
evt : function(e){
return e || window.event;
}
};
return function(opt){
var conf = null, defaultConf, diffX, diffY;
function Config(opt){
this.target = opt.target;
this.bridge = opt.bridge;
this.dragable = opt.dragable != false;
this.dragX = opt.dragX != false;
this.dragY = opt.dragY != false;
this.area = opt.area;
this.callback = opt.callback;
}
function Dragdrop(opt){
if(!opt){return;}
conf = new Config(opt);
defaultConf = new Config(opt);
conf.bridge ?
E.on(conf.bridge,'mousedown',mousedown) :
E.on(conf.target,'mousedown',mousedown);
}
Dragdrop.prototype = {
dragX : function(){
conf.dragX = true;
conf.dragY = false;
},
dragY : function(b){
conf.dragY = true;
conf.dragX = false;
},
dragAll : function(){
conf.dragX = true;
conf.dragY = true;
},
setArea : function(a){
conf.area = a;
},
setBridge : function(b){
conf.bridge = b;
},
setDragable : function(b){
conf.dragable = b;
},
reStore : function(){
conf = new Config(defaultConf);
conf.target.style.top = '0px';
conf.target.style.left = '0px';
},
getDragX : function(){
return conf.dragX;
},
getDragY : function(){
return conf.dragY;
}
};
function mousedown(e){
e = E.evt(e);
var el = conf.target;
el.style.position = 'absolute';
el.style.cursor = 'move';
if(el.setCapture){ //IE
E.on(el, "losecapture", mouseup);
el.setCapture();
e.cancelBubble = true;
}else if(window.captureEvents){ //标准DOM
e.stopPropagation();
E.on(window, "blur", mouseup);
e.preventDefault();
}
diffX = e.clientX - el.offsetLeft;
diffY = e.clientY - el.offsetTop;
E.on(doc,'mousemove',mousemove);
E.on(doc,'mouseup',mouseup);
}
function mousemove(e){
var el = conf.target, e = E.evt(e), moveX = e.clientX - diffX, moveY = e.clientY - diffY;
var minX, maxX, minY, maxY;
if(conf.area){
minX = conf.area[0];
maxX = conf.area[1];
minY = conf.area[2];
maxY = conf.area[3];
moveX < minX && (moveX = minX); // left 最小值
moveX > maxX && (moveX = maxX); // left 最大值
moveY < minY && (moveY = minY); // top 最小值
moveY > maxY && (moveY = maxY); // top 最大值
}
if(conf.dragable){
conf.dragX && (el.style.left = moveX + 'px');
conf.dragY && (el.style.top = moveY + 'px');
if(conf.callback){
var obj = {moveX:moveX,moveY:moveY};
conf.callback.call(conf,obj);
}
}
}
function mouseup(e){
var el = conf.target;
el.style.cursor = 'default';
E.un(doc,'mousemove',mousemove);
E.un(doc,'mouseup',mouseup);
if(el.releaseCapture){ //IE
E.un(el, "losecapture", mouseup);
el.releaseCapture();
}
if(window.releaseEvents){ //标准DOM
E.un(window, "blur", mouseup);
}
} | return new Dragdrop(opt);
}
}(this);
})(jQuery); | random_line_split | |
marker.js | /**
*
* 图片标记器
* Author: tianchungang,wangfeng
* e-mail: wfeng007@163.com
* Date: 14-1-12
* Time: 下午8:22
* version: 0.2
*/
(function ($)
{
//这三个是?
var MarkerManager = {
container:{}
};
MarkerManager.setMarker = function(id,marker){
this.container[id] = marker;
};
MarkerManager.getMarker = function(id){
return this.container[id];
};
//
//jquery对象增加方法marker用来生成
$.fn.marker = function (options)
{
this.options = {
//基本参数
picUrl:"", //底层图片的地址
markId:"", //指定用来标注模板对象集合的ID //如:ul li组合。ul id="markerTag",li包含标注图片。
markerUrl:"", //标注点 数据远程url //获取标注信息(如x、y,数据内容等)的地址,使用post获取。该部分不为null才initjq对象时才会调用内部的loadData函数。
picWidth:null, //图片展示宽度 //createHtml函数中使用
picHeight:null, //图片展示高度 //createHtml函数中使用
isEdit:false, //设置为true,双击标注图标能进行属性编辑 //确定标记板当前为何种模式
//edit mode 编辑模式扩展参数
popWidth:200, //弹出窗口宽度 //编辑模式时点击标注弹出框的大小
popHeight:200, //弹出窗口内容高度 //编辑模式时点击标注弹出框的大小
frame:false, //编辑模式时点击标注弹出框是否是iframe方式
url:"", //远程数据url //这个其实是编辑模式中,当用户点击标注时,在对话框中显示内容的url。frame为true时为frame引用的路径。frame为false时是用post获取该路径的数据。
popTitle:"标注坐标点", //弹窗标题 //编辑模式时点击标注弹出框的大小
hasSelfCur:false, //是否采用自定义cursor //点击标记对象模板后自定义的标记样式。是标记对象模板的src属性制定。
okCallBack:null, //弹窗确定按钮的回调函数,可添加保存数据功能 //编辑模式时点击标注弹出框提供了默认的ok按钮。按钮点击后的回调。(编辑模式下的frame为true时。)
cancelCallBack:null, //编辑模式时点击标注弹出框提供了默认的cancel按钮。按钮点击后的回调。(编辑模式下的frame为true时。)
//view mode 展示模式扩展参数
viewClick:null, //在展示编辑模式下为标注提供的单击扩展回调,如果为null则直接为alert显示标注数据。
//未使用的参数或遗留参数
data:null, //本地渲染数据(坐标与标记图标的地址) //TODO 似乎暂时没有用到
viwUrl:"" //远程展示数据url // TODO 似乎暂时没有用到。
};
$.extend(this.options,options); //参数合并到this.options参数对象
return $.marker.init(this); //返回初始化的marker对象。
};
//获取当前id的值?
$.fn.getMarker = function(){
$(this).attr("id");
};
//marker对象
//?这种写法是否一个页面就只有一个可以标注的的图片了?是否改用prototype或闭包写法。
$.marker = {
init:function(obj){
var p = this.options = obj.options;
var g = this; //marker对象
g.setObj(obj); //jq对象
g.setFlag(-1); //设置标注模板对象选中状态,当状态大于0时,可以单击标注板
g.createHtml(); //render界面
g.initCursor(); //
g.initEvent(); //增加交互用的事件
//
if( p.markerUrl){ //
g.loadData();
}
return this;
},
//创建界面的html元素
//包括一个外部相对布局的外框,内部的底层图片、位于图片上层的标注所在的蒙版(透明并覆盖提成图片。markerlayer)。
//图片显示大小由插件参数picWidth、picHeight决定。
// TODO 建议提供针对已有img对象的包装。直接获取img对象的长宽作为蒙版div的长宽。
createHtml:function(){
var p = this.options;
var g = this;
//
//如果目标jquerydiv中包含img元素则直接使用该img作为图片源。
var $img=this.getObj().children("img").first();
var img=$img.get(0);//dom
if(img){
p.picUrl=$img.attr("src");
p.picWidth=$img.width(); //当前展示的大小
p.picHeight=$img.height(); //当前展示的大小
}
//
var htmlArr = [];
var width = p.picWidth?'width:'+ p.picWidth+'px;':'';
var height = p.picHeight?'height:'+ p.picHeight+'px;':'';
htmlArr.push('<div style="position: relative;'+width+ height+'">');//FIXME 是不是少了结束标志?
htmlArr.push('<img class="baseDrawing" src="'+ p.picUrl+'" style="'+width+height+'" />');
htmlArr.push('<div class="modal" style="position: absolute;z-index: 99;'+width+height+';left:0;float: left;top: 0;"></div>');
htmlArr.push('</div>');
g.getObj().html(htmlArr.join(""));
//补齐p.picWidth p.picHeight,marks绘图使用p的这两个参数作为大小缓存参考使用。FIXME 之后应该直接根据标记板大小作为计算依据。
var $img= g.getObj().find("img.baseDrawing").first();
p.picWidth=$img.width(); //当前展示的大小
p.picHeight=$img.height(); //当前展示的大小
},
//创建蒙版中的标注。(wrapper)生成一个div并放入标注的img对象picObj。
//返回该标注对象
addMarker:function(x,y,picObj){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $modal = $picture.find(".modal");//找到蒙版
var wrapper = $("<div style='position: absolute;' class='marker'/>");
//addMarker的时候增加单击事件
wrapper.bind("click",function(){//单击事件
g.selectedMarker = wrapper; //单击选中
}) ;
wrapper.css({left: x,top: y});
picObj.appendTo(wrapper);
$modal.append(wrapper);
if(p.isEdit){
//编辑状态下包装成可dd的dom
new Dragdrop({
target : wrapper[0] ,
area:[0,$picture.width()-wrapper.width(),0,$picture.height()-wrapper.height()]
});
}
return wrapper;
},
//
// 为界面元素增加操作事件功能
//
initEvent:function(){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $markObj = $("#"+p.markId);
var $modal = $picture.find(".modal");
//jq对象增加单击事件,用来增加标注对象
$picture.click(function(e){
if(g.getFlag()>0&& p.isEdit){ //编辑模式时才添加具体标注对象 //flag?难道是点击markid对象后的状态?
var target = $($markObj.find("img")[g.getFlag()-1]).clone(); //获取点击markObjdom对象(img)并复制一份。
//计算需要标注的位置
var left = e.clientX- $modal.parent()[0].offsetLeft;//offsetLeft?
var top = e.clientY-$modal.parent()[0].offsetTop;
//生成标注对象并包装标注图标
var wrapper = g.addMarker(left,top,target);
g.selectedMarker = wrapper; //刚刚新增的元素直接为选中元素。
g.setFlag( -1);
$picture.css({cursor:"pointer"}); //鼠标样式复原
//标注对象模板的src作为事件的参数,为标注对象增加双击事件
var url = target.attr("src");
var params = {x:left,y:top,url:url};
g.addDblEvent(wrapper,params); //为标注对象增加事件 (编辑模式时)
}
});
},
// 初始化标注对象模板工具
initCursor:function(){
var p = this.options;
var g = this;
var $picture = g.getObj(); //jq对象 标注板
var $markObj = $("#"+p.markId);
//增加标注对象模板的单击事件。设置状态,让鼠标可以在蒙版上增加标注。即鼠标选择的标注模板,准备在模板标注。
$markObj.find("img").click(function(e){
e.stopPropagation();
g.setFlag($markObj.find("img").index($(this))+1) ;
var src = $(this).attr("src");
src = src.substring(0,src.lastIndexOf(".")+1)+"cur";
$picture.css({cursor:"crosshair"}); //一旦点选标注模板则鼠标状态为十字样式
if(p.hasSelfCur){ //使用用户设定的鼠标样式
$picture.css({cursor:"url("+src+"),auto"});
}
});
},
//
//获取数据
loadData:function(){
var p = this.options;
var g = this;
$.ajax({
type:"post",
url: p.markerUrl,
dataType:"json",
success:function(data){
var marks=data.marks;
//读取数据后根据获取的数据直接构造标记
for(var i= 0;i<marks.length;i++){
var mark = marks[i];
var $pic = $("<img src='"+mark.url+"'/>") ;
//使用比例计算而不是使用点状设置
// var x=mark.x
// var y=mark.y
var x=Math.round(mark.xp*p.picWidth);
var y=Math.round(mark.yp*p.picHeight);
//
// alert(""+x+""+y);
var $wrap = g.addMarker(x,y,$pic);
if(!p.isEdit){ //当不是编辑模式时,提供一个viewClick函数回调,在非编辑模式中提供单击事件扩展。
$wrap.click(function(){
if(p.viewClick){
p.viewClick.call(this,$wrap,mark);
}
});
}else{ //如果是编辑模式
g.addDblEvent($wrap,mark) ;
}
}
}
});
},
//
//可为标注对象增加双击事件
//
addDblEvent:function(wrapper,params){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $modal = $picture.find(".modal");
//标注对象的双击事件
//该部分做了扩展框架,主要是引用artDialog或iframe提供编辑框。并提供回调函数支持。
wrapper.dblclick(function(){
params.x = parseInt(wrapper.css("left"));
params.y = parseInt(wrapper.css("top"));
if(p.dblclick){ //自定义双击后的click。该部分更可以替换之后的hasDialoy的实现。内嵌弹出窗口是集成的默认实现。
p.dblclick.call(this,wrapper,params) ;
return;
}
if(! wrapper.attr("hasDialog")){
var dialog= $.artDialog({
title: p.popTitle,
width :p.popWidth,
height :p.popHeight,
left:parseInt($modal.parent()[0].offsetLeft)+(parseInt($picture.find("div").eq(0).width())-p.popWidth)/2,
top:parseInt($modal.parent()[0].offsetTop)+(parseInt($picture.find("div").eq(0).height())-p.popHeight)/2,
ok: function(){
if(p.okCallBack){
var $return = p.okCallBack.call(this);
if($return){
wrapper.removeAttr("hasDialog");
return true;
}
}
wrapper.removeAttr("hasDialog");
},cancel:function(){
wrapper.removeAttr("hasDialog");
if(p.cancelCallBack){
var $return = p.cancelCallBack.call(this);
if($return){
wrapper.removeAttr("hasDialog");
return true;
}
}
wrapper.removeAttr("hasDialog");
}
});
wrapper.attr("hasDialog",true);
//
var picPropertyUrl = g.getUrl();
for(var key in params){
picPropertyUrl +="&"+key+"="+params[key];
}
if(!p.frame){
$.ajax({
type:"post",
data:params,
url: g.getUrl(),
success:function(data){
dialog.content(data);
}
});
}else{
dialog.content("<iframe height='100%' width='100%' style='overflow:hidden' frameborder=0 src='"+picPropertyUrl+"'>");
}
}
})
},
//获取url的地址
getUrl:function(){
var p = this.options;
var url = p.url;
if(url.indexOf("?")>0){
return url;
}else{
return url +"?";
}
},
//用来获取设置标记板的状态,该状态是指用户点击标记对象模板后与一般状态的区别。-1标识一般状态 >0表示可以在标记板上单击增加标注。
setFlag:function(value){
this.flag = value;
},
getFlag:function(){
return this.flag;
},
//用来获取设置jq对象,即标记板主体。
setObj:function(obj){
this.obj = obj;
},
getObj:function(){
return this.obj;
},
/**
* 获取标注信息 ,返回JSON数组
* @return {Array}
*/
getMarkerInfo:function(){
var reObj={};
var ret = [];
var g = this;
var p = this.options;
var $picture = g.getObj();
var $modal = $picture.find(".modal");
//标注板信息
var markBoard={};
markBoard.picUrl=p.picUrl;
markBoard.width=p.picWidth;
markBoard.height=p.picHeight;
reObj.markBoard=markBoard;
//标注集合信息
var $markers = $(".marker",$modal);
var info;
var $marker;
for(var i=0;i<$markers.length;i++){
info = {};
$marker = $($markers[i]);
info.x = parseInt($marker.css("left"));
info.y = parseInt($marker.css("top"));
//增加百分比记录 FIXME临时使用option中的参数,其实应该提供一个缓存。
info.xp = info.x / p.picWidth;
info.yp = info.y / p.picHeight;
//
info.url = $marker.find("img").attr("src");
ret.push(info);
}
//复杂结构
reObj.marks=ret;
return reObj;
},
//删除所选标注对象
deleteSelectedMarker:function(){
var g = this;
if(g.selectedMarker){
g.selectedMarker.remove();
g.selectedMarker = null;
}
}
};
//可拖拽dom对象包装
Dragdrop = function(window){
var doc = window.document;
var E = {
on : function(el, type, fn){
el.addEventListener ?
el.addEventListener(type, fn, false) :
el.attachEvent ?
el.attachEvent("on" + type, fn) :
el['on'+type] = fn;
},
un : function(el,type,fn){
el.removeEventListener ?
el.removeEventListener(type, fn, false) :
el.detachEvent ?
el.detachEvent("on" + type, fn) :
el['on'+type] = null;
},
evt : function(e){
return e || window.event;
}
};
return function(opt){
var conf = null, defaultConf, diffX, diffY;
function Config(opt){
this.target = opt.target;
this.bridge = opt.bridge;
this.dragable = opt.dragable != false;
this.dragX = opt.dragX != false;
this.dragY = opt.dragY != false;
this.area = opt.area;
this.callback = opt.callback;
}
function Dragdrop(opt){
if(!opt){return;}
conf = new Config(opt);
defaultConf = new Config(opt);
conf.bridge ?
E.on(conf.bridge,'mousedown',mousedown) :
E.on(conf.target,'mousedown',mousedown);
}
Dragdrop.prototype = {
dragX : function(){
conf.dragX = true;
conf.dragY = false;
},
dragY : function(b){
conf.dragY = true;
conf.dragX = false;
},
dragAll : function(){
conf.dragX = true;
conf.dragY = true;
},
setArea : function(a){
conf.area = a;
},
setBridge : function(b){
conf.bridge = b;
},
setDragable : function(b){
conf.dragable = b;
},
reStore : function(){
conf = new Config(defaultConf);
conf.target.style.top = '0px';
conf.target.style.left = '0px';
},
getDragX : function(){
return conf.dragX;
},
getDragY : function(){
return conf.dragY;
}
};
function mousedown(e){
e = E.evt(e);
var el = conf.target;
el.style.position = 'absolute';
el.style.cursor = 'move';
if(el.setCapture){ //IE
E.on(el, "losecapture", mouseup);
el.setCapture();
e.cancelBubble = true;
}else if(window.captureEvents){ //标准DOM
e.stopPropagation();
E.on(window, "blur", mouseup);
e.preventDefault();
}
diffX = e.clientX - el.offsetLeft;
diffY = e.clientY - el.offsetTop;
E.on(doc,'mousemove',mousemove);
E.on(doc,'mouseup',mouseup);
}
function mousemove(e){
var el = conf.target, e = E.evt(e), moveX = e.clientX - diffX, moveY = e.clientY - diffY;
var minX, maxX, minY, maxY;
if(conf.area){
minX = conf.area[0];
maxX = conf.area[1];
minY = conf.area[2];
maxY = conf.area[3];
moveX < minX && (moveX = minX); // left 最小值
moveX > maxX && (moveX = maxX); // left 最大值
moveY < minY && (moveY = minY); // top 最小值
moveY > maxY && (moveY = maxY); // top 最大值
}
if(conf.dragable){
conf.dragX && (el. | ft = moveX + 'px');
conf.dragY && (el.style.top = moveY + 'px');
if(conf.callback){
var obj = {moveX:moveX,moveY:moveY};
conf.callback.call(conf,obj);
}
}
}
function mouseup(e){
var el = conf.target;
el.style.cursor = 'default';
E.un(doc,'mousemove',mousemove);
E.un(doc,'mouseup',mouseup);
if(el.releaseCapture){ //IE
E.un(el, "losecapture", mouseup);
el.releaseCapture();
}
if(window.releaseEvents){ //标准DOM
E.un(window, "blur", mouseup);
}
}
return new Dragdrop(opt);
}
}(this);
})(jQuery); | style.le | identifier_name |
marker.js | /**
*
* 图片标记器
* Author: tianchungang,wangfeng
* e-mail: wfeng007@163.com
* Date: 14-1-12
* Time: 下午8:22
* version: 0.2
*/
(function ($)
{
//这三个是?
var MarkerManager = {
container:{}
};
MarkerManager.setMarker = function(id,marker){
this.container[id] = marker;
};
MarkerManager.getMarker = function(id){
return this.container[id];
};
//
//jquery对象增加方法marker用来生成
$.fn.marker = function (options)
{
this.options = {
//基本参数
picUrl:"", //底层图片的地址
markId:"", //指定用来标注模板对象集合的ID //如:ul li组合。ul id="markerTag",li包含标注图片。
markerUrl:"", //标注点 数据远程url //获取标注信息(如x、y,数据内容等)的地址,使用post获取。该部分不为null才initjq对象时才会调用内部的loadData函数。
picWidth:null, //图片展示宽度 //createHtml函数中使用
picHeight:null, //图片展示高度 //createHtml函数中使用
isEdit:false, //设置为true,双击标注图标能进行属性编辑 //确定标记板当前为何种模式
//edit mode 编辑模式扩展参数
popWidth:200, //弹出窗口宽度 //编辑模式时点击标注弹出框的大小
popHeight:200, //弹出窗口内容高度 //编辑模式时点击标注弹出框的大小
frame:false, //编辑模式时点击标注弹出框是否是iframe方式
url:"", //远程数据url //这个其实是编辑模式中,当用户点击标注时,在对话框中显示内容的url。frame为true时为frame引用的路径。frame为false时是用post获取该路径的数据。
popTitle:"标注坐标点", //弹窗标题 //编辑模式时点击标注弹出框的大小
hasSelfCur:false, //是否采用自定义cursor //点击标记对象模板后自定义的标记样式。是标记对象模板的src属性制定。
okCallBack:null, //弹窗确定按钮的回调函数,可添加保存数据功能 //编辑模式时点击标注弹出框提供了默认的ok按钮。按钮点击后的回调。(编辑模式下的frame为true时。)
cancelCallBack:null, //编辑模式时点击标注弹出框提供了默认的cancel按钮。按钮点击后的回调。(编辑模式下的frame为true时。)
//view mode 展示模式扩展参数
viewClick:null, //在展示编辑模式下为标注提供的单击扩展回调,如果为null则直接为alert显示标注数据。
//未使用的参数或遗留参数
data:null, //本地渲染数据(坐标与标记图标的地址) //TODO 似乎暂时没有用到
viwUrl:"" //远程展示数据url // TODO 似乎暂时没有用到。
};
$.extend(this.options,options); //参数合并到this.options参数对象
return $.marker.init(this); //返回初始化的marker对象。
};
//获取当前id的值?
$.fn.getMarker = function(){
$(this).attr("id");
};
//marker对象
//?这种写法是否一个页面就只有一个可以标注的的图片了?是否改用prototype或闭包写法。
$.marker = {
init:function(obj){
var p = this.options = obj.options;
var g = this; //marker对象
g.setObj(obj); //jq对象
g.setFlag(-1); //设置标注模板对象选中状态,当状态大于0时,可以单击标注板
g.createHtml(); //render界面
g.initCursor(); //
g.initEvent(); //增加交互用的事件
//
if( p.markerUrl){ //
g.loadData();
}
return this;
},
//创建界面的html元素
//包括一个外部相对布局的外框,内部的底层图片、位于图片上层的标注所在的蒙版(透明并覆盖提成图片。markerlayer)。
//图片显示大小由插件参数picWidth、picHeight决定。
// TODO 建议提供针对已有img对象的包装。直接获取img对象的长宽作为蒙版div的长宽。
createHtml:function(){
var p = this.options;
var g = this;
//
//如果目标jquerydiv中包含img元素则直接使用该img作为图片源。
var $img=this.getObj().children("img").first();
var img=$img.get(0);//dom
if(img){
p.picUrl=$img.attr("src");
p.picWidth=$img.width(); //当前展示的大小
p.picHeight=$img.height(); //当前展示的大小
}
//
var htmlArr = [];
var width = p.picWidth?'width:'+ p.picWidth+'px;':'';
var height = p.picHeight?'height:'+ p.picHeight+'px;':'';
htmlArr.push('<div style="position: relative;'+width+ height+'">');//FIXME 是不是少了结束标志?
htmlArr.push('<img class="baseDrawing" src="'+ p.picUrl+'" style="'+width+height+'" />');
htmlArr.push('<div class="modal" style="position: absolute;z-index: 99;'+width+height+';left:0;float: left;top: 0;"></div>');
htmlArr.push('</div>');
g.getObj().html(htmlArr.join(""));
//补齐p.picWidth p.picHeight,marks绘图使用p的这两个参数作为大小缓存参考使用。FIXME 之后应该直接根据标记板大小作为计算依据。
var $img= g.getObj().find("img.baseDrawing").first();
p.picWidth=$img.width(); //当前展示的大小
p.picHeight=$img.height(); //当前展示的大小
},
//创建蒙版中的标注。(wrapper)生成一个div并放入标注的img对象picObj。
//返回该标注对象
addMarker:function(x,y,picObj){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $modal = $picture.find(".modal");//找到蒙版
var wrapper = $("<div style='position: absolute;' class='marker'/>");
//addMarker的时候增加单击事件
wrapper.bind("click",function(){//单击事件
g.selectedMarker = wrapper; //单击选中
}) ;
wrapper.css({left: x,top: y});
picObj.appendTo(wrapper);
$modal.append(wrapper);
if(p.isEdit){
//编辑状态下包装成可dd的dom
new Dragdrop({
target : wrapper[0] ,
area:[0,$picture.width()-wrapper.width(),0,$picture.height()-wrapper.height()]
});
}
return wrapper;
},
//
// 为界面元素增加操作事件功能
//
initEvent:function(){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $markObj = $("#"+p.markId);
var $modal = $picture.find(".modal");
//jq对象增加单击事件,用来增加标注对象
$picture.click(function(e){
if(g.getFlag()>0&& p.isEdit){ //编辑模式时才添加具体标注对象 //flag?难道是点击markid对象后的状态?
var target = $($markObj.find("img")[g.getFlag()-1]).clone(); //获取点击markObjdom对象(img)并复制一份。
//计算需要标注的位置
var left = e.clientX- $modal.parent()[0].offsetLeft;//offsetLeft?
var top = e.clientY-$modal.parent()[0].offsetTop;
//生成标注对象并包装标注图标
var wrapper = g.addMarker(left,top,target);
g.selectedMarker = wrapper; //刚刚新增的元素直接为选中元素。
g.setFlag( -1);
$picture.css({cursor:"pointer"}); //鼠标样式复原
//标注对象模板的src作为事件的参数,为标注对象增加双击事件
var url = target.attr("src");
var params = {x:left,y:top,url:url};
g.addDblEvent(wrapper,params); //为标注对象增加事件 (编辑模式时)
}
});
},
// 初始化标注对象模板工具
initCursor:function(){
var p = this.options;
var g = this;
var $picture = g.getObj(); //jq对象 标注板
var $markObj = $("#"+p.markId);
//增加标注对象模板的单击事件。设置状态,让鼠标可以在蒙版上增加标注。即鼠标选择的标注模板,准备在模板标注。
$markObj.find("img").click(function(e){
e.stopPropagation();
g.setFlag($markObj.find("img").index($(this))+1) ;
var src = $(this).attr("src");
src = src.substring(0,src.lastIndexOf(".")+1)+"cur";
$picture.css({cursor:"crosshair"}); //一旦点选标注模板则鼠标状态为十字样式
if(p.hasSelfCur){ //使用用户设定的鼠标样式
$picture.css({cursor:"url("+src+"),auto"});
}
});
},
//
//获取数据
loadData:function(){
var p = this.options;
var g = this;
$.ajax({
type:"post",
url: p.markerUrl,
dataType:"json",
success:function(data){
var marks=data.marks;
//读取数据后根据获取的数据直接构造标记
for(var i= 0;i<marks.length;i++){
var mark = marks[i];
var $pic = $("<img src='"+mark.url+"'/>") ;
//使用比例计算而不是使用点状设置
// var x=mark.x
// var y=mark.y
var x=Math.round(mark.xp*p.picWidth);
var y=Math.round(mark.yp*p.picHeight);
//
// alert(""+x+""+y);
var $wrap = g.addMarker(x,y,$pic);
if(!p.isEdit){ //当不是编辑模式时,提供一个viewClick函数回调,在非编辑模式中提供单击事件扩展。
$wrap.click(function(){
if(p.viewClick){
p.viewClick.call(this,$wrap,mark);
}
});
}else{ //如果是编辑模式
g.addDblEvent($wrap,mark) ;
}
}
}
});
},
//
//可为标注对象增加双击事件
//
addDblEvent:function(wrapper,params){
var p = this.options;
var g = this;
var $picture = g.getObj();
var $modal = $picture.find(".modal");
//标注对象的双击事件
//该部分做了扩展框架,主要是引用artDialog或iframe提供编辑框。并提供回调函数支持。
wrapper.dblclick(function(){
params.x = parseInt(wrapper.css("left"));
params.y = parseInt(wrapper.css("top"));
if(p.dblclick){ //自定义双击后的click。该部分更可以替换之后的hasDialoy的实现。内嵌弹出窗口是集成的默认实现。
p.dblclick.call(this,wrapper,params) ;
return;
}
if(! wrapper.attr("hasDialog")){
var dialog= $.artDialog({
title: p.popTitle,
width :p.popWidth,
height :p.popHeight,
left:parseInt($modal.parent()[0].offsetLeft)+(parseInt($picture.find("div").eq(0).width())-p.popWidth)/2,
top:parseInt($modal.parent()[0].offsetTop)+(parseInt($picture.find("div").eq(0).height())-p.popHeight)/2,
ok: function(){
if(p.okCallBack){
var $return = p.okCallBack.call(this);
if($return){
wrapper.removeAttr("hasDialog");
return true;
}
}
wrapper.removeAttr("hasDialog");
},cancel:function(){
wrapper.removeAttr("hasDialog");
if(p.cancelCallBack){
var $return = p.cancelCallBack.call(this);
if($return){
wrapper.removeAttr("hasDialog");
return true;
}
}
wrapper.removeAttr("hasDialog");
}
});
wrapper.attr("hasDialog",true);
//
var picPropertyUrl = g.getUrl();
for(var key in params){
picPropertyUrl +="&"+key+"="+params[key];
}
if(!p.frame){
$.ajax({
type:"post",
data:params,
url: g.getUrl(),
success:function(data){
dialog.content(data);
}
});
}else{
dialog.content("<iframe height='100%' width='100%' style='overflow:hidden' frameborder=0 src='"+picPropertyUrl+"'>");
}
}
})
},
//获取url的地址
getUrl:function(){
var p = this.options;
var url = p.url;
if(url.indexOf("?")>0){
return url;
}else{
return url +"?";
}
},
//用来获取设置标记板的状态,该状态是指用户点击标记对象模板后与一般状态的区别。-1标识一般状态 >0表示可以在标记板上单击增加标注。
setFlag:function(value){
this.flag = value;
},
getFlag:function(){
return this.flag;
},
//用来获取设置jq对象,即标记板主体。
setObj:function(obj){
this.obj = obj;
},
getObj:function(){
return this.obj;
},
/**
* 获取标注信息 ,返回JSON数组
* @return {Array}
*/
getMarkerInfo:function(){
var reObj={};
var ret = [];
var g = this;
var p = this.options;
var $picture = g.getObj();
var $modal = $picture.find(".modal");
//标注板信息
var markBoard={};
markBoard.picUrl=p.picUrl;
markBoard.width=p.picWidth;
markBoard.height=p.picHeight;
reObj.markBoard=markBoard;
//标注集合信息
var $markers = $(".marker",$modal);
var info;
var $marker;
for(var i=0;i<$markers.length;i++){
info = {};
$marker = $($markers[i]);
info.x = parseInt($marker.css("left"));
info.y = parseInt($marker.css("top"));
//增加百分比记录 FIXME临时使用option中的参数,其实应该提供一个缓存。
info.xp = info.x / p.picWidth;
info.yp = info.y / p.picHeight;
//
info.url = $marker.find("img").attr("src");
ret.push(info);
}
//复杂结构
reObj.marks=ret;
return reObj;
},
//删除所选标注对象
deleteSelectedMarker:function(){
var g = this;
if(g.selectedMarker){
g.selectedMarker.remove();
g.selectedMarker = null;
}
}
};
//可拖拽dom对象包装
Dragdrop = function(window){
var doc = window.document;
var E = {
on : function(el, type, fn){
el.addEventListener ?
el.addEventListener(type, fn, false) :
el.attachEvent ?
el.attachEvent("on" + type, fn) :
el['on'+type] = fn;
},
un : function(el,type,fn){
el.removeEventListener ?
el.removeEventListener(type, fn, false) :
el.detachEvent ?
el.detachEvent("on" + type, fn) :
el['on'+type] = null;
},
| return e || window.event;
}
};
return function(opt){
var conf = null, defaultConf, diffX, diffY;
function Config(opt){
this.target = opt.target;
this.bridge = opt.bridge;
this.dragable = opt.dragable != false;
this.dragX = opt.dragX != false;
this.dragY = opt.dragY != false;
this.area = opt.area;
this.callback = opt.callback;
}
function Dragdrop(opt){
if(!opt){return;}
conf = new Config(opt);
defaultConf = new Config(opt);
conf.bridge ?
E.on(conf.bridge,'mousedown',mousedown) :
E.on(conf.target,'mousedown',mousedown);
}
Dragdrop.prototype = {
dragX : function(){
conf.dragX = true;
conf.dragY = false;
},
dragY : function(b){
conf.dragY = true;
conf.dragX = false;
},
dragAll : function(){
conf.dragX = true;
conf.dragY = true;
},
setArea : function(a){
conf.area = a;
},
setBridge : function(b){
conf.bridge = b;
},
setDragable : function(b){
conf.dragable = b;
},
reStore : function(){
conf = new Config(defaultConf);
conf.target.style.top = '0px';
conf.target.style.left = '0px';
},
getDragX : function(){
return conf.dragX;
},
getDragY : function(){
return conf.dragY;
}
};
function mousedown(e){
e = E.evt(e);
var el = conf.target;
el.style.position = 'absolute';
el.style.cursor = 'move';
if(el.setCapture){ //IE
E.on(el, "losecapture", mouseup);
el.setCapture();
e.cancelBubble = true;
}else if(window.captureEvents){ //标准DOM
e.stopPropagation();
E.on(window, "blur", mouseup);
e.preventDefault();
}
diffX = e.clientX - el.offsetLeft;
diffY = e.clientY - el.offsetTop;
E.on(doc,'mousemove',mousemove);
E.on(doc,'mouseup',mouseup);
}
function mousemove(e){
var el = conf.target, e = E.evt(e), moveX = e.clientX - diffX, moveY = e.clientY - diffY;
var minX, maxX, minY, maxY;
if(conf.area){
minX = conf.area[0];
maxX = conf.area[1];
minY = conf.area[2];
maxY = conf.area[3];
moveX < minX && (moveX = minX); // left 最小值
moveX > maxX && (moveX = maxX); // left 最大值
moveY < minY && (moveY = minY); // top 最小值
moveY > maxY && (moveY = maxY); // top 最大值
}
if(conf.dragable){
conf.dragX && (el.style.left = moveX + 'px');
conf.dragY && (el.style.top = moveY + 'px');
if(conf.callback){
var obj = {moveX:moveX,moveY:moveY};
conf.callback.call(conf,obj);
}
}
}
function mouseup(e){
var el = conf.target;
el.style.cursor = 'default';
E.un(doc,'mousemove',mousemove);
E.un(doc,'mouseup',mouseup);
if(el.releaseCapture){ //IE
E.un(el, "losecapture", mouseup);
el.releaseCapture();
}
if(window.releaseEvents){ //标准DOM
E.un(window, "blur", mouseup);
}
}
return new Dragdrop(opt);
}
}(this);
})(jQuery); | evt : function(e){
| conditional_block |
pools.rs | use crate::*;
use serde::{Deserialize, Serialize};
impl BlockFrostApi {
endpoints! {
/// List of registered stake pools.
pools() -> Vec<String> => "/pools";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools/get"),
/// List of already retired pools.
pools_retired() -> Vec<RetiredPool> => "/pools/retired";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1retired/get"),
/// List of retiring stake pools.
pools_retiring() -> Vec<RetiringPool> => "/pools/retired";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1retiring/get"),
/// Pool information.
pools_by_id(pool_id: &str) -> Pool => "/pools/{pool_id}";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}/get"),
/// History of stake pool parameters over epochs.
pools_history(pool_id: &str) -> Vec<PoolHistory> => "/pools/{pool_id}/history";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1history/get"),
/// Stake pool registration metadata.
pools_metadata(pool_id: &str) -> PoolMetadata => "/pools/{pool_id}/metadata";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1metadata/get"),
/// Relays of a stake pool.
pools_relays(pool_id: &str) -> Vec<PoolRelay> => "/pools/{pool_id}/relays";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1relays/get"),
/// List of current stake pools delegators.
pools_delegators(pool_id: &str) -> Vec<PoolDelegator> => "/pools/{pool_id}/delegators";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1delegators/get"),
/// List of stake pool blocks.
pools_blocks(pool_id: &str) -> Vec<String> => "/pools/{pool_id}/blocks";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1blocks/get"),
/// List of certificate updates to the stake pool.
pools_updates(pool_id: &str) -> Vec<PoolUpdate> => "/pools/{pool_id}/updates";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1updates/get"),
}
}
/// Created by [`pools_retired`](BlockFrostApi::pools_retired) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RetiredPool {
/// Bech32 encoded pool ID.
pub pool_id: String,
/// Retirement epoch number.
pub epoch: Integer,
}
/// Created by [`pools_retiring`](BlockFrostApi::pools_retiring) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RetiringPool {
/// Bech32 encoded pool ID.
pub pool_id: String,
/// Retirement epoch number.
pub epoch: Integer,
}
/// Created by [`pools_by_id`](BlockFrostApi::pools_by_id) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Pool {
/// Bech32 pool ID.
pub pool_id: String,
/// Hexadecimal pool ID.
pub hex: String,
/// VRF key hash.
pub vrf_key: String,
/// Total minted blocks.
pub blocks_minted: Integer,
pub live_stake: String,
pub live_size: Float,
pub live_saturation: Float,
pub live_delegators: Integer,
pub active_stake: String,
pub active_size: Float,
/// Stake pool certificate pledge.
pub declared_pledge: String,
/// Stake pool urrent pledge.
pub live_pledge: String,
/// Margin tax cost of the stake pool.
pub margin_cost: Float,
/// Fixed tax cost of the stake pool.
pub fixed_cost: String,
/// Bech32 reward account of the stake pool.
pub reward_account: String,
pub owners: Vec<String>,
pub registration: Vec<String>,
pub retirement: Vec<String>,
}
/// Created by [`pools_history`](BlockFrostApi::pools_history) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PoolHistory {
/// Epoch number.
pub epoch: Integer,
/// Number of blocks created by pool.
pub blocks: Integer,
/// Active (Snapshot of live stake 2 epochs ago) stake in Lovelaces.
pub active_stake: String,
/// Pool size (percentage) of overall active stake at that epoch.
pub active_size: Float,
/// Number of delegators for epoch.
pub delegators_count: Integer,
/// Total rewards received before distribution to delegators.
pub rewards: String,
/// Pool operator rewards.
pub fees: String,
}
/// Created by [`pools_metadata`](BlockFrostApi::pools_metadata) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PoolMetadata {
/// Bech32 pool ID.
pub pool_id: String,
/// Hexadecimal pool ID.
pub hex: String,
/// URL to the stake pool metadata.
pub url: Option<String>,
/// Hash of the metadata file.
pub hash: Option<String>,
/// Ticker of the stake pool.
pub ticker: Option<String>,
/// Name of the stake pool.
pub name: Option<String>,
/// Description of the stake pool.
pub description: Option<String>,
/// Home page of the stake pool.
pub homepage: Option<String>,
}
/// Created by [`pools_relays`](BlockFrostApi::pools_relays) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PoolRelay {
/// IPv4 address of the relay.
pub ipv4: Option<String>,
/// IPv6 address of the relay.
pub ipv6: Option<String>,
/// DNS name of the relay.
pub dns: Option<String>,
/// DNS SRV entry of the relay.
pub dns_srv: Option<String>,
/// Network port of the relay.
pub port: Integer,
}
/// Created by [`pools_delegators`](BlockFrostApi::pools_delegators) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PoolDelegator {
/// Bech32 encoded stake addresses.
pub address: String,
/// Currently delegated amount.
pub live_stake: String,
}
/// Created by [`pools_updates`](BlockFrostApi::pools_updates) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PoolUpdate {
/// Transaction ID.
pub tx_hash: String,
/// Certificate within the transaction.
pub cert_index: Integer,
/// Action in the certificate.
pub action: ActionType, // "registered" | "deregistered"
}
#[cfg(test)]
mod tests {
use super::*;
test_schema! { test_pools, Vec<String>, r#"
[
"pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy",
"pool1hn7hlwrschqykupwwrtdfkvt2u4uaxvsgxyh6z63703p2knj288",
"pool1ztjyjfsh432eqetadf82uwuxklh28xc85zcphpwq6mmezavzad2"
]
"# }
test_schema! { test_pools_retired, Vec<RetiredPool>, r#"
[
{
"pool_id": "pool19u64770wqp6s95gkajc8udheske5e6ljmpq33awxk326zjaza0q",
"epoch": 225
},
{
"pool_id": "pool1dvla4zq98hpvacv20snndupjrqhuc79zl6gjap565nku6et5zdx",
"epoch": 215
},
{
"pool_id": "pool1wvccajt4eugjtf3k0ja3exjqdj7t8egsujwhcw4tzj4rzsxzw5w",
"epoch": 231
}
]
"# }
test_schema! { test_pools_retiring, Vec<RetiringPool>, r#"
[
{
"pool_id": "pool19u64770wqp6s95gkajc8udheske5e6ljmpq33awxk326zjaza0q",
"epoch": 225
},
{
"pool_id": "pool1dvla4zq98hpvacv20snndupjrqhuc79zl6gjap565nku6et5zdx",
"epoch": 215
},
{
"pool_id": "pool1wvccajt4eugjtf3k0ja3exjqdj7t8egsujwhcw4tzj4rzsxzw5w",
"epoch": 231
}
]
"# }
test_schema! { test_pools_by_id, Pool, r#"
{
"pool_id": "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy",
"hex": "0f292fcaa02b8b2f9b3c8f9fd8e0bb21abedb692a6d5058df3ef2735",
"vrf_key": "0b5245f9934ec2151116fb8ec00f35fd00e0aa3b075c4ed12cce440f999d8233",
"blocks_minted": 69,
"live_stake": "6900000000",
"live_size": 0.42,
"live_saturation": 0.93,
"live_delegators": 127,
"active_stake": "4200000000",
"active_size": 0.43,
"declared_pledge": "5000000000",
"live_pledge": "5000000001",
"margin_cost": 0.05,
"fixed_cost": "340000000",
"reward_account": "stake1uxkptsa4lkr55jleztw43t37vgdn88l6ghclfwuxld2eykgpgvg3f",
"owners": [
"stake1u98nnlkvkk23vtvf9273uq7cph5ww6u2yq2389psuqet90sv4xv9v"
],
"registration": [
"9f83e5484f543e05b52e99988272a31da373f3aab4c064c76db96643a355d9dc",
"7ce3b8c433bf401a190d58c8c483d8e3564dfd29ae8633c8b1b3e6c814403e95",
"3e6e1200ce92977c3fe5996bd4d7d7e192bcb7e231bc762f9f240c76766535b9"
],
"retirement": [
"252f622976d39e646815db75a77289cf16df4ad2b287dd8e3a889ce14c13d1a8"
]
}
"# }
test_schema! { test_pools_history, Vec<PoolHistory>, r#"
[
{
"epoch": 233,
"blocks": 22,
"active_stake": "20485965693569",
"active_size": 1.2345,
"delegators_count": 115,
"rewards": "206936253674159",
"fees": "1290968354"
}
]
"# }
test_schema! { test_pools_metadata, PoolMetadata, r#"
{
"pool_id": "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy",
"hex": "0f292fcaa02b8b2f9b3c8f9fd8e0bb21abedb692a6d5058df3ef2735",
"url": "https://stakenuts.com/mainnet.json",
"hash": "47c0c68cb57f4a5b4a87bad896fc274678e7aea98e200fa14a1cb40c0cab1d8c",
"ticker": "NUTS",
"name": "Stake Nuts",
"description": "The best pool ever",
"homepage": "https://stakentus.com/"
}
"# }
test_schema! { test_pools_relays, Vec<PoolRelay>, r#"
[
{
"ipv4": "4.4.4.4", | ]
"# }
test_schema! { test_pool_delegators, Vec<PoolDelegator>, r#"
[
{
"address": "stake1ux4vspfvwuus9uwyp5p3f0ky7a30jq5j80jxse0fr7pa56sgn8kha",
"live_stake": "1137959159981411"
},
{
"address": "stake1uylayej7esmarzd4mk4aru37zh9yz0luj3g9fsvgpfaxulq564r5u",
"live_stake": "16958865648"
},
{
"address": "stake1u8lr2pnrgf8f7vrs9lt79hc3sxm8s2w4rwvgpncks3axx6q93d4ck",
"live_stake": "18605647"
}
]
"# }
test_schema! { test_pools_blocks, Vec<String>, r#"
[
"d8982ca42cfe76b747cc681d35d671050a9e41e9cfe26573eb214e94fe6ff21d",
"026436c539e2ce84c7f77ffe669f4e4bbbb3b9c53512e5857dcba8bb0b4e9a8c",
"bcc8487f419b8c668a18ea2120822a05df6dfe1de1f0fac3feba88cf760f303c",
"86bf7b4a274e0f8ec9816171667c1b4a0cfc661dc21563f271acea9482b62df7"
]
"# }
test_schema! { test_pools_updates, Vec<PoolUpdate>, r#"
[
{
"tx_hash": "6804edf9712d2b619edb6ac86861fe93a730693183a262b165fcc1ba1bc99cad",
"cert_index": 0,
"action": "registered"
},
{
"tx_hash": "9c190bc1ac88b2ab0c05a82d7de8b71b67a9316377e865748a89d4426c0d3005",
"cert_index": 0,
"action": "deregistered"
},
{
"tx_hash": "e14a75b0eb2625de7055f1f580d70426311b78e0d36dd695a6bdc96c7b3d80e0",
"cert_index": 1,
"action": "registered"
}
]
"# }
} | "ipv6": "https://stakenuts.com/mainnet.json",
"dns": "relay1.stakenuts.com",
"dns_srv": "_relays._tcp.relays.stakenuts.com",
"port": 3001
} | random_line_split |
pools.rs | use crate::*;
use serde::{Deserialize, Serialize};
impl BlockFrostApi {
endpoints! {
/// List of registered stake pools.
pools() -> Vec<String> => "/pools";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools/get"),
/// List of already retired pools.
pools_retired() -> Vec<RetiredPool> => "/pools/retired";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1retired/get"),
/// List of retiring stake pools.
pools_retiring() -> Vec<RetiringPool> => "/pools/retired";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1retiring/get"),
/// Pool information.
pools_by_id(pool_id: &str) -> Pool => "/pools/{pool_id}";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}/get"),
/// History of stake pool parameters over epochs.
pools_history(pool_id: &str) -> Vec<PoolHistory> => "/pools/{pool_id}/history";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1history/get"),
/// Stake pool registration metadata.
pools_metadata(pool_id: &str) -> PoolMetadata => "/pools/{pool_id}/metadata";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1metadata/get"),
/// Relays of a stake pool.
pools_relays(pool_id: &str) -> Vec<PoolRelay> => "/pools/{pool_id}/relays";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1relays/get"),
/// List of current stake pools delegators.
pools_delegators(pool_id: &str) -> Vec<PoolDelegator> => "/pools/{pool_id}/delegators";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1delegators/get"),
/// List of stake pool blocks.
pools_blocks(pool_id: &str) -> Vec<String> => "/pools/{pool_id}/blocks";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1blocks/get"),
/// List of certificate updates to the stake pool.
pools_updates(pool_id: &str) -> Vec<PoolUpdate> => "/pools/{pool_id}/updates";
("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1updates/get"),
}
}
/// Created by [`pools_retired`](BlockFrostApi::pools_retired) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RetiredPool {
/// Bech32 encoded pool ID.
pub pool_id: String,
/// Retirement epoch number.
pub epoch: Integer,
}
/// Created by [`pools_retiring`](BlockFrostApi::pools_retiring) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RetiringPool {
/// Bech32 encoded pool ID.
pub pool_id: String,
/// Retirement epoch number.
pub epoch: Integer,
}
/// Created by [`pools_by_id`](BlockFrostApi::pools_by_id) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct | {
/// Bech32 pool ID.
pub pool_id: String,
/// Hexadecimal pool ID.
pub hex: String,
/// VRF key hash.
pub vrf_key: String,
/// Total minted blocks.
pub blocks_minted: Integer,
pub live_stake: String,
pub live_size: Float,
pub live_saturation: Float,
pub live_delegators: Integer,
pub active_stake: String,
pub active_size: Float,
/// Stake pool certificate pledge.
pub declared_pledge: String,
/// Stake pool urrent pledge.
pub live_pledge: String,
/// Margin tax cost of the stake pool.
pub margin_cost: Float,
/// Fixed tax cost of the stake pool.
pub fixed_cost: String,
/// Bech32 reward account of the stake pool.
pub reward_account: String,
pub owners: Vec<String>,
pub registration: Vec<String>,
pub retirement: Vec<String>,
}
/// Created by [`pools_history`](BlockFrostApi::pools_history) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PoolHistory {
/// Epoch number.
pub epoch: Integer,
/// Number of blocks created by pool.
pub blocks: Integer,
/// Active (Snapshot of live stake 2 epochs ago) stake in Lovelaces.
pub active_stake: String,
/// Pool size (percentage) of overall active stake at that epoch.
pub active_size: Float,
/// Number of delegators for epoch.
pub delegators_count: Integer,
/// Total rewards received before distribution to delegators.
pub rewards: String,
/// Pool operator rewards.
pub fees: String,
}
/// Created by [`pools_metadata`](BlockFrostApi::pools_metadata) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PoolMetadata {
/// Bech32 pool ID.
pub pool_id: String,
/// Hexadecimal pool ID.
pub hex: String,
/// URL to the stake pool metadata.
pub url: Option<String>,
/// Hash of the metadata file.
pub hash: Option<String>,
/// Ticker of the stake pool.
pub ticker: Option<String>,
/// Name of the stake pool.
pub name: Option<String>,
/// Description of the stake pool.
pub description: Option<String>,
/// Home page of the stake pool.
pub homepage: Option<String>,
}
/// Created by [`pools_relays`](BlockFrostApi::pools_relays) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PoolRelay {
/// IPv4 address of the relay.
pub ipv4: Option<String>,
/// IPv6 address of the relay.
pub ipv6: Option<String>,
/// DNS name of the relay.
pub dns: Option<String>,
/// DNS SRV entry of the relay.
pub dns_srv: Option<String>,
/// Network port of the relay.
pub port: Integer,
}
/// Created by [`pools_delegators`](BlockFrostApi::pools_delegators) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PoolDelegator {
/// Bech32 encoded stake addresses.
pub address: String,
/// Currently delegated amount.
pub live_stake: String,
}
/// Created by [`pools_updates`](BlockFrostApi::pools_updates) method.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PoolUpdate {
/// Transaction ID.
pub tx_hash: String,
/// Certificate within the transaction.
pub cert_index: Integer,
/// Action in the certificate.
pub action: ActionType, // "registered" | "deregistered"
}
#[cfg(test)]
mod tests {
use super::*;
test_schema! { test_pools, Vec<String>, r#"
[
"pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy",
"pool1hn7hlwrschqykupwwrtdfkvt2u4uaxvsgxyh6z63703p2knj288",
"pool1ztjyjfsh432eqetadf82uwuxklh28xc85zcphpwq6mmezavzad2"
]
"# }
test_schema! { test_pools_retired, Vec<RetiredPool>, r#"
[
{
"pool_id": "pool19u64770wqp6s95gkajc8udheske5e6ljmpq33awxk326zjaza0q",
"epoch": 225
},
{
"pool_id": "pool1dvla4zq98hpvacv20snndupjrqhuc79zl6gjap565nku6et5zdx",
"epoch": 215
},
{
"pool_id": "pool1wvccajt4eugjtf3k0ja3exjqdj7t8egsujwhcw4tzj4rzsxzw5w",
"epoch": 231
}
]
"# }
test_schema! { test_pools_retiring, Vec<RetiringPool>, r#"
[
{
"pool_id": "pool19u64770wqp6s95gkajc8udheske5e6ljmpq33awxk326zjaza0q",
"epoch": 225
},
{
"pool_id": "pool1dvla4zq98hpvacv20snndupjrqhuc79zl6gjap565nku6et5zdx",
"epoch": 215
},
{
"pool_id": "pool1wvccajt4eugjtf3k0ja3exjqdj7t8egsujwhcw4tzj4rzsxzw5w",
"epoch": 231
}
]
"# }
test_schema! { test_pools_by_id, Pool, r#"
{
"pool_id": "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy",
"hex": "0f292fcaa02b8b2f9b3c8f9fd8e0bb21abedb692a6d5058df3ef2735",
"vrf_key": "0b5245f9934ec2151116fb8ec00f35fd00e0aa3b075c4ed12cce440f999d8233",
"blocks_minted": 69,
"live_stake": "6900000000",
"live_size": 0.42,
"live_saturation": 0.93,
"live_delegators": 127,
"active_stake": "4200000000",
"active_size": 0.43,
"declared_pledge": "5000000000",
"live_pledge": "5000000001",
"margin_cost": 0.05,
"fixed_cost": "340000000",
"reward_account": "stake1uxkptsa4lkr55jleztw43t37vgdn88l6ghclfwuxld2eykgpgvg3f",
"owners": [
"stake1u98nnlkvkk23vtvf9273uq7cph5ww6u2yq2389psuqet90sv4xv9v"
],
"registration": [
"9f83e5484f543e05b52e99988272a31da373f3aab4c064c76db96643a355d9dc",
"7ce3b8c433bf401a190d58c8c483d8e3564dfd29ae8633c8b1b3e6c814403e95",
"3e6e1200ce92977c3fe5996bd4d7d7e192bcb7e231bc762f9f240c76766535b9"
],
"retirement": [
"252f622976d39e646815db75a77289cf16df4ad2b287dd8e3a889ce14c13d1a8"
]
}
"# }
test_schema! { test_pools_history, Vec<PoolHistory>, r#"
[
{
"epoch": 233,
"blocks": 22,
"active_stake": "20485965693569",
"active_size": 1.2345,
"delegators_count": 115,
"rewards": "206936253674159",
"fees": "1290968354"
}
]
"# }
test_schema! { test_pools_metadata, PoolMetadata, r#"
{
"pool_id": "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy",
"hex": "0f292fcaa02b8b2f9b3c8f9fd8e0bb21abedb692a6d5058df3ef2735",
"url": "https://stakenuts.com/mainnet.json",
"hash": "47c0c68cb57f4a5b4a87bad896fc274678e7aea98e200fa14a1cb40c0cab1d8c",
"ticker": "NUTS",
"name": "Stake Nuts",
"description": "The best pool ever",
"homepage": "https://stakentus.com/"
}
"# }
test_schema! { test_pools_relays, Vec<PoolRelay>, r#"
[
{
"ipv4": "4.4.4.4",
"ipv6": "https://stakenuts.com/mainnet.json",
"dns": "relay1.stakenuts.com",
"dns_srv": "_relays._tcp.relays.stakenuts.com",
"port": 3001
}
]
"# }
test_schema! { test_pool_delegators, Vec<PoolDelegator>, r#"
[
{
"address": "stake1ux4vspfvwuus9uwyp5p3f0ky7a30jq5j80jxse0fr7pa56sgn8kha",
"live_stake": "1137959159981411"
},
{
"address": "stake1uylayej7esmarzd4mk4aru37zh9yz0luj3g9fsvgpfaxulq564r5u",
"live_stake": "16958865648"
},
{
"address": "stake1u8lr2pnrgf8f7vrs9lt79hc3sxm8s2w4rwvgpncks3axx6q93d4ck",
"live_stake": "18605647"
}
]
"# }
test_schema! { test_pools_blocks, Vec<String>, r#"
[
"d8982ca42cfe76b747cc681d35d671050a9e41e9cfe26573eb214e94fe6ff21d",
"026436c539e2ce84c7f77ffe669f4e4bbbb3b9c53512e5857dcba8bb0b4e9a8c",
"bcc8487f419b8c668a18ea2120822a05df6dfe1de1f0fac3feba88cf760f303c",
"86bf7b4a274e0f8ec9816171667c1b4a0cfc661dc21563f271acea9482b62df7"
]
"# }
test_schema! { test_pools_updates, Vec<PoolUpdate>, r#"
[
{
"tx_hash": "6804edf9712d2b619edb6ac86861fe93a730693183a262b165fcc1ba1bc99cad",
"cert_index": 0,
"action": "registered"
},
{
"tx_hash": "9c190bc1ac88b2ab0c05a82d7de8b71b67a9316377e865748a89d4426c0d3005",
"cert_index": 0,
"action": "deregistered"
},
{
"tx_hash": "e14a75b0eb2625de7055f1f580d70426311b78e0d36dd695a6bdc96c7b3d80e0",
"cert_index": 1,
"action": "registered"
}
]
"# }
}
| Pool | identifier_name |
course-ripper.py | import requests
from lxml import html
import subprocess
import os
import re
from bs4 import BeautifulSoup
'''
ideas: change the course data structure toa a list of dictionaries. Then each
dictionary has a 's_type' (section type: just using 'type' is illadvised
because it is built in to Python) key-value pair and the TeX creator will know
how to treat them based on that type
course = [
{
s_type: 'title',
value: 'The Course Title',
},
{
s_type: 'section',
heading: 'A Basic Section',
value: 'Some text for the section.',
},
{
s_type: 'section_aims',
heading: 'Course Aims',
value: [
'An aim',
'Another aim',
'One more aim',
],
}
{
s_type: 'info',
heading: 'An Info Heading'
value: 'The info'
}
]
Problems this solves:
+ no need for weird counting in parsing the BSoup
Problems it causes:
- need to figure out how to determine what type data is as it is read in
'''
def get_coursepage(code):
"""Given a course code, requests the correspnding course page"""
url = 'http://gla.ac.uk/coursecatalogue/course/?code=' + code
print url
coursepage = requests.get(url)
return coursepage
def get_course_title_only(code):
"""Given a course code, requests the corresponding page and returns only
the name of that course. This is used to generate a list of courses which I
have opted to disclude with only their course codes
"""
coursepage = get_coursepage(code)
soup = BeautifulSoup(coursepage.content, 'lxml')
title = [soup.find_all('h1')[2].string][0]
return title
def new_dict(heading, value):
"""Creates a dictionary with a heading-value pair, which is the structure
of all the sections in the courses dictionary
"""
value = value.replace('%', '\%').replace('&', '\&').replace(u'\xa0', ' ')
# Currently encoding is causeing me problems - the quick fix below removes
# all the characters that have broken the code so far. This solution is not
# likely to work if more courses were added
value = value.replace(u'\u25a0', '\\break').replace(u'\u037e', ';')
return {
'heading': heading,
'value': value,
}
def get_info_list(info_string, course):
"""Each course page has a small info section at the beginning, which I had
to extract and formulate in a different way to the main sections. This
function constructs the dictionary entries for he course when given a
string with all the details required for the info section
TODO:
There's definitely a better way to do this.
"""
info_list = []
split_on_newline = info_string.split("\n")
for elem in split_on_newline:
split = elem.split(": ")
for s in split:
info_list.append(s)
info_list = info_list[1:-1]
info_tags = [
'session', 'school', 'credits', 'level', 'offered',
'visiting_students', 'erasmus_students',
]
i = 0
for info_tag in info_tags:
course[info_tag] = new_dict(
info_list[i] + ': ', info_list[i + 1])
i += 2
return course
def bsoup(coursepage):
"""Given a course page, takes the context and parses it to extract all the
useful information and construct a dictionary with the information
corresponding to assigned names ready to be written into the TeX file
TODO:
What a mess. There should be a way to do this by counting/incrementing.
"""
soup = BeautifulSoup(coursepage.content, 'lxml')
h1 = soup.find_all('h1')[2]
html = h1.find_next_siblings()
all_strings = [h1.string]
for div in html:
try:
text = div.get_text()
except:
text = div.string
if text is not None:
all_strings.append(text)
course = {'title': all_strings[0]}
course = get_info_list(all_strings[1], course)
course['description'] = new_dict(all_strings[2], all_strings[3])
course['timetable'] = new_dict(all_strings[4], all_strings[5])
course['requirements_of_entry'] = new_dict(all_strings[6], all_strings[7])
course['excluded_courses'] = new_dict(all_strings[8], all_strings[9])
course['co_requisites'] = new_dict(all_strings[10], all_strings[11])
course['assessment_weighting'] = new_dict(all_strings[12], all_strings[13])
course['aims'] = new_dict(all_strings[17], all_strings[18])
date = all_strings[14].split(': ')
course['assessment_date'] = new_dict(date[0] + ": ", date[1])
course['learning_outcomes'] = new_dict(all_strings[19], all_strings[20])
# TODO Doesn't parse Minimum Requirement for Award of Credit or
# Reassessment Options
return course
def create_not_included_list(codes):
"""Given a list of course codes, ge5t their corresponding titles and format
them in a bulletted TeX list. This is used to indicate in the abstract
which courses have been deliberately discluded from the document
"""
string = '\\begin{itemize}\n'
for code in codes:
title = get_course_title_only(code)
string += '\\item{' + title + '}\n'
string += '\\end{itemize}\n'
return string
def write_to_latex(codelist, unwanted_courses):
"""Constructs the entire TeX document from all the courses with key
document details (like author and table of contents)
"""
# TODO: investigate a way to add large amounts of text outside of the
# function
abstract01 = "I created this document to practice parsing html and using\
tools like Beautiful Soup which I've previously had little experience\
in. As a result, it's not perfect.\\newline\
It is also a slightly condensed all-in-one-place look at a selection\
of courses that are available for fourth year computer science\
students at the University of Glasgow. For the purposes of clarity I\
have removed several courses from this selection. The following\
courses have been omitted:"
abstract02 = "For more insight into the project, to report issues or to\
inspect the code, have a look at the GitHub:\
\\url{https://github.com/IS0metric/course-ripper}"
unincluded = create_not_included_list(unwanted_courses)
with open('courses.tex', 'w') as f:
# TODO Try and move all this to a separate function?
# TODO: Check if it's more efficient to write a single, massive string
# to file
f.write('\\documentclass{hitec}\n')
f.write('\\usepackage[document]{ragged2e}\n')
f.write('\\usepackage{url}\n')
f.write('\\usepackage{hyperref}\n')
f.write('\\setcounter{tocdepth}{4}\n')
f.write('\\begin{document}\n')
f.write('\\title{Fourth Year (2016-17) Courses}\n')
f.write('\\author{Jack Parkinson}\n')
f.write('\\date{August 2016}\n')
f.write('\\maketitle\n')
f.write('\\abstract{' + abstract01 + unincluded + abstract02 + '}\n')
f.write('\\newpage\n\n')
f.write('\\tableofcontents\n')
f.write('\\newpage\n\n')
# TODO: Look into alternatives to the three lists
all_courses = []
sem1_courses = []
sem2_courses = []
for code in codelist:
course = bsoup(get_coursepage(code))
if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':
all_courses.append(course)
elif "1" in course['offered']['value']:
sem1_courses.append(course)
elif "2" in course['offered']['value']:
sem2_courses.append(course)
f.write('\\section{Semester 1 and 2 Courses}\n\n')
for course in all_courses:
f.write(latex_course(course))
f.write('\\section{Semester 1 Only Courses}\n\n')
for course in sem1_courses:
f.write(latex_course(course))
f.write('\\section{Semester 2 Only Courses}\n\n')
for course in sem2_courses:
f.write(latex_course(course))
f.write('\\end{document}')
return None
def latex_info(info):
"""Provides the special treatment that the info section requires"""
return '\\textbf{' + info['heading'] + '} ' + info['value'] + ' \\break\n'
def | (section):
"""Creates a TeX formatted string for a given subsubsection"""
string = '\\subsubsection*{' + section['heading'] + '}\n'
string += section['value'] + '\n'
return string
def latex_course(course):
"""Creates a TeX formatted string for a course"""
basic_info_list = [
'session', 'school', 'credits', 'level', 'offered',
'visiting_students', 'erasmus_students'
]
generic_subsection_list = [
'description', 'timetable', 'requirements_of_entry',
'excluded_courses', 'co_requisites', 'assessment_weighting'
]
string = '\\subsection{' + course["title"] + '}\n'
for info in basic_info_list:
string += latex_info(course[info])
for subsection in generic_subsection_list:
string += latex_subsection(course[subsection])
string += '\\break \\textbf{' + course['assessment_date'][
'heading'] + '}' + course['assessment_date']['value'] + '\n'
string += latex_subsection(course['aims'])
string += '\\subsubsection*{' + \
course['learning_outcomes']['heading'] + '}\n'
outcome_list = re.split(
'\d+\. ', course['learning_outcomes']['value'])
string += outcome_list[0] + '\n'
string += '\\begin{enumerate}\n'
for i in outcome_list[1:-1]:
string += '\\item ' + i + '\n'
string += '\\end{enumerate}\n'
return string
def create_tex(unwanted_courses, wanted_courses=None):
"""Creates the TeX document from the Computer Science Course Catalog"""
page = requests.get(
'http://gla.ac.uk/coursecatalogue/courselist/' +
'?code=REG30200000&name=School+of+Computing+Science')
tree = html.fromstring(page.content)
spans = tree.xpath('//span/text()')
codes = []
if wanted_courses is None:
for s in spans:
if s[0:4] == "COMP" and s[7] == '4' and s not in unwanted_courses:
codes.append(s)
else:
for s in wanted_courses:
codes.append(s)
write_to_latex(codes, unwanted_courses)
return None
def pdflatex(unwanted_courses):
"""Generates a TeX document and then runs the pdflatex command to create a
PDF from the TeX
"""
create_tex(unwanted_courses)
cmd = ['pdflatex', '-interaction', 'nonstopmode', 'courses.tex']
proc = subprocess.Popen(cmd)
proc.communicate()
return None
if __name__ == "__main__":
# List of deliberately excluded courses
unwanted_courses = [
'COMPSCI4010', 'COMPSCI4009', 'COMPSCI4013', 'COMPSCI4024P',
'COMPSCI4014', 'COMPSCI4012', 'COMPSCI4011', 'COMPSCI4038',
'COMPSCI4015', 'COMPSCI4016', 'COMPSCI4046', 'COMPSCI4047',
'COMPSCI4044', 'COMPSCI4070', 'COMPSCI4038',
]
create_tex(unwanted_courses)
| latex_subsection | identifier_name |
course-ripper.py | import requests
from lxml import html
import subprocess
import os
import re
from bs4 import BeautifulSoup
'''
ideas: change the course data structure toa a list of dictionaries. Then each
dictionary has a 's_type' (section type: just using 'type' is illadvised
because it is built in to Python) key-value pair and the TeX creator will know
how to treat them based on that type
course = [
{
s_type: 'title',
value: 'The Course Title',
},
{
s_type: 'section',
heading: 'A Basic Section',
value: 'Some text for the section.',
},
{
s_type: 'section_aims',
heading: 'Course Aims',
value: [
'An aim',
'Another aim',
'One more aim',
],
}
{
s_type: 'info',
heading: 'An Info Heading'
value: 'The info'
}
]
Problems this solves:
+ no need for weird counting in parsing the BSoup
Problems it causes:
- need to figure out how to determine what type data is as it is read in
'''
def get_coursepage(code):
"""Given a course code, requests the correspnding course page"""
url = 'http://gla.ac.uk/coursecatalogue/course/?code=' + code
print url
coursepage = requests.get(url)
return coursepage
def get_course_title_only(code):
"""Given a course code, requests the corresponding page and returns only
the name of that course. This is used to generate a list of courses which I
have opted to disclude with only their course codes
"""
coursepage = get_coursepage(code)
soup = BeautifulSoup(coursepage.content, 'lxml')
title = [soup.find_all('h1')[2].string][0]
return title
def new_dict(heading, value):
"""Creates a dictionary with a heading-value pair, which is the structure
of all the sections in the courses dictionary
"""
value = value.replace('%', '\%').replace('&', '\&').replace(u'\xa0', ' ')
# Currently encoding is causeing me problems - the quick fix below removes
# all the characters that have broken the code so far. This solution is not
# likely to work if more courses were added
value = value.replace(u'\u25a0', '\\break').replace(u'\u037e', ';')
return {
'heading': heading,
'value': value,
}
def get_info_list(info_string, course):
"""Each course page has a small info section at the beginning, which I had
to extract and formulate in a different way to the main sections. This
function constructs the dictionary entries for he course when given a
string with all the details required for the info section
TODO:
There's definitely a better way to do this.
"""
info_list = []
split_on_newline = info_string.split("\n")
for elem in split_on_newline:
split = elem.split(": ")
for s in split:
info_list.append(s)
info_list = info_list[1:-1]
info_tags = [
'session', 'school', 'credits', 'level', 'offered',
'visiting_students', 'erasmus_students',
]
i = 0
for info_tag in info_tags:
|
return course
def bsoup(coursepage):
"""Given a course page, takes the context and parses it to extract all the
useful information and construct a dictionary with the information
corresponding to assigned names ready to be written into the TeX file
TODO:
What a mess. There should be a way to do this by counting/incrementing.
"""
soup = BeautifulSoup(coursepage.content, 'lxml')
h1 = soup.find_all('h1')[2]
html = h1.find_next_siblings()
all_strings = [h1.string]
for div in html:
try:
text = div.get_text()
except:
text = div.string
if text is not None:
all_strings.append(text)
course = {'title': all_strings[0]}
course = get_info_list(all_strings[1], course)
course['description'] = new_dict(all_strings[2], all_strings[3])
course['timetable'] = new_dict(all_strings[4], all_strings[5])
course['requirements_of_entry'] = new_dict(all_strings[6], all_strings[7])
course['excluded_courses'] = new_dict(all_strings[8], all_strings[9])
course['co_requisites'] = new_dict(all_strings[10], all_strings[11])
course['assessment_weighting'] = new_dict(all_strings[12], all_strings[13])
course['aims'] = new_dict(all_strings[17], all_strings[18])
date = all_strings[14].split(': ')
course['assessment_date'] = new_dict(date[0] + ": ", date[1])
course['learning_outcomes'] = new_dict(all_strings[19], all_strings[20])
# TODO Doesn't parse Minimum Requirement for Award of Credit or
# Reassessment Options
return course
def create_not_included_list(codes):
"""Given a list of course codes, ge5t their corresponding titles and format
them in a bulletted TeX list. This is used to indicate in the abstract
which courses have been deliberately discluded from the document
"""
string = '\\begin{itemize}\n'
for code in codes:
title = get_course_title_only(code)
string += '\\item{' + title + '}\n'
string += '\\end{itemize}\n'
return string
def write_to_latex(codelist, unwanted_courses):
"""Constructs the entire TeX document from all the courses with key
document details (like author and table of contents)
"""
# TODO: investigate a way to add large amounts of text outside of the
# function
abstract01 = "I created this document to practice parsing html and using\
tools like Beautiful Soup which I've previously had little experience\
in. As a result, it's not perfect.\\newline\
It is also a slightly condensed all-in-one-place look at a selection\
of courses that are available for fourth year computer science\
students at the University of Glasgow. For the purposes of clarity I\
have removed several courses from this selection. The following\
courses have been omitted:"
abstract02 = "For more insight into the project, to report issues or to\
inspect the code, have a look at the GitHub:\
\\url{https://github.com/IS0metric/course-ripper}"
unincluded = create_not_included_list(unwanted_courses)
with open('courses.tex', 'w') as f:
# TODO Try and move all this to a separate function?
# TODO: Check if it's more efficient to write a single, massive string
# to file
f.write('\\documentclass{hitec}\n')
f.write('\\usepackage[document]{ragged2e}\n')
f.write('\\usepackage{url}\n')
f.write('\\usepackage{hyperref}\n')
f.write('\\setcounter{tocdepth}{4}\n')
f.write('\\begin{document}\n')
f.write('\\title{Fourth Year (2016-17) Courses}\n')
f.write('\\author{Jack Parkinson}\n')
f.write('\\date{August 2016}\n')
f.write('\\maketitle\n')
f.write('\\abstract{' + abstract01 + unincluded + abstract02 + '}\n')
f.write('\\newpage\n\n')
f.write('\\tableofcontents\n')
f.write('\\newpage\n\n')
# TODO: Look into alternatives to the three lists
all_courses = []
sem1_courses = []
sem2_courses = []
for code in codelist:
course = bsoup(get_coursepage(code))
if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':
all_courses.append(course)
elif "1" in course['offered']['value']:
sem1_courses.append(course)
elif "2" in course['offered']['value']:
sem2_courses.append(course)
f.write('\\section{Semester 1 and 2 Courses}\n\n')
for course in all_courses:
f.write(latex_course(course))
f.write('\\section{Semester 1 Only Courses}\n\n')
for course in sem1_courses:
f.write(latex_course(course))
f.write('\\section{Semester 2 Only Courses}\n\n')
for course in sem2_courses:
f.write(latex_course(course))
f.write('\\end{document}')
return None
def latex_info(info):
"""Provides the special treatment that the info section requires"""
return '\\textbf{' + info['heading'] + '} ' + info['value'] + ' \\break\n'
def latex_subsection(section):
"""Creates a TeX formatted string for a given subsubsection"""
string = '\\subsubsection*{' + section['heading'] + '}\n'
string += section['value'] + '\n'
return string
def latex_course(course):
"""Creates a TeX formatted string for a course"""
basic_info_list = [
'session', 'school', 'credits', 'level', 'offered',
'visiting_students', 'erasmus_students'
]
generic_subsection_list = [
'description', 'timetable', 'requirements_of_entry',
'excluded_courses', 'co_requisites', 'assessment_weighting'
]
string = '\\subsection{' + course["title"] + '}\n'
for info in basic_info_list:
string += latex_info(course[info])
for subsection in generic_subsection_list:
string += latex_subsection(course[subsection])
string += '\\break \\textbf{' + course['assessment_date'][
'heading'] + '}' + course['assessment_date']['value'] + '\n'
string += latex_subsection(course['aims'])
string += '\\subsubsection*{' + \
course['learning_outcomes']['heading'] + '}\n'
outcome_list = re.split(
'\d+\. ', course['learning_outcomes']['value'])
string += outcome_list[0] + '\n'
string += '\\begin{enumerate}\n'
for i in outcome_list[1:-1]:
string += '\\item ' + i + '\n'
string += '\\end{enumerate}\n'
return string
def create_tex(unwanted_courses, wanted_courses=None):
"""Creates the TeX document from the Computer Science Course Catalog"""
page = requests.get(
'http://gla.ac.uk/coursecatalogue/courselist/' +
'?code=REG30200000&name=School+of+Computing+Science')
tree = html.fromstring(page.content)
spans = tree.xpath('//span/text()')
codes = []
if wanted_courses is None:
for s in spans:
if s[0:4] == "COMP" and s[7] == '4' and s not in unwanted_courses:
codes.append(s)
else:
for s in wanted_courses:
codes.append(s)
write_to_latex(codes, unwanted_courses)
return None
def pdflatex(unwanted_courses):
"""Generates a TeX document and then runs the pdflatex command to create a
PDF from the TeX
"""
create_tex(unwanted_courses)
cmd = ['pdflatex', '-interaction', 'nonstopmode', 'courses.tex']
proc = subprocess.Popen(cmd)
proc.communicate()
return None
if __name__ == "__main__":
# List of deliberately excluded courses
unwanted_courses = [
'COMPSCI4010', 'COMPSCI4009', 'COMPSCI4013', 'COMPSCI4024P',
'COMPSCI4014', 'COMPSCI4012', 'COMPSCI4011', 'COMPSCI4038',
'COMPSCI4015', 'COMPSCI4016', 'COMPSCI4046', 'COMPSCI4047',
'COMPSCI4044', 'COMPSCI4070', 'COMPSCI4038',
]
create_tex(unwanted_courses)
| course[info_tag] = new_dict(
info_list[i] + ': ', info_list[i + 1])
i += 2 | conditional_block |
course-ripper.py | import requests
from lxml import html
import subprocess
import os
import re
from bs4 import BeautifulSoup
'''
ideas: change the course data structure toa a list of dictionaries. Then each
dictionary has a 's_type' (section type: just using 'type' is illadvised
because it is built in to Python) key-value pair and the TeX creator will know
how to treat them based on that type
course = [
{
s_type: 'title',
value: 'The Course Title',
},
{
s_type: 'section',
heading: 'A Basic Section',
value: 'Some text for the section.',
},
{
s_type: 'section_aims',
heading: 'Course Aims',
value: [
'An aim',
'Another aim',
'One more aim',
],
}
{
s_type: 'info',
heading: 'An Info Heading'
value: 'The info'
}
]
Problems this solves:
+ no need for weird counting in parsing the BSoup
Problems it causes:
- need to figure out how to determine what type data is as it is read in
'''
def get_coursepage(code):
"""Given a course code, requests the correspnding course page"""
url = 'http://gla.ac.uk/coursecatalogue/course/?code=' + code
print url
coursepage = requests.get(url)
return coursepage
def get_course_title_only(code):
"""Given a course code, requests the corresponding page and returns only
the name of that course. This is used to generate a list of courses which I
have opted to disclude with only their course codes
"""
coursepage = get_coursepage(code)
soup = BeautifulSoup(coursepage.content, 'lxml')
title = [soup.find_all('h1')[2].string][0]
return title
def new_dict(heading, value):
|
def get_info_list(info_string, course):
"""Each course page has a small info section at the beginning, which I had
to extract and formulate in a different way to the main sections. This
function constructs the dictionary entries for he course when given a
string with all the details required for the info section
TODO:
There's definitely a better way to do this.
"""
info_list = []
split_on_newline = info_string.split("\n")
for elem in split_on_newline:
split = elem.split(": ")
for s in split:
info_list.append(s)
info_list = info_list[1:-1]
info_tags = [
'session', 'school', 'credits', 'level', 'offered',
'visiting_students', 'erasmus_students',
]
i = 0
for info_tag in info_tags:
course[info_tag] = new_dict(
info_list[i] + ': ', info_list[i + 1])
i += 2
return course
def bsoup(coursepage):
"""Given a course page, takes the context and parses it to extract all the
useful information and construct a dictionary with the information
corresponding to assigned names ready to be written into the TeX file
TODO:
What a mess. There should be a way to do this by counting/incrementing.
"""
soup = BeautifulSoup(coursepage.content, 'lxml')
h1 = soup.find_all('h1')[2]
html = h1.find_next_siblings()
all_strings = [h1.string]
for div in html:
try:
text = div.get_text()
except:
text = div.string
if text is not None:
all_strings.append(text)
course = {'title': all_strings[0]}
course = get_info_list(all_strings[1], course)
course['description'] = new_dict(all_strings[2], all_strings[3])
course['timetable'] = new_dict(all_strings[4], all_strings[5])
course['requirements_of_entry'] = new_dict(all_strings[6], all_strings[7])
course['excluded_courses'] = new_dict(all_strings[8], all_strings[9])
course['co_requisites'] = new_dict(all_strings[10], all_strings[11])
course['assessment_weighting'] = new_dict(all_strings[12], all_strings[13])
course['aims'] = new_dict(all_strings[17], all_strings[18])
date = all_strings[14].split(': ')
course['assessment_date'] = new_dict(date[0] + ": ", date[1])
course['learning_outcomes'] = new_dict(all_strings[19], all_strings[20])
# TODO Doesn't parse Minimum Requirement for Award of Credit or
# Reassessment Options
return course
def create_not_included_list(codes):
"""Given a list of course codes, ge5t their corresponding titles and format
them in a bulletted TeX list. This is used to indicate in the abstract
which courses have been deliberately discluded from the document
"""
string = '\\begin{itemize}\n'
for code in codes:
title = get_course_title_only(code)
string += '\\item{' + title + '}\n'
string += '\\end{itemize}\n'
return string
def write_to_latex(codelist, unwanted_courses):
"""Constructs the entire TeX document from all the courses with key
document details (like author and table of contents)
"""
# TODO: investigate a way to add large amounts of text outside of the
# function
abstract01 = "I created this document to practice parsing html and using\
tools like Beautiful Soup which I've previously had little experience\
in. As a result, it's not perfect.\\newline\
It is also a slightly condensed all-in-one-place look at a selection\
of courses that are available for fourth year computer science\
students at the University of Glasgow. For the purposes of clarity I\
have removed several courses from this selection. The following\
courses have been omitted:"
abstract02 = "For more insight into the project, to report issues or to\
inspect the code, have a look at the GitHub:\
\\url{https://github.com/IS0metric/course-ripper}"
unincluded = create_not_included_list(unwanted_courses)
with open('courses.tex', 'w') as f:
# TODO Try and move all this to a separate function?
# TODO: Check if it's more efficient to write a single, massive string
# to file
f.write('\\documentclass{hitec}\n')
f.write('\\usepackage[document]{ragged2e}\n')
f.write('\\usepackage{url}\n')
f.write('\\usepackage{hyperref}\n')
f.write('\\setcounter{tocdepth}{4}\n')
f.write('\\begin{document}\n')
f.write('\\title{Fourth Year (2016-17) Courses}\n')
f.write('\\author{Jack Parkinson}\n')
f.write('\\date{August 2016}\n')
f.write('\\maketitle\n')
f.write('\\abstract{' + abstract01 + unincluded + abstract02 + '}\n')
f.write('\\newpage\n\n')
f.write('\\tableofcontents\n')
f.write('\\newpage\n\n')
# TODO: Look into alternatives to the three lists
all_courses = []
sem1_courses = []
sem2_courses = []
for code in codelist:
course = bsoup(get_coursepage(code))
if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':
all_courses.append(course)
elif "1" in course['offered']['value']:
sem1_courses.append(course)
elif "2" in course['offered']['value']:
sem2_courses.append(course)
f.write('\\section{Semester 1 and 2 Courses}\n\n')
for course in all_courses:
f.write(latex_course(course))
f.write('\\section{Semester 1 Only Courses}\n\n')
for course in sem1_courses:
f.write(latex_course(course))
f.write('\\section{Semester 2 Only Courses}\n\n')
for course in sem2_courses:
f.write(latex_course(course))
f.write('\\end{document}')
return None
def latex_info(info):
"""Provides the special treatment that the info section requires"""
return '\\textbf{' + info['heading'] + '} ' + info['value'] + ' \\break\n'
def latex_subsection(section):
"""Creates a TeX formatted string for a given subsubsection"""
string = '\\subsubsection*{' + section['heading'] + '}\n'
string += section['value'] + '\n'
return string
def latex_course(course):
"""Creates a TeX formatted string for a course"""
basic_info_list = [
'session', 'school', 'credits', 'level', 'offered',
'visiting_students', 'erasmus_students'
]
generic_subsection_list = [
'description', 'timetable', 'requirements_of_entry',
'excluded_courses', 'co_requisites', 'assessment_weighting'
]
string = '\\subsection{' + course["title"] + '}\n'
for info in basic_info_list:
string += latex_info(course[info])
for subsection in generic_subsection_list:
string += latex_subsection(course[subsection])
string += '\\break \\textbf{' + course['assessment_date'][
'heading'] + '}' + course['assessment_date']['value'] + '\n'
string += latex_subsection(course['aims'])
string += '\\subsubsection*{' + \
course['learning_outcomes']['heading'] + '}\n'
outcome_list = re.split(
'\d+\. ', course['learning_outcomes']['value'])
string += outcome_list[0] + '\n'
string += '\\begin{enumerate}\n'
for i in outcome_list[1:-1]:
string += '\\item ' + i + '\n'
string += '\\end{enumerate}\n'
return string
def create_tex(unwanted_courses, wanted_courses=None):
"""Creates the TeX document from the Computer Science Course Catalog"""
page = requests.get(
'http://gla.ac.uk/coursecatalogue/courselist/' +
'?code=REG30200000&name=School+of+Computing+Science')
tree = html.fromstring(page.content)
spans = tree.xpath('//span/text()')
codes = []
if wanted_courses is None:
for s in spans:
if s[0:4] == "COMP" and s[7] == '4' and s not in unwanted_courses:
codes.append(s)
else:
for s in wanted_courses:
codes.append(s)
write_to_latex(codes, unwanted_courses)
return None
def pdflatex(unwanted_courses):
"""Generates a TeX document and then runs the pdflatex command to create a
PDF from the TeX
"""
create_tex(unwanted_courses)
cmd = ['pdflatex', '-interaction', 'nonstopmode', 'courses.tex']
proc = subprocess.Popen(cmd)
proc.communicate()
return None
if __name__ == "__main__":
# List of deliberately excluded courses
unwanted_courses = [
'COMPSCI4010', 'COMPSCI4009', 'COMPSCI4013', 'COMPSCI4024P',
'COMPSCI4014', 'COMPSCI4012', 'COMPSCI4011', 'COMPSCI4038',
'COMPSCI4015', 'COMPSCI4016', 'COMPSCI4046', 'COMPSCI4047',
'COMPSCI4044', 'COMPSCI4070', 'COMPSCI4038',
]
create_tex(unwanted_courses)
| """Creates a dictionary with a heading-value pair, which is the structure
of all the sections in the courses dictionary
"""
value = value.replace('%', '\%').replace('&', '\&').replace(u'\xa0', ' ')
# Currently encoding is causeing me problems - the quick fix below removes
# all the characters that have broken the code so far. This solution is not
# likely to work if more courses were added
value = value.replace(u'\u25a0', '\\break').replace(u'\u037e', ';')
return {
'heading': heading,
'value': value,
} | identifier_body |
course-ripper.py | import requests
from lxml import html
import subprocess
import os
import re
from bs4 import BeautifulSoup
'''
ideas: change the course data structure toa a list of dictionaries. Then each
dictionary has a 's_type' (section type: just using 'type' is illadvised
because it is built in to Python) key-value pair and the TeX creator will know
how to treat them based on that type
course = [
{
s_type: 'title',
value: 'The Course Title',
},
{
s_type: 'section',
heading: 'A Basic Section',
value: 'Some text for the section.',
},
{
s_type: 'section_aims',
heading: 'Course Aims',
value: [
'An aim',
'Another aim',
'One more aim',
],
}
{
s_type: 'info',
heading: 'An Info Heading'
value: 'The info'
}
]
Problems this solves:
+ no need for weird counting in parsing the BSoup
Problems it causes:
- need to figure out how to determine what type data is as it is read in
'''
def get_coursepage(code):
"""Given a course code, requests the correspnding course page"""
url = 'http://gla.ac.uk/coursecatalogue/course/?code=' + code
print url
coursepage = requests.get(url)
return coursepage
def get_course_title_only(code):
"""Given a course code, requests the corresponding page and returns only
the name of that course. This is used to generate a list of courses which I
have opted to disclude with only their course codes
"""
coursepage = get_coursepage(code)
soup = BeautifulSoup(coursepage.content, 'lxml')
title = [soup.find_all('h1')[2].string][0]
return title
def new_dict(heading, value):
"""Creates a dictionary with a heading-value pair, which is the structure
of all the sections in the courses dictionary
"""
value = value.replace('%', '\%').replace('&', '\&').replace(u'\xa0', ' ')
# Currently encoding is causeing me problems - the quick fix below removes
# all the characters that have broken the code so far. This solution is not
# likely to work if more courses were added
value = value.replace(u'\u25a0', '\\break').replace(u'\u037e', ';')
return {
'heading': heading,
'value': value,
}
def get_info_list(info_string, course):
"""Each course page has a small info section at the beginning, which I had
to extract and formulate in a different way to the main sections. This
function constructs the dictionary entries for he course when given a
string with all the details required for the info section
TODO: | There's definitely a better way to do this.
"""
info_list = []
split_on_newline = info_string.split("\n")
for elem in split_on_newline:
split = elem.split(": ")
for s in split:
info_list.append(s)
info_list = info_list[1:-1]
info_tags = [
'session', 'school', 'credits', 'level', 'offered',
'visiting_students', 'erasmus_students',
]
i = 0
for info_tag in info_tags:
course[info_tag] = new_dict(
info_list[i] + ': ', info_list[i + 1])
i += 2
return course
def bsoup(coursepage):
"""Given a course page, takes the context and parses it to extract all the
useful information and construct a dictionary with the information
corresponding to assigned names ready to be written into the TeX file
TODO:
What a mess. There should be a way to do this by counting/incrementing.
"""
soup = BeautifulSoup(coursepage.content, 'lxml')
h1 = soup.find_all('h1')[2]
html = h1.find_next_siblings()
all_strings = [h1.string]
for div in html:
try:
text = div.get_text()
except:
text = div.string
if text is not None:
all_strings.append(text)
course = {'title': all_strings[0]}
course = get_info_list(all_strings[1], course)
course['description'] = new_dict(all_strings[2], all_strings[3])
course['timetable'] = new_dict(all_strings[4], all_strings[5])
course['requirements_of_entry'] = new_dict(all_strings[6], all_strings[7])
course['excluded_courses'] = new_dict(all_strings[8], all_strings[9])
course['co_requisites'] = new_dict(all_strings[10], all_strings[11])
course['assessment_weighting'] = new_dict(all_strings[12], all_strings[13])
course['aims'] = new_dict(all_strings[17], all_strings[18])
date = all_strings[14].split(': ')
course['assessment_date'] = new_dict(date[0] + ": ", date[1])
course['learning_outcomes'] = new_dict(all_strings[19], all_strings[20])
# TODO Doesn't parse Minimum Requirement for Award of Credit or
# Reassessment Options
return course
def create_not_included_list(codes):
"""Given a list of course codes, ge5t their corresponding titles and format
them in a bulletted TeX list. This is used to indicate in the abstract
which courses have been deliberately discluded from the document
"""
string = '\\begin{itemize}\n'
for code in codes:
title = get_course_title_only(code)
string += '\\item{' + title + '}\n'
string += '\\end{itemize}\n'
return string
def write_to_latex(codelist, unwanted_courses):
"""Constructs the entire TeX document from all the courses with key
document details (like author and table of contents)
"""
# TODO: investigate a way to add large amounts of text outside of the
# function
abstract01 = "I created this document to practice parsing html and using\
tools like Beautiful Soup which I've previously had little experience\
in. As a result, it's not perfect.\\newline\
It is also a slightly condensed all-in-one-place look at a selection\
of courses that are available for fourth year computer science\
students at the University of Glasgow. For the purposes of clarity I\
have removed several courses from this selection. The following\
courses have been omitted:"
abstract02 = "For more insight into the project, to report issues or to\
inspect the code, have a look at the GitHub:\
\\url{https://github.com/IS0metric/course-ripper}"
unincluded = create_not_included_list(unwanted_courses)
with open('courses.tex', 'w') as f:
# TODO Try and move all this to a separate function?
# TODO: Check if it's more efficient to write a single, massive string
# to file
f.write('\\documentclass{hitec}\n')
f.write('\\usepackage[document]{ragged2e}\n')
f.write('\\usepackage{url}\n')
f.write('\\usepackage{hyperref}\n')
f.write('\\setcounter{tocdepth}{4}\n')
f.write('\\begin{document}\n')
f.write('\\title{Fourth Year (2016-17) Courses}\n')
f.write('\\author{Jack Parkinson}\n')
f.write('\\date{August 2016}\n')
f.write('\\maketitle\n')
f.write('\\abstract{' + abstract01 + unincluded + abstract02 + '}\n')
f.write('\\newpage\n\n')
f.write('\\tableofcontents\n')
f.write('\\newpage\n\n')
# TODO: Look into alternatives to the three lists
all_courses = []
sem1_courses = []
sem2_courses = []
for code in codelist:
course = bsoup(get_coursepage(code))
if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':
all_courses.append(course)
elif "1" in course['offered']['value']:
sem1_courses.append(course)
elif "2" in course['offered']['value']:
sem2_courses.append(course)
f.write('\\section{Semester 1 and 2 Courses}\n\n')
for course in all_courses:
f.write(latex_course(course))
f.write('\\section{Semester 1 Only Courses}\n\n')
for course in sem1_courses:
f.write(latex_course(course))
f.write('\\section{Semester 2 Only Courses}\n\n')
for course in sem2_courses:
f.write(latex_course(course))
f.write('\\end{document}')
return None
def latex_info(info):
"""Provides the special treatment that the info section requires"""
return '\\textbf{' + info['heading'] + '} ' + info['value'] + ' \\break\n'
def latex_subsection(section):
"""Creates a TeX formatted string for a given subsubsection"""
string = '\\subsubsection*{' + section['heading'] + '}\n'
string += section['value'] + '\n'
return string
def latex_course(course):
"""Creates a TeX formatted string for a course"""
basic_info_list = [
'session', 'school', 'credits', 'level', 'offered',
'visiting_students', 'erasmus_students'
]
generic_subsection_list = [
'description', 'timetable', 'requirements_of_entry',
'excluded_courses', 'co_requisites', 'assessment_weighting'
]
string = '\\subsection{' + course["title"] + '}\n'
for info in basic_info_list:
string += latex_info(course[info])
for subsection in generic_subsection_list:
string += latex_subsection(course[subsection])
string += '\\break \\textbf{' + course['assessment_date'][
'heading'] + '}' + course['assessment_date']['value'] + '\n'
string += latex_subsection(course['aims'])
string += '\\subsubsection*{' + \
course['learning_outcomes']['heading'] + '}\n'
outcome_list = re.split(
'\d+\. ', course['learning_outcomes']['value'])
string += outcome_list[0] + '\n'
string += '\\begin{enumerate}\n'
for i in outcome_list[1:-1]:
string += '\\item ' + i + '\n'
string += '\\end{enumerate}\n'
return string
def create_tex(unwanted_courses, wanted_courses=None):
"""Creates the TeX document from the Computer Science Course Catalog"""
page = requests.get(
'http://gla.ac.uk/coursecatalogue/courselist/' +
'?code=REG30200000&name=School+of+Computing+Science')
tree = html.fromstring(page.content)
spans = tree.xpath('//span/text()')
codes = []
if wanted_courses is None:
for s in spans:
if s[0:4] == "COMP" and s[7] == '4' and s not in unwanted_courses:
codes.append(s)
else:
for s in wanted_courses:
codes.append(s)
write_to_latex(codes, unwanted_courses)
return None
def pdflatex(unwanted_courses):
"""Generates a TeX document and then runs the pdflatex command to create a
PDF from the TeX
"""
create_tex(unwanted_courses)
cmd = ['pdflatex', '-interaction', 'nonstopmode', 'courses.tex']
proc = subprocess.Popen(cmd)
proc.communicate()
return None
if __name__ == "__main__":
# List of deliberately excluded courses
unwanted_courses = [
'COMPSCI4010', 'COMPSCI4009', 'COMPSCI4013', 'COMPSCI4024P',
'COMPSCI4014', 'COMPSCI4012', 'COMPSCI4011', 'COMPSCI4038',
'COMPSCI4015', 'COMPSCI4016', 'COMPSCI4046', 'COMPSCI4047',
'COMPSCI4044', 'COMPSCI4070', 'COMPSCI4038',
]
create_tex(unwanted_courses) | random_line_split | |
storeDetail.js | $(document).ready(function() {
console.log(window.location.pathname.indexOf("admin"));
// 관리자페이지에서만 보이기
if (window.location.pathname.indexOf("admin") == 1) {
$("#admin_button_box").css("display", "block");
}
let cartStoreNum = null; // 카트에 담긴 메뉴의 가게번호, 서로 다른가게에서 담으면 안됨
let size = $(window).width();
$(window).resize(function() {
size = $(window).width();
console.log(size);
})
// -------------------- 주문하기 버튼 클릭 -----------------------
$(".order_btn").click(function() {
location.href = "/order";
});
$(".cart_img_box").click(function() {
location.href = "/order";
});
// -------------------- 주문하기 버튼 클릭 -----------------------
// -------------------- 가게 별점 --------------------
let score = Math.round($("#score").val());
if (score <= 0) {
score = 0;
}
| // -------------------- 가게 별점 --------------------
// -------------------- 리뷰탭 그래프 --------------------
const reviewCount = $("#review_count").val();
const fiveScore = $("#five_score").val() / reviewCount * 100 + "%";
const fourScore = $("#four_score").val() / reviewCount * 100 + "%";
const threeScore = $("#three_score").val() / reviewCount * 100 + "%";
const twoScore = $("#two_score").val() / reviewCount * 100 + "%";
const oneScore = $("#one_score").val() / reviewCount * 100 + "%";
$(".graph.five").css("background", "gold").css("width", fiveScore);
$(".graph.four").css("background", "gold").css("width", fourScore);
$(".graph.three").css("background", "gold").css("width", threeScore);
$(".graph.two").css("background", "gold").css("width", twoScore);
$(".graph.one").css("background", "gold").css("width", oneScore);
// -------------------- 리뷰탭 그래프 --------------------
// -------------------- 찜 하기 --------------------
const storeNum = $(".store_num").val();
const userId = $(".user_id").val();
let dibsCheck = $(".dibs_check").val(); // 찜 여부
$(".fa-heart").click(function() {
if (dibsCheck == 0) { // 찜 안되있을때
$(this).removeClass("far");
$(this).addClass("fas");
dibsCheck = 1;
if (userId != "guest") {
const count = Number($(".count").html());
$(".count").html(count + 1);
}
} else { // 찜 되있을대
$(this).removeClass("fas");
$(this).addClass("far");
dibsCheck = 0;
if (userId != "guest") {
const count = Number($(".count").html());
$(".count").html(count - 1);
}
}
dibs();
})
function dibs() {
let data = {
storeNum: storeNum
};
$.ajax({
url: "/dibs",
type: "post",
async: false,
data: data,
success: function(result) {
} // success
}); // ajax
};
// -------------------- 찜 하기 --------------------
// ------------------ 카트 리스트 ----------------------
cartList("/cartList", "");
// ----------------- 카트 리스트 ----------------------
// ------------------메뉴 리스트 클릭----------------------
const deleveryTip = $(".delevery_tip").val();
const storeName = $(".store_name").val();
let menuNum;
let menuName;
let menuDec;
let menuPrice = 0; // 1개 가격
let sum = 0; // 메뉴 1개 총 가격
// let total =0 ; // 장바구니 총 가격
const addCartModal = $(".addCart_modal");
const menuList = $(".menu > li .in"); //메뉴 리스트
menuList.click(function() { // 메뉴 모달창 열기
console.log("cartStoreNum =" + cartStoreNum);
openModal(addCartModal, size);
menuNum = $(this).find("#menu_num").val();
menuName = $(this).find("#menu_name").val();
menuPrice = Number($(this).find("#menu_price").val());
sum = menuPrice;
menuDec = $(this).find("#menu_dec").val();
let data = {
storeNum: storeNum,
menuNum: menuNum
};
$.ajax({
url: "/menuDetail",
type: "post",
async: false,
data: data,
success: function(result) {
console.log(result);
console.log(result["menuDetail"]["menuImg"]);
$(".menu_name").html(menuName);
$(".menu_dec").html(menuDec);
$(".menu_img").attr("src", result["menuDetail"]["menuImg"]);
$(".menu_price").html(menuPrice.toLocaleString() + "원");
$(".menu_sum").html(menuPrice.toLocaleString() + "원");
let ht = "";
$("#option ul").html(ht);
if (result["menuOption"] != null && result["menuOption"] != "") {
console.log("옵션");
$("#option").css("display", "block");
for (var i = 0; i < result["menuOption"].length; i++) {
ht += `<li>
<div class="option_box">
<span>
<input type="checkbox" class="menu_option" name="option" value="${result["menuOption"][i]["optionName"]}"> ${result["menuOption"][i]["optionName"]}
<input type="hidden" class="option_price" value="${result["menuOption"][i]["optionPrice"]}">
<input type="hidden" class="option_num" value="${result["menuOption"][i]["optionNum"]}">
</span>
<span> ${result["menuOption"][i]["optionPrice"].toLocaleString()}원</span>
</div>
</li>`;
}
$("#option ul").html(ht);
} else {
$("#option").css("display", "none");
}
}
}); // ajax
}); // 메뉴 클릭 메서드
/* ---------------------- 옵션 선택 --------------------- */
let optionPrice = 0;
$(document).on("change", "input[name='option']", function() {
if ($(this).is(":checked")) {
optionPrice += Number($(this).siblings(".option_price").val());
} else {
optionPrice -= Number($(this).siblings(".option_price").val());
}
sum = (menuPrice + optionPrice) * amount;
$(".menu_sum").html(sum.toLocaleString() + "원");
});
/* ---------------------- 옵션 선택 --------------------- */
/* ---------------------수량 증가 감소--------------------- */
const amountBox = $("#amount");
let amount = 1;
$(".amount_box button").click(function() {
if ($(this).hasClass("minus")) {
amountBox.val() == 1 ? amountBox.val(amountBox.val()) : amountBox.val(Number(amountBox.val()) - 1);
} else if ($(this).hasClass("plus")) {
amountBox.val(Number(amountBox.val()) + 1);
}
amount = amountBox.val(); // 현재 출력중인 숫자
sum = (menuPrice + optionPrice) * amount;
$(".menu_sum").html(sum.toLocaleString() + "원");
})
/* ----------------------수량 증가 감소--------------------- */
/* ---------------------- 장바구니에 담기 --------------------- */
var cartCount = 0; // 모바일 카트
$(".add_cart").click(function() {
if ((cartStoreNum == null) || (storeNum == cartStoreNum)) {
addCart();
return;
}
if (cartStoreNum != null && storeNum != cartStoreNum) {
swal({
buttons: ["취소", "담기"],
title: "장바구니에는 같은 가게의 메뉴만 담을 수 있습니다",
text: "선택하신 메뉴를 장바구니에 담을 경우 이전에 담은 메뉴가 삭제됩니다"
})
.then((value) => {
if (value == true) {
deleteCartAll();
addCart();
}
});
}
}); // 장바구니에 담기 버튼 클릭
/* ---------------------- 장바구니에 담기 --------------------- */
/* ---------------------- 장바구니 1개 삭제 --------------------- */
$(document).on("click", ".cancle_btn", function() { // .html로 생성된 부분은 on 메서드를 사용해야된다
const index = $(this).parent().index();
console.log("index = " + index);
let data = {
index: index
};
cartList("/deleteCartOne", data);
}); // 삭제 버튼 클릭
/* ---------------------- 장바구니 1개 삭제 --------------------- */
/* ---------------------- 장바구니 전체 삭제 --------------------- */
$(".deleteAll").click(function() {
deleteCartAll();
}); // 전체 삭제
/* ---------------------- 장바구니 전체 삭제 --------------------- */
function cartEmpryCheck(total) {
const minDelevery = Number($("#min_delevery").val());
if ($(".cart ul ").html() != "") {
if (total < minDelevery) {
$(".order_btn").attr("disabled", true);
$(".order_btn").css("background", "#ddd");
$(".order_btn").html(`${minDelevery.toLocaleString()}원 이상 주문할 수 있습니다`);
} else {
$(".order_btn").attr("disabled", false); // 상품을 담으면 주문하기버튼 활성화
$(".order_btn").css("background", "#30DAD9");
}
} else {
$(".order_btn").attr("disabled", true);
$(".order_btn").css("background", "#ddd");
$(".order_btn").html("주문하기");
cartStoreNum = null;
}
};
let total = $(".total").data("total");
function cartList(url, data) {
$.ajax({
url: url,
type: "post",
data: data,
async: false,
traditional: true,
success: function(result) {
console.log(result);
let ht = "";
let total = 0;
if (result.length == 0) {
$(".total").html("장바구니가 비었습니다.");
$(".cart ul").html(ht); // 장바구니 목록 초기화
cartEmpryCheck(total);
return;
}
for (var i = 0; i < result.length; i++) {
let optionHt = "";
let optionTotal = 0;
if (result[i]["menuOption"] != null && result[i]["menuOption"] != "") {
for (var j = 0; j < result[i]["menuOption"].length; j++) {
let menuOption = result[i]["menuOption"][j];
let menuOptionPrice = Number(result[i]["menuOptionPrice"][j]);
optionHt += "<div class='cart_menu_option'>" + menuOption + " " + menuOptionPrice.toLocaleString() + "원</div>";
optionTotal += menuOptionPrice;
}
}
ht += `<li>
<h3>${result[i]["menuName"]}</h3>
<div>${result[i]["menuPrice"].toLocaleString()}원</div>
<div>수량 : ${result[i]["amount"]}</div>
<div> ${optionHt} </div>
<div>합계 : ${((result[i]["menuPrice"] + optionTotal) * result[i]["amount"]).toLocaleString()}원</div>
<button class="cancle_btn"> ${"x"} </button>
</li>`; // 장바구니 추가하면 장바구니 리스트 변경
total += (result[i]["menuPrice"] + optionTotal) * result[i]["amount"];
}
$(".cart ul").html(ht);
if (!result) {
$(".total").html("장바구니가 비었습니다.");
} else {
$(".total").html("총 합계 : " + total.toLocaleString() + "원");
}
cartEmpryCheck(total);
/* 모바일 카트 */
if (result.length > 0) {
$(".cart_count").html(result.length);
$(".cart_count").css("display", "block");
cartCount = result.length;
}
/* 모바일 카트 */
$(".total").data("total", total); // 전역변수에 세팅
cartStoreNum = result[0]["storeNum"];
} // success
}); // ajax
};
function addCart() {
const menuOption = new Array();
const menuOptionPrice = new Array();
$("input[type='checkBox']:checked").each(function() {
menuOption.push($(this).val());
menuOptionPrice.push($(this).siblings(".option_price").val());
})
console.log("StoreName = " + storeName);
console.log("menuOption= " + menuOption);
console.log("menuOptionPrice= " + menuOptionPrice);
let data = {
menuName: menuName,
storeNum: storeNum,
storeName: storeName,
menuNum: menuNum,
menuPrice: menuPrice,
amount: amount,
deleveryTip: deleveryTip,
menuOption: menuOption,
menuOptionPrice: menuOptionPrice
};
cartList("/addCart", data);
swal("장바구니에 추가되었습니다.", "", "success", {
timer: 800,
buttons: "확인"
});
console.log("카트 상품 수 = " + cartCount);
$("#amount").val(1);
cartCount += 1; // 모바일 카트
amount = 1;
optionPrice = 0;
closeModal();
}
function deleteCartAll() {
$.ajax({
url: "/deleteCartAll",
type: "post",
async: false,
success: function() {
$(".cart ul ").html("");
cartEmpryCheck(total);
$(".total").html("장바구니가 비었습니다.");
}
}); // ajax
}
const modal = $(".modal");
const modalBg = $(".modal_bg");
const closeA = $(".closeA");
const closeB = $(".closeB");
const modal2 = $(".modal2"); // 추가
const cancle = $(".cancle");
closeB.click(function() {
closeModal();
});
modalBg.click(function() {
closeModal();
});
closeA.click(function() {
closeModal();
});
$(".addCart").click(function() {
closeModal();
}); // 장바구니에 담기 버튼 클릭
$("#accept").click(function() {
closeModal();
});
cancle.click(function() {
closeModal();
})
function closeModal() {
modal.scrollTop(0);
modal2.scrollTop(0);
modalBg.hide();
modal.css("top", "100%");
$(".modal_box").scrollTop(0);
$("body").css("overflow", "visible");
$("#amount").val(1);
optionPrice = 0;
/* $("input[type='checkBox']").prop("checked", false); */
$(".plusOption").remove();
};
//탭 눌렀을때 색변경 콘텐츠 변경
$("main ul").eq(2).hide();
$("main ul").eq(3).hide();
const tab = $("ul.tab > li");
const menu = $(".menu > li");
tab.click(function() {
const index = $(this).index() + 1;
tab.removeClass("select");
$(this).addClass("select");
$("main ul").eq(1).hide();
$("main ul").eq(2).hide();
$("main ul").eq(3).hide();
$("main ul").eq(index).show();
const offset = $(".offset").offset();
const scrollPosition = $(document).scrollTop();
console.log(offset);
console.log(offset["top"]);
if (offset["top"] < scrollPosition) {
$("html").animate({ scrollTop: offset.top }, 100);
}
});
//탭 눌렀을때 색변경 콘텐츠 변경
/* ---------------------------- 이미지 확대 ---------------------------- */
let zoom = false;
$(".review_img").click(function() {
if (zoom == false) {
$(this).css("transition", "0.3s").css("width", "100%");
zoom = true;
} else {
$(this).css("transition", "0.3s").css("width", "30%");
zoom = false;
}
});
/* ---------------------------- 이미지 확대 ---------------------------- */
}); | $(".score_box i").eq(score).addClass("fas").prevAll().addClass("fas");
| random_line_split |
storeDetail.js |
$(document).ready(function() {
console.log(window.location.pathname.indexOf("admin"));
// 관리자페이지에서만 보이기
if (window.location.pathname.indexOf("admin") == 1) {
$("#admin_button_box").css("display", "block");
}
let cartStoreNum = null; // 카트에 담긴 메뉴의 가게번호, 서로 다른가게에서 담으면 안됨
let size = $(window).width();
$(window).resize(function() {
size = $(window).width();
console.log(size);
})
// -------------------- 주문하기 버튼 클릭 -----------------------
$(".order_btn").click(function() {
location.href = "/order";
});
$(".cart_img_box").click(function() {
location.href = "/order";
});
// -------------------- 주문하기 버튼 클릭 -----------------------
// -------------------- 가게 별점 --------------------
let score = Math.round($("#score").val());
if (score <= 0) {
score = 0;
}
$(".score_box i").eq(score).addClass("fas").prevAll().addClass("fas");
// -------------------- 가게 별점 --------------------
// -------------------- 리뷰탭 그래프 --------------------
const reviewCount = $("#review_count").val();
const fiveScore = $("#five_score").val() / reviewCount * 100 + "%";
const fourScore = $("#four_score").val() / reviewCount * 100 + "%";
const threeScore = $("#three_score").val() / reviewCount * 100 + "%";
const twoScore = $("#two_score").val() / reviewCount * 100 + "%";
const oneScore = $("#one_score").val() / reviewCount * 100 + "%";
$(".graph.five").css("background", "gold").css("width", fiveScore);
$(".graph.four").css("background", "gold").css("width", fourScore);
$(".graph.three").css("background", "gold").css("width", threeScore);
$(".graph.two").css("background", "gold").css("width", twoScore);
$(".graph.one").css("background", "gold").css("width", oneScore);
// -------------------- 리뷰탭 그래프 --------------------
// -------------------- 찜 하기 --------------------
const storeNum = $(".store_num").val();
const userId = $(".user_id").val();
let dibsCheck = $(".dibs_check").val(); // 찜 여부
$(".fa-heart").click(function() {
if (dibsCheck == 0) { // 찜 안되있을때
$(this).removeClass("far");
$(this).addClass("fas");
dibsCheck = 1;
if (userId != "guest") {
const count = Number($(".count").html());
$(".count").html(count + 1);
}
} else { // 찜 되있을대
$(this).removeClass("fas");
$(this).addClass("far");
dibsCheck = 0;
if (userId != "guest") {
const count = Number($(".count").html());
$(".count").html(count - 1);
}
}
dibs();
})
function dibs() {
let data = {
storeNum: storeNum
};
$.ajax({
url: "/dibs",
type: "post",
async: false,
data: data,
success: function(result) {
} // success
}); // ajax
};
// -------------------- 찜 하기 --------------------
// ------------------ 카트 리스트 ----------------------
cartList("/cartList", "");
// ----------------- 카트 리스트 ----------------------
// ------------------메뉴 리스트 클릭----------------------
const deleveryTip = $(".delevery_tip").val();
const storeName = $(".store_name").val();
let menuNum;
let menuName;
let menuDec;
let menuPrice = 0; // 1개 가격
let sum = 0; // 메뉴 1개 총 가격
// let total =0 ; // 장바구니 총 가격
const addCartModal = $(".addCart_modal");
const menuList = $(".menu > li .in"); //메뉴 리스트
menuList.click(function() { // 메뉴 모달창 열기
console.log("cartStoreNum =" + cartStoreNum);
openModal(addCartModal, size);
menuNum = $(this).find("#menu_num").val();
menuName = $(this).find("#menu_name").val();
menuPrice = Number($(this).find("#menu_price").val());
sum = menuPrice;
menuDec = $(this).find("#menu_dec").val();
let data = {
storeNum: storeNum,
menuNum: menuNum
};
$.ajax({
url: "/menuDetail",
type: "post",
async: false,
data: data,
success: function(result) {
console.log(result);
console.log(result["menuDetail"]["menuImg"]);
$(".menu_name").html(menuName);
$(".menu_dec").html(menuDec);
$(".menu_img").attr("src", result["menuDetail"]["menuImg"]);
$(".menu_price").html(menuPrice.toLocaleString() + "원");
$(".menu_sum").html(menuPrice.toLocaleString() + "원");
let ht = "";
$("#option ul").html(ht);
if (result["menuOption"] != null && result["menuOption"] != "") {
console.log("옵션");
$("#option").css("display", "block");
for (var i = 0; i < result["menuOption"].length; i++) {
ht += `<li>
<div class="option_box">
<span>
<input type="checkbox" class="menu_option" name="option" value="${result["menuOption"][i]["optionName"]}"> ${result["menuOption"][i]["optionName"]}
<input type="hidden" class="option_price" value="${result["menuOption"][i]["optionPrice"]}">
<input type="hidden" class="option_num" value="${result["menuOption"][i]["optionNum"]}">
</span>
<span> ${result["menuOption"][i]["optionPrice"].toLocaleString()}원</span>
</div>
</li>`;
}
$("#option ul").html(ht);
} else {
$("#option").css("display", "none");
}
}
}); // ajax
}); // 메뉴 클릭 메서드
/* ---------------------- 옵션 선택 --------------------- */
let optionPrice = 0;
$(document).on("change", "input[name='option']", function() {
if ($(this).is(":checked")) {
optionPrice += Number($(this).siblings(".option_price").val());
} else {
optionPrice -= Number($(this).siblings(".option_price").val());
}
sum = (menuPrice + optionPrice) * amount;
$(".menu_sum").html(sum.toLocaleString() + "원");
});
/* ---------------------- 옵션 선택 --------------------- */
/* ---------------------수량 증가 감소--------------------- */
const amountBox = $("#amount");
let amount = 1;
$(".amount_box button").click(function() {
if ($(this).hasClass("minus")) {
amountBox.val() == 1 ? amountBox.val(amountBox.val()) : amountBox.val(Number(amountBox.val()) - 1);
} else if ($(this).hasClass("plus")) {
amountBox.val(Number(amountBox.val()) + 1);
}
amount = amountBox.val(); // 현재 출력중인 숫자
sum = (menuPrice + optionPrice) * amount;
$(".menu_sum").html(sum.toLocaleString() + "원");
})
/* ----------------------수량 증가 감소--------------------- */
/* ---------------------- 장바구니에 담기 --------------------- */
var cartCount = 0; // 모바일 카트
$(".add_cart").click(function() {
if ((cartStoreNum == null) || (storeNum == cartStoreNum)) {
addCart();
return;
}
if (cartStoreNum != null && storeNum != cartStoreNum) {
swal({
buttons: ["취소", "담기"],
title: "장바구니에는 같은 가게의 메뉴만 담을 수 있습니다",
text: "선택하신 메뉴를 장바구니에 담을 경우 이전에 담은 메뉴가 삭제됩니다"
})
.then((value) => {
if (value == true) {
deleteCartAll();
addCart();
}
});
}
}); // 장바구니에 담기 버튼 클릭
/* ---------------------- 장바구니에 담기 --------------------- */
/* ---------------------- 장바구니 1개 삭제 --------------------- */
$(document).on("click", ".cancle_btn", function() { // .html로 생성된 부분은 on 메서드를 사용해야된다
const index = $(this).parent().index();
console.log("index = " + index);
let data = {
index: index
};
cartList("/deleteCartOne", data);
}); // 삭제 버튼 클릭
/* ---------------------- 장바구니 1개 삭제 --------------------- */
/* ---------------------- 장바구니 전체 삭제 --------------------- */
$(".deleteAll").click(function() {
deleteCartAll();
}); // 전체 삭제
/* ---------------------- 장바구니 전체 삭제 --------------------- */
function cartEmpryCheck(total) {
const minDelevery = Number($("#min_delevery").val());
if ($(".cart ul ").html() != "") {
if (total < minDelevery) {
$(".order_btn").attr("disabled", true);
$(".order_btn").css("background", "#ddd");
$(".order_btn").html(`${minDelevery.toLocaleString()}원 이상 주문할 수 있습니다`);
} else {
$(".order_btn").attr("disabled", false); // 상품을 담으면 주문하기버튼 활성화
$(".order_btn").css("background", "#30DAD9");
}
} else {
$(".order_btn").attr("disabled", true);
$(".order_btn").css("background", "#ddd");
$(".order_btn").html("주문하기");
cartStoreNum = null;
}
};
let total = $(".total").data("total");
function cartList(url, data) {
$.ajax({
url: url,
type: "post",
data: data,
async: false,
traditional: true,
success: function(result) {
console.log(result);
let ht = "";
let total = 0;
if (result.length == 0) {
$(".total").html("장바구니가 비었습니다.");
$(".cart ul").html(ht); // 장바구니 목록 초기화
cartEmpryCheck(total);
return;
}
for (var i = 0; i < result.length; i++) {
let optionHt = "";
let optionTotal = 0;
if (result[i]["menuOption"] != null && result[i]["menuOption"] != "") {
for (var j = 0; j < result[i]["menuOption"].length; j++) {
let menuOption = result[i]["menuOption"][j];
let menuOptionPrice = Number(result[i]["menuOptionPrice"][j]);
optionHt += "<div class='cart_menu_option'>" + menuOption + " " + menuOptionPrice.toLocaleString() + "원</div>";
optionTotal += menuOptionPrice;
}
}
ht += `<li>
<h3>${result[i]["menuName"]}</h3>
<div>${result[i]["menuPrice"].toLocaleString()}원</div>
<div>수량 : ${result[i]["amount"]}</div>
<div> ${optionHt} </div>
<div>합계 : ${((result[i]["menuPrice"] + optionTotal) * result[i]["amount"]).toLocaleString()}원</div>
<button class="cancle_btn"> ${"x"} </button>
</li>`; // 장바구니 추가하면 장바구니 리스트 변경
total += (result[i]["menuPrice"] + optionTotal) * result[i]["amount"];
}
$(".cart ul").html(ht);
if (!result) {
$(".total").html("장바구니가 비었습니다.");
} else {
$(".total").html("총 합계 : " + total.toLocaleString() + "원");
}
cartEmpryCheck(total);
/* 모바일 카트 */
if (result.length > 0) {
$(".cart_count").html(result.length);
$(".cart_count").css("display", "block");
cartCount = result.length;
}
/* 모바일 카트 */
$(".total").data("total", total); // 전역변수에 세팅
cartStoreNum = result[0]["storeNum"];
} // success
}); // ajax
};
function addCart() {
const menuOption = new Array();
const menuOptionPrice = new Array();
$("input[type='checkBox']:checked").each(function() {
menuOption.push($(this).val());
menuOptionPrice.push($(this).siblings(".option_price").val());
})
console.log("StoreName = " + storeName);
console.log("menuOption= " + menuOption);
console.log("menuOptionPrice= " + menuOptionPrice);
let data = {
menuName: menuName,
storeNum: storeNum,
storeName: storeName,
menuNum: menuNum,
menuPrice: menuPrice,
amount: amount,
deleveryTip: deleveryTip,
menuOption: menuOption,
menuOptionPrice: menuOptionPrice
};
cartList("/addCart", data);
swal("장바구니에 추가되었습니다.", "", "success", {
timer: 800,
buttons: "확인"
});
console.log("카트 상품 수 = " + cartCount);
$("#amount").val(1);
cartCount += 1; // 모바일 카트
amount = 1;
optionPrice = 0;
closeModal();
}
function deleteCartAll() {
$.ajax({
url: "/deleteCartAll",
type: "post",
async: false,
success: function() {
$(".cart ul ").html("");
cartEmpryCheck(total);
$(".total").html("장바구니가 비었습니다.");
}
}); // ajax
}
const modal = $(".modal");
const modalBg = $(".modal_bg");
const closeA = $(".closeA");
const closeB = $(".closeB");
const modal2 = $(".modal2"); // 추가
const cancle = $(".cancle");
closeB.click(function() {
closeModal();
});
modalBg.click(function() {
closeModal();
});
closeA.click(function() {
closeModal();
});
$(".addCart").click(function() {
closeModal();
}); // 장바구니에 담기 버튼 클릭
$("#accept").click(function() {
closeModal();
});
cancle.click(function() {
closeModal();
})
function closeModal() {
modal.scrollTop(0);
modal2.scrollTop(0);
modalBg.hide();
modal.css("top", "100%");
$(".modal_box").scrollTop(0);
$("bo | ).eq(3).hide();
const tab = $("ul.tab > li");
const menu = $(".menu > li");
tab.click(function() {
const index = $(this).index() + 1;
tab.removeClass("select");
$(this).addClass("select");
$("main ul").eq(1).hide();
$("main ul").eq(2).hide();
$("main ul").eq(3).hide();
$("main ul").eq(index).show();
const offset = $(".offset").offset();
const scrollPosition = $(document).scrollTop();
console.log(offset);
console.log(offset["top"]);
if (offset["top"] < scrollPosition) {
$("html").animate({ scrollTop: offset.top }, 100);
}
});
//탭 눌렀을때 색변경 콘텐츠 변경
/* ---------------------------- 이미지 확대 ---------------------------- */
let zoom = false;
$(".review_img").click(function() {
if (zoom == false) {
$(this).css("transition", "0.3s").css("width", "100%");
zoom = true;
} else {
$(this).css("transition", "0.3s").css("width", "30%");
zoom = false;
}
});
/* ---------------------------- 이미지 확대 ---------------------------- */
}); | dy").css("overflow", "visible");
$("#amount").val(1);
optionPrice = 0;
/* $("input[type='checkBox']").prop("checked", false); */
$(".plusOption").remove();
};
//탭 눌렀을때 색변경 콘텐츠 변경
$("main ul").eq(2).hide();
$("main ul" | identifier_body |
storeDetail.js |
$(document).ready(function() {
console.log(window.location.pathname.indexOf("admin"));
// 관리자페이지에서만 보이기
if (window.location.pathname.indexOf("admin") == 1) {
$("#admin_button_box").css("display", "block");
}
let cartStoreNum = null; // 카트에 담긴 메뉴의 가게번호, 서로 다른가게에서 담으면 안됨
let size = $(window).width();
$(window).resize(function() {
size = $(window).width();
console.log(size);
})
// -------------------- 주문하기 버튼 클릭 -----------------------
$(".order_btn").click(function() {
location.href = "/order";
});
$(".cart_img_box").click(function() {
location.href = "/order";
});
// -------------------- 주문하기 버튼 클릭 -----------------------
// -------------------- 가게 별점 --------------------
let score = Math.round($("#score").val());
if (score <= 0) {
score = 0;
}
$(".score_box i").eq(score).addClass("fas").prevAll().addClass("fas");
// -------------------- 가게 별점 --------------------
// -------------------- 리뷰탭 그래프 --------------------
const reviewCount = $("#review_count").val();
const fiveScore = $("#five_score").val() / reviewCount * 100 + "%";
const fourScore = $("#four_score").val() / reviewCount * 100 + "%";
const threeScore = $("#three_score").val() / reviewCount * 100 + "%";
const twoScore = $("#two_score").val() / reviewCount * 100 + "%";
const oneScore = $("#one_score").val() / reviewCount * 100 + "%";
$(".graph.five").css("background", "gold").css("width", fiveScore);
$(".graph.four").css("background", "gold").css("width", fourScore);
$(".graph.three").css("background", "gold").css("width", threeScore);
$(".graph.two").css("background", "gold").css("width", twoScore);
$(".graph.one").css("background", "gold").css("width", oneScore);
// -------------------- 리뷰탭 그래프 --------------------
// -------------------- 찜 하기 --------------------
const storeNum = $(".store_num").val();
const userId = $(".user_id").val();
let dibsCheck = $(".dibs_check").val(); // 찜 여부
$(".fa-heart").click(function() {
if (dibsCheck == 0) { // 찜 안되있을때
$(this).removeClass("far");
$(this).addClass("fas");
dibsCheck = 1;
if (userId != "guest") {
const count = Number($(".count").html());
$(".count").html(count + 1);
}
} else { // 찜 되있을대
$(this).removeClass("fas");
$(this).addClass("far");
dibsCheck = 0;
if (userId != "guest") {
const count = Number($(".count").html());
$(".count").html(count - 1);
}
}
dibs();
})
function dibs() {
let data = {
storeNum: storeNum
};
$.ajax({
url: "/dibs",
type: "post",
async: false,
data: data,
success: function(result) {
} // success
}); // ajax
};
// -------------------- 찜 하기 --------------------
// ------------------ 카트 리스트 ----------------------
cartList("/cartList", "");
// ----------------- 카트 리스트 ----------------------
// ------------------메뉴 리스트 클릭----------------------
const deleveryTip = $(".delevery_tip").val();
const storeName = $(".store_name").val();
let menuNum;
let menuName;
let menuDec;
let menuPrice = 0; // 1개 가격
let sum = 0; // 메뉴 1개 총 가격
// let total =0 ; // 장바구니 총 가격
const addCartModal = $(".addCart_modal");
const menuList = $(".menu > li .in"); //메뉴 리스트
menuList.click(function() { // 메뉴 모달창 열기
console.log("cartStoreNum =" + cartStoreNum);
openModal(addCartModal, size);
menuNum = $(this).find("#menu_num").val();
menuName = $(this).find("#menu_name").val();
menuPrice = Number($(this).find("#menu_price").val());
sum = menuPrice;
menuDec = $(this).find("#menu_dec").val();
let data = {
storeNum: storeNum,
menuNum: menuNum
};
$.ajax({
url: "/menuDetail",
type: "post",
async: false,
data: data,
success: function(result) {
console.log(result);
console.log(result["menuDetail"]["menuImg"]);
$(".menu_name").html(menuName);
$(".menu_dec").html(menuDec);
$(".menu_img").attr("src", result["menuDetail"]["menuImg"]);
$(".menu_price").html(menuPrice.toLocaleString() + "원");
$(".menu_sum").html(menuPrice.toLocaleString() + "원");
let ht = "";
$("#option ul").html(ht);
if (result["menuOption"] != null && result["menuOption"] != "") {
console.log("옵션");
$("#option").css("display", "block");
for (var i = 0; i < result["menuOption"].length; i++) {
ht += `<li>
<div class="option_box">
<span>
<input type="checkbox" class="menu_option" name="option" value="${result["menuOption"][i]["optionName"]}"> ${result["menuOption"][i]["optionName"]}
<input type="hidden" class="option_price" value="${result["menuOption"][i]["optionPrice"]}">
<input type="hidden" class="option_num" value="${result["menuOption"][i]["optionNum"]}">
</span>
<span> ${result["menuOption"][i]["optionPrice"].toLocaleString()}원</span>
</div>
</li>`;
}
$("#option ul").html(ht);
} else {
$("#option").css("display", "none");
}
}
}); // ajax
}); // 메뉴 클릭 메서드
/* ---------------------- 옵션 선택 --------------------- */
let optionPrice = 0;
$(document).on("change", "input[name='option']", function() {
if ($(this).is(":checked")) {
optionPrice += Number($(this).siblings(".option_price").val());
} else {
optionPrice -= Number($(this).siblings(".option_price").val());
}
sum = (menuPrice + optionPrice) * amount;
$(".menu_sum").html(sum.toLocaleString() + "원");
});
/* ---------------------- 옵션 선택 ------------------- | amountBox = $("#amount");
let amount = 1;
$(".amount_box button").click(function() {
if ($(this).hasClass("minus")) {
amountBox.val() == 1 ? amountBox.val(amountBox.val()) : amountBox.val(Number(amountBox.val()) - 1);
} else if ($(this).hasClass("plus")) {
amountBox.val(Number(amountBox.val()) + 1);
}
amount = amountBox.val(); // 현재 출력중인 숫자
sum = (menuPrice + optionPrice) * amount;
$(".menu_sum").html(sum.toLocaleString() + "원");
})
/* ----------------------수량 증가 감소--------------------- */
/* ---------------------- 장바구니에 담기 --------------------- */
var cartCount = 0; // 모바일 카트
$(".add_cart").click(function() {
if ((cartStoreNum == null) || (storeNum == cartStoreNum)) {
addCart();
return;
}
if (cartStoreNum != null && storeNum != cartStoreNum) {
swal({
buttons: ["취소", "담기"],
title: "장바구니에는 같은 가게의 메뉴만 담을 수 있습니다",
text: "선택하신 메뉴를 장바구니에 담을 경우 이전에 담은 메뉴가 삭제됩니다"
})
.then((value) => {
if (value == true) {
deleteCartAll();
addCart();
}
});
}
}); // 장바구니에 담기 버튼 클릭
/* ---------------------- 장바구니에 담기 --------------------- */
/* ---------------------- 장바구니 1개 삭제 --------------------- */
$(document).on("click", ".cancle_btn", function() { // .html로 생성된 부분은 on 메서드를 사용해야된다
const index = $(this).parent().index();
console.log("index = " + index);
let data = {
index: index
};
cartList("/deleteCartOne", data);
}); // 삭제 버튼 클릭
/* ---------------------- 장바구니 1개 삭제 --------------------- */
/* ---------------------- 장바구니 전체 삭제 --------------------- */
$(".deleteAll").click(function() {
deleteCartAll();
}); // 전체 삭제
/* ---------------------- 장바구니 전체 삭제 --------------------- */
function cartEmpryCheck(total) {
const minDelevery = Number($("#min_delevery").val());
if ($(".cart ul ").html() != "") {
if (total < minDelevery) {
$(".order_btn").attr("disabled", true);
$(".order_btn").css("background", "#ddd");
$(".order_btn").html(`${minDelevery.toLocaleString()}원 이상 주문할 수 있습니다`);
} else {
$(".order_btn").attr("disabled", false); // 상품을 담으면 주문하기버튼 활성화
$(".order_btn").css("background", "#30DAD9");
}
} else {
$(".order_btn").attr("disabled", true);
$(".order_btn").css("background", "#ddd");
$(".order_btn").html("주문하기");
cartStoreNum = null;
}
};
let total = $(".total").data("total");
function cartList(url, data) {
$.ajax({
url: url,
type: "post",
data: data,
async: false,
traditional: true,
success: function(result) {
console.log(result);
let ht = "";
let total = 0;
if (result.length == 0) {
$(".total").html("장바구니가 비었습니다.");
$(".cart ul").html(ht); // 장바구니 목록 초기화
cartEmpryCheck(total);
return;
}
for (var i = 0; i < result.length; i++) {
let optionHt = "";
let optionTotal = 0;
if (result[i]["menuOption"] != null && result[i]["menuOption"] != "") {
for (var j = 0; j < result[i]["menuOption"].length; j++) {
let menuOption = result[i]["menuOption"][j];
let menuOptionPrice = Number(result[i]["menuOptionPrice"][j]);
optionHt += "<div class='cart_menu_option'>" + menuOption + " " + menuOptionPrice.toLocaleString() + "원</div>";
optionTotal += menuOptionPrice;
}
}
ht += `<li>
<h3>${result[i]["menuName"]}</h3>
<div>${result[i]["menuPrice"].toLocaleString()}원</div>
<div>수량 : ${result[i]["amount"]}</div>
<div> ${optionHt} </div>
<div>합계 : ${((result[i]["menuPrice"] + optionTotal) * result[i]["amount"]).toLocaleString()}원</div>
<button class="cancle_btn"> ${"x"} </button>
</li>`; // 장바구니 추가하면 장바구니 리스트 변경
total += (result[i]["menuPrice"] + optionTotal) * result[i]["amount"];
}
$(".cart ul").html(ht);
if (!result) {
$(".total").html("장바구니가 비었습니다.");
} else {
$(".total").html("총 합계 : " + total.toLocaleString() + "원");
}
cartEmpryCheck(total);
/* 모바일 카트 */
if (result.length > 0) {
$(".cart_count").html(result.length);
$(".cart_count").css("display", "block");
cartCount = result.length;
}
/* 모바일 카트 */
$(".total").data("total", total); // 전역변수에 세팅
cartStoreNum = result[0]["storeNum"];
} // success
}); // ajax
};
function addCart() {
const menuOption = new Array();
const menuOptionPrice = new Array();
$("input[type='checkBox']:checked").each(function() {
menuOption.push($(this).val());
menuOptionPrice.push($(this).siblings(".option_price").val());
})
console.log("StoreName = " + storeName);
console.log("menuOption= " + menuOption);
console.log("menuOptionPrice= " + menuOptionPrice);
let data = {
menuName: menuName,
storeNum: storeNum,
storeName: storeName,
menuNum: menuNum,
menuPrice: menuPrice,
amount: amount,
deleveryTip: deleveryTip,
menuOption: menuOption,
menuOptionPrice: menuOptionPrice
};
cartList("/addCart", data);
swal("장바구니에 추가되었습니다.", "", "success", {
timer: 800,
buttons: "확인"
});
console.log("카트 상품 수 = " + cartCount);
$("#amount").val(1);
cartCount += 1; // 모바일 카트
amount = 1;
optionPrice = 0;
closeModal();
}
function deleteCartAll() {
$.ajax({
url: "/deleteCartAll",
type: "post",
async: false,
success: function() {
$(".cart ul ").html("");
cartEmpryCheck(total);
$(".total").html("장바구니가 비었습니다.");
}
}); // ajax
}
const modal = $(".modal");
const modalBg = $(".modal_bg");
const closeA = $(".closeA");
const closeB = $(".closeB");
const modal2 = $(".modal2"); // 추가
const cancle = $(".cancle");
closeB.click(function() {
closeModal();
});
modalBg.click(function() {
closeModal();
});
closeA.click(function() {
closeModal();
});
$(".addCart").click(function() {
closeModal();
}); // 장바구니에 담기 버튼 클릭
$("#accept").click(function() {
closeModal();
});
cancle.click(function() {
closeModal();
})
function closeModal() {
modal.scrollTop(0);
modal2.scrollTop(0);
modalBg.hide();
modal.css("top", "100%");
$(".modal_box").scrollTop(0);
$("body").css("overflow", "visible");
$("#amount").val(1);
optionPrice = 0;
/* $("input[type='checkBox']").prop("checked", false); */
$(".plusOption").remove();
};
//탭 눌렀을때 색변경 콘텐츠 변경
$("main ul").eq(2).hide();
$("main ul").eq(3).hide();
const tab = $("ul.tab > li");
const menu = $(".menu > li");
tab.click(function() {
const index = $(this).index() + 1;
tab.removeClass("select");
$(this).addClass("select");
$("main ul").eq(1).hide();
$("main ul").eq(2).hide();
$("main ul").eq(3).hide();
$("main ul").eq(index).show();
const offset = $(".offset").offset();
const scrollPosition = $(document).scrollTop();
console.log(offset);
console.log(offset["top"]);
if (offset["top"] < scrollPosition) {
$("html").animate({ scrollTop: offset.top }, 100);
}
});
//탭 눌렀을때 색변경 콘텐츠 변경
/* ---------------------------- 이미지 확대 ---------------------------- */
let zoom = false;
$(".review_img").click(function() {
if (zoom == false) {
$(this).css("transition", "0.3s").css("width", "100%");
zoom = true;
} else {
$(this).css("transition", "0.3s").css("width", "30%");
zoom = false;
}
});
/* ---------------------------- 이미지 확대 ---------------------------- */
}); | -- */
/* ---------------------수량 증가 감소--------------------- */
const | conditional_block |
storeDetail.js |
$(document).ready(function() {
console.log(window.location.pathname.indexOf("admin"));
// 관리자페이지에서만 보이기
if (window.location.pathname.indexOf("admin") == 1) {
$("#admin_button_box").css("display", "block");
}
let cartStoreNum = null; // 카트에 담긴 메뉴의 가게번호, 서로 다른가게에서 담으면 안됨
let size = $(window).width();
$(window).resize(function() {
size = $(window).width();
console.log(size);
})
// -------------------- 주문하기 버튼 클릭 -----------------------
$(".order_btn").click(function() {
location.href = "/order";
});
$(".cart_img_box").click(function() {
location.href = "/order";
});
// -------------------- 주문하기 버튼 클릭 -----------------------
// -------------------- 가게 별점 --------------------
let score = Math.round($("#score").val());
if (score <= 0) {
score = 0;
}
$(".score_box i").eq(score).addClass("fas").prevAll().addClass("fas");
// -------------------- 가게 별점 --------------------
// -------------------- 리뷰탭 그래프 --------------------
const reviewCount = $("#review_count").val();
const fiveScore = $("#five_score").val() / reviewCount * 100 + "%";
const fourScore = $("#four_score").val() / reviewCount * 100 + "%";
const threeScore = $("#three_score").val() / reviewCount * 100 + "%";
const twoScore = $("#two_score").val() / reviewCount * 100 + "%";
const oneScore = $("#one_score").val() / reviewCount * 100 + "%";
$(".graph.five").css("background", "gold").css("width", fiveScore);
$(".graph.four").css("background", "gold").css("width", fourScore);
$(".graph.three").css("background", "gold").css("width", threeScore);
$(".graph.two").css("background", "gold").css("width", twoScore);
$(".graph.one").css("background", "gold").css("width", oneScore);
// -------------------- 리뷰탭 그래프 --------------------
// -------------------- 찜 하기 --------------------
const storeNum = $(".store_num").val();
const userId = $(".user_id").val();
let dibsCheck = $(".dibs_check").val(); // 찜 여부
$(".fa-heart").click(function() {
if (dibsCheck == 0) { // 찜 안되있을때
$(this).removeClass("far");
$(this).addClass("fas");
dibsCheck = 1;
if (userId != "guest") {
const count = Number($(".count").html());
$(".count").html(count + 1);
}
} else { // 찜 되있을대
$(this).removeClass("fas");
$(this).addClass("far");
dibsCheck = 0;
if (userId != "guest") {
const count = Number($(".count").html());
$(".count").html(count - 1);
}
}
dibs();
})
function dibs() {
let data = {
storeNum: storeNum
};
$.ajax({
url: "/dibs",
type: "post",
async: false,
data: data,
success: function(result) {
} // success
}); // ajax
};
// -------------------- 찜 하기 --------------------
// ------------------ 카트 리스트 ----------------------
cartList("/cartList", "");
// ----------------- 카트 리스트 ----------------------
// ------------------메뉴 리스트 클릭----------------------
const deleveryTip = $(".delevery_tip").val();
const storeName = $(".store_name").val();
let menuNum;
let menuName;
let menuDec;
let menuPrice = 0; // 1개 가격
let sum = 0; // 메뉴 1개 총 가격
// let total =0 ; // 장바구니 총 가격
const addCartModal = $(".addCart_modal");
const menuList = $(".menu > li .in"); //메뉴 리스트
menuList.click(function() { // 메뉴 모달창 열기
console.log("cartStoreNum =" + cartStoreNum);
openModal(addCartModal, size);
menuNum = $(this).find("#menu_num").val();
menuName = $(this).find("#menu_name").val();
menuPrice = Number($(this).find("#menu_price").val());
sum = menuPrice;
menuDec = $(this).find("#menu_dec").val();
let data = {
storeNum: storeNum,
menuNum: menuNum
};
$.ajax({
url: "/menuDetail",
type: "post",
async: false,
data: data,
success: function(result) {
console.log(result);
console.log(result["menuDetail"]["menuImg"]);
$(".menu_name").html(menuName);
$(".menu_dec").html(menuDec);
$(".menu_img").attr("src", result["menuDetail"]["menuImg"]);
$(".menu_price").html(menuPrice.toLocaleString() + "원");
$(".menu_sum").html(menuPrice.toLocaleString() + "원");
let ht = "";
$("#option ul").html(ht);
if (result["menuOption"] != null && result["menuOption"] != "") {
console.log("옵션");
$("#option").css("display", "block");
for (var i = 0; i < result["menuOption"].length; i++) {
ht += `<li>
<div class="option_box">
<span>
<input type="checkbox" class="menu_option" name="option" value="${result["menuOption"][i]["optionName"]}"> ${result["menuOption"][i]["optionName"]}
<input type="hidden" class="option_price" value="${result["menuOption"][i]["optionPrice"]}">
<input type="hidden" class="option_num" value="${result["menuOption"][i]["optionNum"]}">
</span>
<span> ${result["menuOption"][i]["optionPrice"].toLocaleString()}원</span>
</div>
</li>`;
}
$("#option ul").html(ht);
} else {
$("#option").css("display", "none");
}
}
}); // ajax
}); // 메뉴 클릭 메서드
/* ---------------------- 옵션 선택 --------------------- */
let optionPrice = 0;
$(document).on("change", "input[name='option']", function() {
if ($(this).is(":checked")) {
optionPrice += Number($(this).siblings(".option_price").val());
} else {
optionPrice -= Number($(this).siblings(".option_price").val());
}
sum = (menuPrice + optionPrice) * amount;
$(".menu_sum").html(sum.toLocaleString() + "원");
});
/* ---------------------- 옵션 선택 --------------------- */
/* ---------------------수량 증가 감소--------------------- */
const amountBox = $("#amount");
let amount = 1;
$(".amount_box button").click(function() {
if ($(this).hasClass("minus")) {
amountBox.val() == 1 ? amountBox.val(amountBox.val()) : amountBox.val(Number(amountBox.val()) - 1);
} else if ($(this).hasClass("plus")) {
amountBox.val(Number(amountBox.val()) + 1);
}
amount = amountBox.val(); // 현재 출력중인 숫자
sum = (menuPrice + optionPrice) * amount;
$(".menu_sum").html(sum.toLocaleString() + "원");
})
/* ----------------------수량 증가 감소--------------------- */
/* ---------------------- 장바구니에 담기 --------------------- */
var cartCount = 0; // 모바일 카트
$(".add_cart").click(function() {
if ((cartStoreNum == null) || (storeNum == cartStoreNum)) {
addCart();
return;
}
if (cartStoreNum != null && storeNum != cartStoreNum) {
swal({
buttons: ["취소", "담기"],
title: "장바구니에는 같은 가게의 메뉴만 담을 수 있습니다",
text: "선택하신 메뉴를 장바구니에 담을 경우 이전에 담은 메뉴가 삭제됩니다"
})
.then((value) => {
if (value == true) {
deleteCartAll();
addCart();
}
});
}
}); // 장바구니에 담기 버튼 클릭
/* ---------------------- 장바구니에 담기 --------------------- */
/* ---------------------- 장바구니 1개 삭제 --------------------- */
$(document).on("click", ".cancle_btn", function() { // .html로 생성된 부분은 on 메서드를 사용해야된다
const index = $(this).parent().index();
console.log("index = " + index);
let data = {
index: index
};
cartList("/deleteCartOne", data);
}); // 삭제 버튼 클릭
/* ---------------------- 장바구니 1개 삭제 --------------------- */
/* ---------------------- 장바구니 전체 삭제 --------------------- */
$(".deleteAll").click(function() {
deleteCartAll();
}); // 전체 삭제
/* ---------------------- 장바구니 전체 삭제 --------------------- */
function cartEmpryCheck(total) {
const minDelevery = Number($("#min_delevery").val());
if ($(".cart ul ").html() != "") {
if (total < minDelevery) {
$(".order_btn").attr("disabled", true);
$(".order_btn").css("background", "#ddd");
$(".order_btn").html(`${minDelevery.toLocaleString()}원 이상 주문할 수 있습니다`);
} else {
$(".order_btn").attr("disabled", false); // 상품을 담으면 주문하기버튼 활성화
$(".order_btn").css("background", "#30DAD9");
}
} else {
$(".order_btn").attr("disabled", true);
$(".order_btn").css("background", "#ddd");
$(".order_btn").html("주문하기");
cartStoreNum = null;
}
};
let total | data("total");
function cartList(url, data) {
$.ajax({
url: url,
type: "post",
data: data,
async: false,
traditional: true,
success: function(result) {
console.log(result);
let ht = "";
let total = 0;
if (result.length == 0) {
$(".total").html("장바구니가 비었습니다.");
$(".cart ul").html(ht); // 장바구니 목록 초기화
cartEmpryCheck(total);
return;
}
for (var i = 0; i < result.length; i++) {
let optionHt = "";
let optionTotal = 0;
if (result[i]["menuOption"] != null && result[i]["menuOption"] != "") {
for (var j = 0; j < result[i]["menuOption"].length; j++) {
let menuOption = result[i]["menuOption"][j];
let menuOptionPrice = Number(result[i]["menuOptionPrice"][j]);
optionHt += "<div class='cart_menu_option'>" + menuOption + " " + menuOptionPrice.toLocaleString() + "원</div>";
optionTotal += menuOptionPrice;
}
}
ht += `<li>
<h3>${result[i]["menuName"]}</h3>
<div>${result[i]["menuPrice"].toLocaleString()}원</div>
<div>수량 : ${result[i]["amount"]}</div>
<div> ${optionHt} </div>
<div>합계 : ${((result[i]["menuPrice"] + optionTotal) * result[i]["amount"]).toLocaleString()}원</div>
<button class="cancle_btn"> ${"x"} </button>
</li>`; // 장바구니 추가하면 장바구니 리스트 변경
total += (result[i]["menuPrice"] + optionTotal) * result[i]["amount"];
}
$(".cart ul").html(ht);
if (!result) {
$(".total").html("장바구니가 비었습니다.");
} else {
$(".total").html("총 합계 : " + total.toLocaleString() + "원");
}
cartEmpryCheck(total);
/* 모바일 카트 */
if (result.length > 0) {
$(".cart_count").html(result.length);
$(".cart_count").css("display", "block");
cartCount = result.length;
}
/* 모바일 카트 */
$(".total").data("total", total); // 전역변수에 세팅
cartStoreNum = result[0]["storeNum"];
} // success
}); // ajax
};
function addCart() {
const menuOption = new Array();
const menuOptionPrice = new Array();
$("input[type='checkBox']:checked").each(function() {
menuOption.push($(this).val());
menuOptionPrice.push($(this).siblings(".option_price").val());
})
console.log("StoreName = " + storeName);
console.log("menuOption= " + menuOption);
console.log("menuOptionPrice= " + menuOptionPrice);
let data = {
menuName: menuName,
storeNum: storeNum,
storeName: storeName,
menuNum: menuNum,
menuPrice: menuPrice,
amount: amount,
deleveryTip: deleveryTip,
menuOption: menuOption,
menuOptionPrice: menuOptionPrice
};
cartList("/addCart", data);
swal("장바구니에 추가되었습니다.", "", "success", {
timer: 800,
buttons: "확인"
});
console.log("카트 상품 수 = " + cartCount);
$("#amount").val(1);
cartCount += 1; // 모바일 카트
amount = 1;
optionPrice = 0;
closeModal();
}
function deleteCartAll() {
$.ajax({
url: "/deleteCartAll",
type: "post",
async: false,
success: function() {
$(".cart ul ").html("");
cartEmpryCheck(total);
$(".total").html("장바구니가 비었습니다.");
}
}); // ajax
}
const modal = $(".modal");
const modalBg = $(".modal_bg");
const closeA = $(".closeA");
const closeB = $(".closeB");
const modal2 = $(".modal2"); // 추가
const cancle = $(".cancle");
closeB.click(function() {
closeModal();
});
modalBg.click(function() {
closeModal();
});
closeA.click(function() {
closeModal();
});
$(".addCart").click(function() {
closeModal();
}); // 장바구니에 담기 버튼 클릭
$("#accept").click(function() {
closeModal();
});
cancle.click(function() {
closeModal();
})
function closeModal() {
modal.scrollTop(0);
modal2.scrollTop(0);
modalBg.hide();
modal.css("top", "100%");
$(".modal_box").scrollTop(0);
$("body").css("overflow", "visible");
$("#amount").val(1);
optionPrice = 0;
/* $("input[type='checkBox']").prop("checked", false); */
$(".plusOption").remove();
};
//탭 눌렀을때 색변경 콘텐츠 변경
$("main ul").eq(2).hide();
$("main ul").eq(3).hide();
const tab = $("ul.tab > li");
const menu = $(".menu > li");
tab.click(function() {
const index = $(this).index() + 1;
tab.removeClass("select");
$(this).addClass("select");
$("main ul").eq(1).hide();
$("main ul").eq(2).hide();
$("main ul").eq(3).hide();
$("main ul").eq(index).show();
const offset = $(".offset").offset();
const scrollPosition = $(document).scrollTop();
console.log(offset);
console.log(offset["top"]);
if (offset["top"] < scrollPosition) {
$("html").animate({ scrollTop: offset.top }, 100);
}
});
//탭 눌렀을때 색변경 콘텐츠 변경
/* ---------------------------- 이미지 확대 ---------------------------- */
let zoom = false;
$(".review_img").click(function() {
if (zoom == false) {
$(this).css("transition", "0.3s").css("width", "100%");
zoom = true;
} else {
$(this).css("transition", "0.3s").css("width", "30%");
zoom = false;
}
});
/* ---------------------------- 이미지 확대 ---------------------------- */
}); | = $(".total"). | identifier_name |
memcached_test.go | // Copyright 2018 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/ghodss/yaml"
"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
"github.com/operator-framework/operator-sdk/internal/util/fileutil"
"github.com/operator-framework/operator-sdk/internal/util/projutil"
"github.com/operator-framework/operator-sdk/internal/util/yamlutil"
framework "github.com/operator-framework/operator-sdk/pkg/test"
"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
"github.com/prometheus/prometheus/util/promlint"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
crYAML string = "apiVersion: \"cache.example.com/v1alpha1\"\nkind: \"Memcached\"\nmetadata:\n name: \"example-memcached\"\nspec:\n size: 3"
retryInterval = time.Second * 5
timeout = time.Second * 120
cleanupRetryInterval = time.Second * 1
cleanupTimeout = time.Second * 10
operatorName = "memcached-operator"
)
func TestMemcached(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
gopath, ok := os.LookupEnv(projutil.GopathEnv)
if !ok {
t.Fatalf("$GOPATH not set")
}
cd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
defer func() {
if err := os.Chdir(cd); err != nil {
t.Errorf("Failed to change back to original working directory: (%v)", err)
}
}()
// Setup
absProjectPath := filepath.Join(gopath, "src/github.com/example-inc")
if err := os.MkdirAll(absProjectPath, fileutil.DefaultDirFileMode); err != nil {
t.Fatal(err)
}
if err := os.Chdir(absProjectPath); err != nil {
t.Fatal(err)
}
t.Log("Creating new operator project")
cmdOut, err := exec.Command("operator-sdk",
"new",
operatorName).CombinedOutput()
if err != nil {
// HACK: dep cannot resolve non-master branches as the base branch for PR's,
// so running `dep ensure` will fail when first running
// `operator-sdk new ...`. For now we can ignore the first solve failure.
// A permanent solution can be implemented once the following is merged:
// https://github.com/golang/dep/pull/1658
solveFailRe := regexp.MustCompile(`(?m)^[ \t]*Solving failure:.+github\.com/operator-framework/operator-sdk.+:$`)
if !solveFailRe.Match(cmdOut) {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
}
ctx.AddCleanupFn(func() error { return os.RemoveAll(absProjectPath) })
if err := os.Chdir(operatorName); err != nil {
t.Fatalf("Failed to change to %s directory: (%v)", operatorName, err)
}
repo, ok := os.LookupEnv("TRAVIS_PULL_REQUEST_SLUG")
if repo == "" {
repo, ok = os.LookupEnv("TRAVIS_REPO_SLUG")
}
if ok && repo != "" && repo != "operator-framework/operator-sdk" {
commitSha, ok := os.LookupEnv("TRAVIS_PULL_REQUEST_SHA")
if commitSha == "" {
commitSha, ok = os.LookupEnv("TRAVIS_COMMIT")
}
if ok && commitSha != "" {
gopkg, err := ioutil.ReadFile("Gopkg.toml")
if err != nil {
t.Fatal(err)
}
// Match against the '#osdk_branch_annotation' used for version substitution
// and comment out the current branch.
branchRe := regexp.MustCompile("([ ]+)(.+#osdk_branch_annotation)")
gopkg = branchRe.ReplaceAll(gopkg, []byte("$1# $2"))
versionRe := regexp.MustCompile("([ ]+)(.+#osdk_version_annotation)")
gopkg = versionRe.ReplaceAll(gopkg, []byte("$1# $2"))
// Plug in the fork to test against so `dep ensure` can resolve dependencies
// correctly.
gopkgString := string(gopkg)
gopkgLoc := strings.LastIndex(gopkgString, "\n name = \"github.com/operator-framework/operator-sdk\"\n")
gopkgString = gopkgString[:gopkgLoc] + "\n source = \"https://github.com/" + repo + "\"\n revision = \"" + commitSha + "\"\n" + gopkgString[gopkgLoc+1:]
err = ioutil.WriteFile("Gopkg.toml", []byte(gopkgString), fileutil.DefaultFileMode)
if err != nil {
t.Fatalf("Failed to write updated Gopkg.toml: %v", err)
}
t.Logf("Gopkg.toml: %v", gopkgString)
} else {
t.Fatal("Could not find sha of PR")
}
}
cmdOut, err = exec.Command("dep", "ensure").CombinedOutput()
if err != nil {
t.Fatalf("Error after modifying Gopkg.toml: %v\nCommand Output: %s\n", err, string(cmdOut))
}
// Set replicas to 2 to test leader election. In production, this should
// almost always be set to 1, because there isn't generally value in having
// a hot spare operator process.
opYaml, err := ioutil.ReadFile("deploy/operator.yaml")
if err != nil {
t.Fatalf("Could not read deploy/operator.yaml: %v", err)
}
newOpYaml := bytes.Replace(opYaml, []byte("replicas: 1"), []byte("replicas: 2"), 1)
err = ioutil.WriteFile("deploy/operator.yaml", newOpYaml, 0644)
if err != nil {
t.Fatalf("Could not write deploy/operator.yaml: %v", err)
}
cmd := exec.Command("operator-sdk",
"add",
"api",
"--api-version=cache.example.com/v1alpha1",
"--kind=Memcached")
cmd.Env = os.Environ()
cmdOut, err = cmd.CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
cmdOut, err = exec.Command("operator-sdk",
"add",
"controller",
"--api-version=cache.example.com/v1alpha1",
"--kind=Memcached").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
cmdOut, err = exec.Command("cp", "-a", filepath.Join(gopath, "src/github.com/operator-framework/operator-sdk/example/memcached-operator/memcached_controller.go.tmpl"),
"pkg/controller/memcached/memcached_controller.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not copy memcached example to to pkg/controller/memcached/memcached_controller.go: %v\nCommand Output:\n%v", err, string(cmdOut))
}
memcachedTypesFile, err := ioutil.ReadFile("pkg/apis/cache/v1alpha1/memcached_types.go")
if err != nil {
t.Fatal(err)
}
memcachedTypesFileLines := bytes.Split(memcachedTypesFile, []byte("\n"))
for lineNum, line := range memcachedTypesFileLines {
if strings.Contains(string(line), "type MemcachedSpec struct {") {
memcachedTypesFileLinesIntermediate := append(memcachedTypesFileLines[:lineNum+1], []byte("\tSize int32 `json:\"size\"`"))
memcachedTypesFileLines = append(memcachedTypesFileLinesIntermediate, memcachedTypesFileLines[lineNum+3:]...)
break
}
}
for lineNum, line := range memcachedTypesFileLines {
if strings.Contains(string(line), "type MemcachedStatus struct {") {
memcachedTypesFileLinesIntermediate := append(memcachedTypesFileLines[:lineNum+1], []byte("\tNodes []string `json:\"nodes\"`"))
memcachedTypesFileLines = append(memcachedTypesFileLinesIntermediate, memcachedTypesFileLines[lineNum+3:]...)
break
}
}
if err := os.Remove("pkg/apis/cache/v1alpha1/memcached_types.go"); err != nil {
t.Fatalf("Failed to remove old memcached_type.go file: (%v)", err)
}
err = ioutil.WriteFile("pkg/apis/cache/v1alpha1/memcached_types.go", bytes.Join(memcachedTypesFileLines, []byte("\n")), fileutil.DefaultFileMode)
if err != nil {
t.Fatal(err)
}
t.Log("Generating k8s")
cmdOut, err = exec.Command("operator-sdk", "generate", "k8s").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
t.Log("Copying test files to ./test")
if err = os.MkdirAll("./test", fileutil.DefaultDirFileMode); err != nil {
t.Fatalf("Could not create test/e2e dir: %v", err)
}
cmdOut, err = exec.Command("cp", "-a", filepath.Join(gopath, "src/github.com/operator-framework/operator-sdk/test/e2e/incluster-test-code"), "./test/e2e").CombinedOutput()
if err != nil {
t.Fatalf("Could not copy tests to test/e2e: %v\nCommand Output:\n%v", err, string(cmdOut))
}
// fix naming of files
cmdOut, err = exec.Command("mv", "test/e2e/main_test.go.tmpl", "test/e2e/main_test.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not rename test/e2e/main_test.go.tmpl: %v\nCommand Output:\n%v", err, string(cmdOut))
}
cmdOut, err = exec.Command("mv", "test/e2e/memcached_test.go.tmpl", "test/e2e/memcached_test.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not rename test/e2e/memcached_test.go.tmpl: %v\nCommand Output:\n%v", err, string(cmdOut))
}
t.Log("Pulling new dependencies with dep ensure")
cmdOut, err = exec.Command("dep", "ensure").CombinedOutput()
if err != nil {
t.Fatalf("Command 'dep ensure' failed: %v\nCommand Output:\n%v", err, string(cmdOut))
}
// link local sdk to vendor if not in travis
if repo == "" {
for _, dir := range []string{"pkg", "internal"} {
repoDir := filepath.Join("github.com/operator-framework/operator-sdk", dir)
vendorDir := filepath.Join("vendor", repoDir)
if err := os.RemoveAll(vendorDir); err != nil {
t.Fatalf("Failed to delete old vendor directory: (%v)", err)
}
if err := os.Symlink(filepath.Join(gopath, projutil.SrcDir, repoDir), vendorDir); err != nil {
t.Fatalf("Failed to symlink local operator-sdk project to vendor dir: (%v)", err)
}
}
}
file, err := yamlutil.GenerateCombinedGlobalManifest(scaffold.CRDsDir)
if err != nil {
t.Fatal(err)
}
// hacky way to use createFromYAML without exposing the method
// create crd
filename := file.Name()
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created global resources")
// run subtests
t.Run("memcached-group", func(t *testing.T) {
t.Run("Cluster", MemcachedCluster)
t.Run("ClusterTest", MemcachedClusterTest)
t.Run("Local", MemcachedLocal)
})
}
func memcachedLeaderTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
return err
}
label := map[string]string{"name": operatorName}
leader, err := verifyLeader(t, namespace, f, label)
if err != nil {
return err
}
// delete the leader's pod so a new leader will get elected
err = f.Client.Delete(context.TODO(), leader)
if err != nil {
return err
}
err = e2eutil.WaitForDeletion(t, f.Client.Client, leader, retryInterval, timeout)
if err != nil {
return err
}
err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
return err
}
newLeader, err := verifyLeader(t, namespace, f, label)
if err != nil {
return err
}
if newLeader.Name == leader.Name {
return fmt.Errorf("leader pod name did not change across pod delete")
}
return nil
}
func verifyLeader(t *testing.T, namespace string, f *framework.Framework, labels map[string]string) (*v1.Pod, error) {
// get configmap, which is the lock
lockName := "memcached-operator-lock"
lock := v1.ConfigMap{}
err := wait.Poll(retryInterval, timeout, func() (done bool, err error) {
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: lockName, Namespace: namespace}, &lock)
if err != nil {
if apierrors.IsNotFound(err) {
t.Logf("Waiting for availability of leader lock configmap %s\n", lockName)
return false, nil
}
return false, err
}
return true, nil
})
if err != nil {
return nil, fmt.Errorf("error getting leader lock configmap: %v\n", err)
}
t.Logf("Found leader lock configmap %s\n", lockName)
owners := lock.GetOwnerReferences()
if len(owners) != 1 {
return nil, fmt.Errorf("leader lock has %d owner refs, expected 1", len(owners))
}
owner := owners[0]
// get operator pods
pods := v1.PodList{}
opts := client.ListOptions{Namespace: namespace}
for k, v := range labels {
if err := opts.SetLabelSelector(fmt.Sprintf("%s=%s", k, v)); err != nil {
return nil, fmt.Errorf("failed to set list label selector: (%v)", err)
}
}
if err := opts.SetFieldSelector("status.phase=Running"); err != nil {
t.Fatalf("Failed to set list field selector: (%v)", err)
}
err = f.Client.List(context.TODO(), &opts, &pods)
if err != nil {
return nil, err
}
if len(pods.Items) != 2 {
return nil, fmt.Errorf("expected 2 pods, found %d", len(pods.Items))
}
// find and return the leader
for _, pod := range pods.Items {
if pod.Name == owner.Name {
return &pod, nil
}
}
return nil, fmt.Errorf("did not find operator pod that was referenced by configmap")
}
func memcachedScaleTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
// create example-memcached yaml file
filename := "deploy/cr.yaml"
err := ioutil.WriteFile(filename,
[]byte(crYAML),
fileutil.DefaultFileMode)
if err != nil {
return err
}
// create memcached custom resource
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
return err
}
t.Log("Created cr")
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
// wait for example-memcached to reach 3 replicas
err = e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "example-memcached", 3, retryInterval, timeout)
if err != nil {
return err
}
// get fresh copy of memcached object as unstructured
obj := unstructured.Unstructured{}
jsonSpec, err := yaml.YAMLToJSON([]byte(crYAML))
if err != nil {
return fmt.Errorf("could not convert yaml file to json: %v", err)
}
if err := obj.UnmarshalJSON(jsonSpec); err != nil {
t.Fatalf("Failed to unmarshal memcached CR: (%v)", err)
}
obj.SetNamespace(namespace)
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, &obj)
if err != nil {
return fmt.Errorf("failed to get memcached object: %s", err)
}
// update memcached CR size to 4
spec, ok := obj.Object["spec"].(map[string]interface{})
if !ok {
return errors.New("memcached object missing spec field")
}
spec["size"] = 4
err = f.Client.Update(context.TODO(), &obj)
if err != nil {
return err
}
// wait for example-memcached to reach 4 replicas
return e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "example-memcached", 4, retryInterval, timeout)
}
func MemcachedLocal(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatal(err)
}
cmd := exec.Command("operator-sdk", "up", "local", "--namespace="+namespace)
stderr, err := os.Create("stderr.txt")
if err != nil {
t.Fatalf("Failed to create stderr.txt: %v", err)
}
cmd.Stderr = stderr
defer func() {
if err := stderr.Close(); err != nil && !fileutil.IsClosedError(err) {
t.Errorf("Failed to close stderr: (%v)", err)
}
}()
err = cmd.Start()
if err != nil {
t.Fatalf("Error: %v", err)
}
ctx.AddCleanupFn(func() error { return cmd.Process.Signal(os.Interrupt) })
// wait for operator to start (may take a minute to compile the command...)
err = wait.Poll(time.Second*5, time.Second*100, func() (done bool, err error) {
file, err := ioutil.ReadFile("stderr.txt")
if err != nil {
return false, err
}
if len(file) == 0 {
return false, nil
}
return true, nil
})
if err != nil {
t.Fatalf("Local operator not ready after 100 seconds: %v\n", err)
}
if err = memcachedScaleTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
}
func MemcachedCluster(t *testing.T) { | // get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
operatorYAML, err := ioutil.ReadFile("deploy/operator.yaml")
if err != nil {
t.Fatalf("Could not read deploy/operator.yaml: %v", err)
}
local := *e2eImageName == ""
if local {
*e2eImageName = "quay.io/example/memcached-operator:v0.0.1"
if err != nil {
t.Fatal(err)
}
operatorYAML = bytes.Replace(operatorYAML, []byte("imagePullPolicy: Always"), []byte("imagePullPolicy: Never"), 1)
err = ioutil.WriteFile("deploy/operator.yaml", operatorYAML, fileutil.DefaultFileMode)
if err != nil {
t.Fatal(err)
}
}
operatorYAML = bytes.Replace(operatorYAML, []byte("REPLACE_IMAGE"), []byte(*e2eImageName), 1)
err = ioutil.WriteFile("deploy/operator.yaml", operatorYAML, os.FileMode(0644))
if err != nil {
t.Fatalf("Failed to write deploy/operator.yaml: %v", err)
}
t.Log("Building operator docker image")
cmdOut, err := exec.Command("operator-sdk", "build", *e2eImageName,
"--enable-tests",
"--test-location", "./test/e2e",
"--namespaced-manifest", "deploy/operator.yaml").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
if !local {
t.Log("Pushing docker image to repo")
cmdOut, err = exec.Command("docker", "push", *e2eImageName).CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
}
file, err := yamlutil.GenerateCombinedNamespacedManifest(scaffold.DeployDir)
if err != nil {
t.Fatal(err)
}
// create namespaced resources
filename := file.Name()
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created namespaced resources")
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatal(err)
}
// wait for memcached-operator to be ready
err = e2eutil.WaitForOperatorDeployment(t, framework.Global.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
t.Fatal(err)
}
if err = memcachedLeaderTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
if err = memcachedScaleTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
if err = memcachedMetricsTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
}
func MemcachedClusterTest(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
// create sa
filename := "deploy/service_account.yaml"
framework.Global.NamespacedManPath = &filename
err := ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created sa")
// create rbac
filename = "deploy/role.yaml"
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created role")
filename = "deploy/role_binding.yaml"
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created role_binding")
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatalf("Could not get namespace: %v", err)
}
cmdOut, err := exec.Command("operator-sdk", "test", "cluster", *e2eImageName,
"--namespace", namespace,
"--image-pull-policy", "Never",
"--service-account", operatorName).CombinedOutput()
if err != nil {
t.Fatalf("In-cluster test failed: %v\nCommand Output:\n%s", err, string(cmdOut))
}
}
func memcachedMetricsTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
// Make sure metrics Service exists
s := v1.Service{}
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: operatorName, Namespace: namespace}, &s)
if err != nil {
return fmt.Errorf("could not get metrics Service: (%v)", err)
}
// Get operator pod
pods := v1.PodList{}
opts := client.InNamespace(namespace)
if len(s.Spec.Selector) == 0 {
return fmt.Errorf("no labels found in metrics Service")
}
for k, v := range s.Spec.Selector {
if err := opts.SetLabelSelector(fmt.Sprintf("%s=%s", k, v)); err != nil {
return fmt.Errorf("failed to set list label selector: (%v)", err)
}
}
if err := opts.SetFieldSelector("status.phase=Running"); err != nil {
return fmt.Errorf("failed to set list field selector: (%v)", err)
}
err = f.Client.List(context.TODO(), opts, &pods)
if err != nil {
return fmt.Errorf("failed to get pods: (%v)", err)
}
podName := ""
numPods := len(pods.Items)
// TODO(lili): Remove below logic when we enable exposing metrics in all pods.
if numPods == 0 {
podName = pods.Items[0].Name
} else if numPods > 1 {
// If we got more than one pod, get leader pod name.
leader, err := verifyLeader(t, namespace, f, s.Spec.Selector)
if err != nil {
return err
}
podName = leader.Name
} else {
return fmt.Errorf("failed to get operator pod: could not select any pods with Service selector %v", s.Spec.Selector)
}
// Pod name must be there, otherwise we cannot read metrics data via pod proxy.
if podName == "" {
return fmt.Errorf("failed to get pod name")
}
// Get metrics data
request := proxyViaPod(f.KubeClient, namespace, podName, "8383", "/metrics")
response, err := request.DoRaw()
if err != nil {
return fmt.Errorf("failed to get response from metrics: %v", err)
}
// Make sure metrics are present
if len(response) == 0 {
return fmt.Errorf("metrics body is empty")
}
// Perform prometheus metrics lint checks
l := promlint.New(bytes.NewReader(response))
problems, err := l.Lint()
if err != nil {
return fmt.Errorf("failed to lint metrics: %v", err)
}
// TODO(lili): Change to 0, when we upgrade to 1.14.
// currently there is a problem with one of the metrics in upstream Kubernetes:
// `workqueue_longest_running_processor_microseconds`.
// This has been fixed in 1.14 release.
if len(problems) > 1 {
return fmt.Errorf("found problems with metrics: %#+v", problems)
}
return nil
}
func proxyViaPod(kubeClient kubernetes.Interface, namespace, podName, podPortName, path string) *rest.Request {
return kubeClient.
CoreV1().
RESTClient().
Get().
Namespace(namespace).
Resource("pods").
SubResource("proxy").
Name(fmt.Sprintf("%s:%s", podName, podPortName)).
Suffix(path)
} | random_line_split | |
memcached_test.go | // Copyright 2018 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/ghodss/yaml"
"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
"github.com/operator-framework/operator-sdk/internal/util/fileutil"
"github.com/operator-framework/operator-sdk/internal/util/projutil"
"github.com/operator-framework/operator-sdk/internal/util/yamlutil"
framework "github.com/operator-framework/operator-sdk/pkg/test"
"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
"github.com/prometheus/prometheus/util/promlint"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
crYAML string = "apiVersion: \"cache.example.com/v1alpha1\"\nkind: \"Memcached\"\nmetadata:\n name: \"example-memcached\"\nspec:\n size: 3"
retryInterval = time.Second * 5
timeout = time.Second * 120
cleanupRetryInterval = time.Second * 1
cleanupTimeout = time.Second * 10
operatorName = "memcached-operator"
)
func TestMemcached(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
gopath, ok := os.LookupEnv(projutil.GopathEnv)
if !ok {
t.Fatalf("$GOPATH not set")
}
cd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
defer func() {
if err := os.Chdir(cd); err != nil {
t.Errorf("Failed to change back to original working directory: (%v)", err)
}
}()
// Setup
absProjectPath := filepath.Join(gopath, "src/github.com/example-inc")
if err := os.MkdirAll(absProjectPath, fileutil.DefaultDirFileMode); err != nil {
t.Fatal(err)
}
if err := os.Chdir(absProjectPath); err != nil {
t.Fatal(err)
}
t.Log("Creating new operator project")
cmdOut, err := exec.Command("operator-sdk",
"new",
operatorName).CombinedOutput()
if err != nil {
// HACK: dep cannot resolve non-master branches as the base branch for PR's,
// so running `dep ensure` will fail when first running
// `operator-sdk new ...`. For now we can ignore the first solve failure.
// A permanent solution can be implemented once the following is merged:
// https://github.com/golang/dep/pull/1658
solveFailRe := regexp.MustCompile(`(?m)^[ \t]*Solving failure:.+github\.com/operator-framework/operator-sdk.+:$`)
if !solveFailRe.Match(cmdOut) {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
}
ctx.AddCleanupFn(func() error { return os.RemoveAll(absProjectPath) })
if err := os.Chdir(operatorName); err != nil {
t.Fatalf("Failed to change to %s directory: (%v)", operatorName, err)
}
repo, ok := os.LookupEnv("TRAVIS_PULL_REQUEST_SLUG")
if repo == "" {
repo, ok = os.LookupEnv("TRAVIS_REPO_SLUG")
}
if ok && repo != "" && repo != "operator-framework/operator-sdk" {
commitSha, ok := os.LookupEnv("TRAVIS_PULL_REQUEST_SHA")
if commitSha == "" {
commitSha, ok = os.LookupEnv("TRAVIS_COMMIT")
}
if ok && commitSha != "" {
gopkg, err := ioutil.ReadFile("Gopkg.toml")
if err != nil {
t.Fatal(err)
}
// Match against the '#osdk_branch_annotation' used for version substitution
// and comment out the current branch.
branchRe := regexp.MustCompile("([ ]+)(.+#osdk_branch_annotation)")
gopkg = branchRe.ReplaceAll(gopkg, []byte("$1# $2"))
versionRe := regexp.MustCompile("([ ]+)(.+#osdk_version_annotation)")
gopkg = versionRe.ReplaceAll(gopkg, []byte("$1# $2"))
// Plug in the fork to test against so `dep ensure` can resolve dependencies
// correctly.
gopkgString := string(gopkg)
gopkgLoc := strings.LastIndex(gopkgString, "\n name = \"github.com/operator-framework/operator-sdk\"\n")
gopkgString = gopkgString[:gopkgLoc] + "\n source = \"https://github.com/" + repo + "\"\n revision = \"" + commitSha + "\"\n" + gopkgString[gopkgLoc+1:]
err = ioutil.WriteFile("Gopkg.toml", []byte(gopkgString), fileutil.DefaultFileMode)
if err != nil {
t.Fatalf("Failed to write updated Gopkg.toml: %v", err)
}
t.Logf("Gopkg.toml: %v", gopkgString)
} else {
t.Fatal("Could not find sha of PR")
}
}
cmdOut, err = exec.Command("dep", "ensure").CombinedOutput()
if err != nil {
t.Fatalf("Error after modifying Gopkg.toml: %v\nCommand Output: %s\n", err, string(cmdOut))
}
// Set replicas to 2 to test leader election. In production, this should
// almost always be set to 1, because there isn't generally value in having
// a hot spare operator process.
opYaml, err := ioutil.ReadFile("deploy/operator.yaml")
if err != nil {
t.Fatalf("Could not read deploy/operator.yaml: %v", err)
}
newOpYaml := bytes.Replace(opYaml, []byte("replicas: 1"), []byte("replicas: 2"), 1)
err = ioutil.WriteFile("deploy/operator.yaml", newOpYaml, 0644)
if err != nil {
t.Fatalf("Could not write deploy/operator.yaml: %v", err)
}
cmd := exec.Command("operator-sdk",
"add",
"api",
"--api-version=cache.example.com/v1alpha1",
"--kind=Memcached")
cmd.Env = os.Environ()
cmdOut, err = cmd.CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
cmdOut, err = exec.Command("operator-sdk",
"add",
"controller",
"--api-version=cache.example.com/v1alpha1",
"--kind=Memcached").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
cmdOut, err = exec.Command("cp", "-a", filepath.Join(gopath, "src/github.com/operator-framework/operator-sdk/example/memcached-operator/memcached_controller.go.tmpl"),
"pkg/controller/memcached/memcached_controller.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not copy memcached example to to pkg/controller/memcached/memcached_controller.go: %v\nCommand Output:\n%v", err, string(cmdOut))
}
memcachedTypesFile, err := ioutil.ReadFile("pkg/apis/cache/v1alpha1/memcached_types.go")
if err != nil {
t.Fatal(err)
}
memcachedTypesFileLines := bytes.Split(memcachedTypesFile, []byte("\n"))
for lineNum, line := range memcachedTypesFileLines {
if strings.Contains(string(line), "type MemcachedSpec struct {") {
memcachedTypesFileLinesIntermediate := append(memcachedTypesFileLines[:lineNum+1], []byte("\tSize int32 `json:\"size\"`"))
memcachedTypesFileLines = append(memcachedTypesFileLinesIntermediate, memcachedTypesFileLines[lineNum+3:]...)
break
}
}
for lineNum, line := range memcachedTypesFileLines {
if strings.Contains(string(line), "type MemcachedStatus struct {") {
memcachedTypesFileLinesIntermediate := append(memcachedTypesFileLines[:lineNum+1], []byte("\tNodes []string `json:\"nodes\"`"))
memcachedTypesFileLines = append(memcachedTypesFileLinesIntermediate, memcachedTypesFileLines[lineNum+3:]...)
break
}
}
if err := os.Remove("pkg/apis/cache/v1alpha1/memcached_types.go"); err != nil {
t.Fatalf("Failed to remove old memcached_type.go file: (%v)", err)
}
err = ioutil.WriteFile("pkg/apis/cache/v1alpha1/memcached_types.go", bytes.Join(memcachedTypesFileLines, []byte("\n")), fileutil.DefaultFileMode)
if err != nil {
t.Fatal(err)
}
t.Log("Generating k8s")
cmdOut, err = exec.Command("operator-sdk", "generate", "k8s").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
t.Log("Copying test files to ./test")
if err = os.MkdirAll("./test", fileutil.DefaultDirFileMode); err != nil {
t.Fatalf("Could not create test/e2e dir: %v", err)
}
cmdOut, err = exec.Command("cp", "-a", filepath.Join(gopath, "src/github.com/operator-framework/operator-sdk/test/e2e/incluster-test-code"), "./test/e2e").CombinedOutput()
if err != nil {
t.Fatalf("Could not copy tests to test/e2e: %v\nCommand Output:\n%v", err, string(cmdOut))
}
// fix naming of files
cmdOut, err = exec.Command("mv", "test/e2e/main_test.go.tmpl", "test/e2e/main_test.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not rename test/e2e/main_test.go.tmpl: %v\nCommand Output:\n%v", err, string(cmdOut))
}
cmdOut, err = exec.Command("mv", "test/e2e/memcached_test.go.tmpl", "test/e2e/memcached_test.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not rename test/e2e/memcached_test.go.tmpl: %v\nCommand Output:\n%v", err, string(cmdOut))
}
t.Log("Pulling new dependencies with dep ensure")
cmdOut, err = exec.Command("dep", "ensure").CombinedOutput()
if err != nil {
t.Fatalf("Command 'dep ensure' failed: %v\nCommand Output:\n%v", err, string(cmdOut))
}
// link local sdk to vendor if not in travis
if repo == "" {
for _, dir := range []string{"pkg", "internal"} {
repoDir := filepath.Join("github.com/operator-framework/operator-sdk", dir)
vendorDir := filepath.Join("vendor", repoDir)
if err := os.RemoveAll(vendorDir); err != nil {
t.Fatalf("Failed to delete old vendor directory: (%v)", err)
}
if err := os.Symlink(filepath.Join(gopath, projutil.SrcDir, repoDir), vendorDir); err != nil {
t.Fatalf("Failed to symlink local operator-sdk project to vendor dir: (%v)", err)
}
}
}
file, err := yamlutil.GenerateCombinedGlobalManifest(scaffold.CRDsDir)
if err != nil {
t.Fatal(err)
}
// hacky way to use createFromYAML without exposing the method
// create crd
filename := file.Name()
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created global resources")
// run subtests
t.Run("memcached-group", func(t *testing.T) {
t.Run("Cluster", MemcachedCluster)
t.Run("ClusterTest", MemcachedClusterTest)
t.Run("Local", MemcachedLocal)
})
}
func memcachedLeaderTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
return err
}
label := map[string]string{"name": operatorName}
leader, err := verifyLeader(t, namespace, f, label)
if err != nil {
return err
}
// delete the leader's pod so a new leader will get elected
err = f.Client.Delete(context.TODO(), leader)
if err != nil {
return err
}
err = e2eutil.WaitForDeletion(t, f.Client.Client, leader, retryInterval, timeout)
if err != nil {
return err
}
err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
return err
}
newLeader, err := verifyLeader(t, namespace, f, label)
if err != nil {
return err
}
if newLeader.Name == leader.Name {
return fmt.Errorf("leader pod name did not change across pod delete")
}
return nil
}
func verifyLeader(t *testing.T, namespace string, f *framework.Framework, labels map[string]string) (*v1.Pod, error) |
func memcachedScaleTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
// create example-memcached yaml file
filename := "deploy/cr.yaml"
err := ioutil.WriteFile(filename,
[]byte(crYAML),
fileutil.DefaultFileMode)
if err != nil {
return err
}
// create memcached custom resource
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
return err
}
t.Log("Created cr")
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
// wait for example-memcached to reach 3 replicas
err = e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "example-memcached", 3, retryInterval, timeout)
if err != nil {
return err
}
// get fresh copy of memcached object as unstructured
obj := unstructured.Unstructured{}
jsonSpec, err := yaml.YAMLToJSON([]byte(crYAML))
if err != nil {
return fmt.Errorf("could not convert yaml file to json: %v", err)
}
if err := obj.UnmarshalJSON(jsonSpec); err != nil {
t.Fatalf("Failed to unmarshal memcached CR: (%v)", err)
}
obj.SetNamespace(namespace)
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, &obj)
if err != nil {
return fmt.Errorf("failed to get memcached object: %s", err)
}
// update memcached CR size to 4
spec, ok := obj.Object["spec"].(map[string]interface{})
if !ok {
return errors.New("memcached object missing spec field")
}
spec["size"] = 4
err = f.Client.Update(context.TODO(), &obj)
if err != nil {
return err
}
// wait for example-memcached to reach 4 replicas
return e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "example-memcached", 4, retryInterval, timeout)
}
func MemcachedLocal(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatal(err)
}
cmd := exec.Command("operator-sdk", "up", "local", "--namespace="+namespace)
stderr, err := os.Create("stderr.txt")
if err != nil {
t.Fatalf("Failed to create stderr.txt: %v", err)
}
cmd.Stderr = stderr
defer func() {
if err := stderr.Close(); err != nil && !fileutil.IsClosedError(err) {
t.Errorf("Failed to close stderr: (%v)", err)
}
}()
err = cmd.Start()
if err != nil {
t.Fatalf("Error: %v", err)
}
ctx.AddCleanupFn(func() error { return cmd.Process.Signal(os.Interrupt) })
// wait for operator to start (may take a minute to compile the command...)
err = wait.Poll(time.Second*5, time.Second*100, func() (done bool, err error) {
file, err := ioutil.ReadFile("stderr.txt")
if err != nil {
return false, err
}
if len(file) == 0 {
return false, nil
}
return true, nil
})
if err != nil {
t.Fatalf("Local operator not ready after 100 seconds: %v\n", err)
}
if err = memcachedScaleTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
}
func MemcachedCluster(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
operatorYAML, err := ioutil.ReadFile("deploy/operator.yaml")
if err != nil {
t.Fatalf("Could not read deploy/operator.yaml: %v", err)
}
local := *e2eImageName == ""
if local {
*e2eImageName = "quay.io/example/memcached-operator:v0.0.1"
if err != nil {
t.Fatal(err)
}
operatorYAML = bytes.Replace(operatorYAML, []byte("imagePullPolicy: Always"), []byte("imagePullPolicy: Never"), 1)
err = ioutil.WriteFile("deploy/operator.yaml", operatorYAML, fileutil.DefaultFileMode)
if err != nil {
t.Fatal(err)
}
}
operatorYAML = bytes.Replace(operatorYAML, []byte("REPLACE_IMAGE"), []byte(*e2eImageName), 1)
err = ioutil.WriteFile("deploy/operator.yaml", operatorYAML, os.FileMode(0644))
if err != nil {
t.Fatalf("Failed to write deploy/operator.yaml: %v", err)
}
t.Log("Building operator docker image")
cmdOut, err := exec.Command("operator-sdk", "build", *e2eImageName,
"--enable-tests",
"--test-location", "./test/e2e",
"--namespaced-manifest", "deploy/operator.yaml").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
if !local {
t.Log("Pushing docker image to repo")
cmdOut, err = exec.Command("docker", "push", *e2eImageName).CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
}
file, err := yamlutil.GenerateCombinedNamespacedManifest(scaffold.DeployDir)
if err != nil {
t.Fatal(err)
}
// create namespaced resources
filename := file.Name()
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created namespaced resources")
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatal(err)
}
// wait for memcached-operator to be ready
err = e2eutil.WaitForOperatorDeployment(t, framework.Global.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
t.Fatal(err)
}
if err = memcachedLeaderTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
if err = memcachedScaleTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
if err = memcachedMetricsTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
}
func MemcachedClusterTest(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
// create sa
filename := "deploy/service_account.yaml"
framework.Global.NamespacedManPath = &filename
err := ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created sa")
// create rbac
filename = "deploy/role.yaml"
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created role")
filename = "deploy/role_binding.yaml"
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created role_binding")
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatalf("Could not get namespace: %v", err)
}
cmdOut, err := exec.Command("operator-sdk", "test", "cluster", *e2eImageName,
"--namespace", namespace,
"--image-pull-policy", "Never",
"--service-account", operatorName).CombinedOutput()
if err != nil {
t.Fatalf("In-cluster test failed: %v\nCommand Output:\n%s", err, string(cmdOut))
}
}
func memcachedMetricsTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
// Make sure metrics Service exists
s := v1.Service{}
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: operatorName, Namespace: namespace}, &s)
if err != nil {
return fmt.Errorf("could not get metrics Service: (%v)", err)
}
// Get operator pod
pods := v1.PodList{}
opts := client.InNamespace(namespace)
if len(s.Spec.Selector) == 0 {
return fmt.Errorf("no labels found in metrics Service")
}
for k, v := range s.Spec.Selector {
if err := opts.SetLabelSelector(fmt.Sprintf("%s=%s", k, v)); err != nil {
return fmt.Errorf("failed to set list label selector: (%v)", err)
}
}
if err := opts.SetFieldSelector("status.phase=Running"); err != nil {
return fmt.Errorf("failed to set list field selector: (%v)", err)
}
err = f.Client.List(context.TODO(), opts, &pods)
if err != nil {
return fmt.Errorf("failed to get pods: (%v)", err)
}
podName := ""
numPods := len(pods.Items)
// TODO(lili): Remove below logic when we enable exposing metrics in all pods.
if numPods == 0 {
podName = pods.Items[0].Name
} else if numPods > 1 {
// If we got more than one pod, get leader pod name.
leader, err := verifyLeader(t, namespace, f, s.Spec.Selector)
if err != nil {
return err
}
podName = leader.Name
} else {
return fmt.Errorf("failed to get operator pod: could not select any pods with Service selector %v", s.Spec.Selector)
}
// Pod name must be there, otherwise we cannot read metrics data via pod proxy.
if podName == "" {
return fmt.Errorf("failed to get pod name")
}
// Get metrics data
request := proxyViaPod(f.KubeClient, namespace, podName, "8383", "/metrics")
response, err := request.DoRaw()
if err != nil {
return fmt.Errorf("failed to get response from metrics: %v", err)
}
// Make sure metrics are present
if len(response) == 0 {
return fmt.Errorf("metrics body is empty")
}
// Perform prometheus metrics lint checks
l := promlint.New(bytes.NewReader(response))
problems, err := l.Lint()
if err != nil {
return fmt.Errorf("failed to lint metrics: %v", err)
}
// TODO(lili): Change to 0, when we upgrade to 1.14.
// currently there is a problem with one of the metrics in upstream Kubernetes:
// `workqueue_longest_running_processor_microseconds`.
// This has been fixed in 1.14 release.
if len(problems) > 1 {
return fmt.Errorf("found problems with metrics: %#+v", problems)
}
return nil
}
func proxyViaPod(kubeClient kubernetes.Interface, namespace, podName, podPortName, path string) *rest.Request {
return kubeClient.
CoreV1().
RESTClient().
Get().
Namespace(namespace).
Resource("pods").
SubResource("proxy").
Name(fmt.Sprintf("%s:%s", podName, podPortName)).
Suffix(path)
}
| {
// get configmap, which is the lock
lockName := "memcached-operator-lock"
lock := v1.ConfigMap{}
err := wait.Poll(retryInterval, timeout, func() (done bool, err error) {
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: lockName, Namespace: namespace}, &lock)
if err != nil {
if apierrors.IsNotFound(err) {
t.Logf("Waiting for availability of leader lock configmap %s\n", lockName)
return false, nil
}
return false, err
}
return true, nil
})
if err != nil {
return nil, fmt.Errorf("error getting leader lock configmap: %v\n", err)
}
t.Logf("Found leader lock configmap %s\n", lockName)
owners := lock.GetOwnerReferences()
if len(owners) != 1 {
return nil, fmt.Errorf("leader lock has %d owner refs, expected 1", len(owners))
}
owner := owners[0]
// get operator pods
pods := v1.PodList{}
opts := client.ListOptions{Namespace: namespace}
for k, v := range labels {
if err := opts.SetLabelSelector(fmt.Sprintf("%s=%s", k, v)); err != nil {
return nil, fmt.Errorf("failed to set list label selector: (%v)", err)
}
}
if err := opts.SetFieldSelector("status.phase=Running"); err != nil {
t.Fatalf("Failed to set list field selector: (%v)", err)
}
err = f.Client.List(context.TODO(), &opts, &pods)
if err != nil {
return nil, err
}
if len(pods.Items) != 2 {
return nil, fmt.Errorf("expected 2 pods, found %d", len(pods.Items))
}
// find and return the leader
for _, pod := range pods.Items {
if pod.Name == owner.Name {
return &pod, nil
}
}
return nil, fmt.Errorf("did not find operator pod that was referenced by configmap")
} | identifier_body |
memcached_test.go | // Copyright 2018 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/ghodss/yaml"
"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
"github.com/operator-framework/operator-sdk/internal/util/fileutil"
"github.com/operator-framework/operator-sdk/internal/util/projutil"
"github.com/operator-framework/operator-sdk/internal/util/yamlutil"
framework "github.com/operator-framework/operator-sdk/pkg/test"
"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
"github.com/prometheus/prometheus/util/promlint"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
crYAML string = "apiVersion: \"cache.example.com/v1alpha1\"\nkind: \"Memcached\"\nmetadata:\n name: \"example-memcached\"\nspec:\n size: 3"
retryInterval = time.Second * 5
timeout = time.Second * 120
cleanupRetryInterval = time.Second * 1
cleanupTimeout = time.Second * 10
operatorName = "memcached-operator"
)
func TestMemcached(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
gopath, ok := os.LookupEnv(projutil.GopathEnv)
if !ok {
t.Fatalf("$GOPATH not set")
}
cd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
defer func() {
if err := os.Chdir(cd); err != nil {
t.Errorf("Failed to change back to original working directory: (%v)", err)
}
}()
// Setup
absProjectPath := filepath.Join(gopath, "src/github.com/example-inc")
if err := os.MkdirAll(absProjectPath, fileutil.DefaultDirFileMode); err != nil {
t.Fatal(err)
}
if err := os.Chdir(absProjectPath); err != nil {
t.Fatal(err)
}
t.Log("Creating new operator project")
cmdOut, err := exec.Command("operator-sdk",
"new",
operatorName).CombinedOutput()
if err != nil {
// HACK: dep cannot resolve non-master branches as the base branch for PR's,
// so running `dep ensure` will fail when first running
// `operator-sdk new ...`. For now we can ignore the first solve failure.
// A permanent solution can be implemented once the following is merged:
// https://github.com/golang/dep/pull/1658
solveFailRe := regexp.MustCompile(`(?m)^[ \t]*Solving failure:.+github\.com/operator-framework/operator-sdk.+:$`)
if !solveFailRe.Match(cmdOut) {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
}
ctx.AddCleanupFn(func() error { return os.RemoveAll(absProjectPath) })
if err := os.Chdir(operatorName); err != nil {
t.Fatalf("Failed to change to %s directory: (%v)", operatorName, err)
}
repo, ok := os.LookupEnv("TRAVIS_PULL_REQUEST_SLUG")
if repo == "" {
repo, ok = os.LookupEnv("TRAVIS_REPO_SLUG")
}
if ok && repo != "" && repo != "operator-framework/operator-sdk" {
commitSha, ok := os.LookupEnv("TRAVIS_PULL_REQUEST_SHA")
if commitSha == "" {
commitSha, ok = os.LookupEnv("TRAVIS_COMMIT")
}
if ok && commitSha != "" {
gopkg, err := ioutil.ReadFile("Gopkg.toml")
if err != nil {
t.Fatal(err)
}
// Match against the '#osdk_branch_annotation' used for version substitution
// and comment out the current branch.
branchRe := regexp.MustCompile("([ ]+)(.+#osdk_branch_annotation)")
gopkg = branchRe.ReplaceAll(gopkg, []byte("$1# $2"))
versionRe := regexp.MustCompile("([ ]+)(.+#osdk_version_annotation)")
gopkg = versionRe.ReplaceAll(gopkg, []byte("$1# $2"))
// Plug in the fork to test against so `dep ensure` can resolve dependencies
// correctly.
gopkgString := string(gopkg)
gopkgLoc := strings.LastIndex(gopkgString, "\n name = \"github.com/operator-framework/operator-sdk\"\n")
gopkgString = gopkgString[:gopkgLoc] + "\n source = \"https://github.com/" + repo + "\"\n revision = \"" + commitSha + "\"\n" + gopkgString[gopkgLoc+1:]
err = ioutil.WriteFile("Gopkg.toml", []byte(gopkgString), fileutil.DefaultFileMode)
if err != nil {
t.Fatalf("Failed to write updated Gopkg.toml: %v", err)
}
t.Logf("Gopkg.toml: %v", gopkgString)
} else {
t.Fatal("Could not find sha of PR")
}
}
cmdOut, err = exec.Command("dep", "ensure").CombinedOutput()
if err != nil {
t.Fatalf("Error after modifying Gopkg.toml: %v\nCommand Output: %s\n", err, string(cmdOut))
}
// Set replicas to 2 to test leader election. In production, this should
// almost always be set to 1, because there isn't generally value in having
// a hot spare operator process.
opYaml, err := ioutil.ReadFile("deploy/operator.yaml")
if err != nil {
t.Fatalf("Could not read deploy/operator.yaml: %v", err)
}
newOpYaml := bytes.Replace(opYaml, []byte("replicas: 1"), []byte("replicas: 2"), 1)
err = ioutil.WriteFile("deploy/operator.yaml", newOpYaml, 0644)
if err != nil {
t.Fatalf("Could not write deploy/operator.yaml: %v", err)
}
cmd := exec.Command("operator-sdk",
"add",
"api",
"--api-version=cache.example.com/v1alpha1",
"--kind=Memcached")
cmd.Env = os.Environ()
cmdOut, err = cmd.CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
cmdOut, err = exec.Command("operator-sdk",
"add",
"controller",
"--api-version=cache.example.com/v1alpha1",
"--kind=Memcached").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
cmdOut, err = exec.Command("cp", "-a", filepath.Join(gopath, "src/github.com/operator-framework/operator-sdk/example/memcached-operator/memcached_controller.go.tmpl"),
"pkg/controller/memcached/memcached_controller.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not copy memcached example to to pkg/controller/memcached/memcached_controller.go: %v\nCommand Output:\n%v", err, string(cmdOut))
}
memcachedTypesFile, err := ioutil.ReadFile("pkg/apis/cache/v1alpha1/memcached_types.go")
if err != nil {
t.Fatal(err)
}
memcachedTypesFileLines := bytes.Split(memcachedTypesFile, []byte("\n"))
for lineNum, line := range memcachedTypesFileLines {
if strings.Contains(string(line), "type MemcachedSpec struct {") {
memcachedTypesFileLinesIntermediate := append(memcachedTypesFileLines[:lineNum+1], []byte("\tSize int32 `json:\"size\"`"))
memcachedTypesFileLines = append(memcachedTypesFileLinesIntermediate, memcachedTypesFileLines[lineNum+3:]...)
break
}
}
for lineNum, line := range memcachedTypesFileLines {
if strings.Contains(string(line), "type MemcachedStatus struct {") {
memcachedTypesFileLinesIntermediate := append(memcachedTypesFileLines[:lineNum+1], []byte("\tNodes []string `json:\"nodes\"`"))
memcachedTypesFileLines = append(memcachedTypesFileLinesIntermediate, memcachedTypesFileLines[lineNum+3:]...)
break
}
}
if err := os.Remove("pkg/apis/cache/v1alpha1/memcached_types.go"); err != nil {
t.Fatalf("Failed to remove old memcached_type.go file: (%v)", err)
}
err = ioutil.WriteFile("pkg/apis/cache/v1alpha1/memcached_types.go", bytes.Join(memcachedTypesFileLines, []byte("\n")), fileutil.DefaultFileMode)
if err != nil {
t.Fatal(err)
}
t.Log("Generating k8s")
cmdOut, err = exec.Command("operator-sdk", "generate", "k8s").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
t.Log("Copying test files to ./test")
if err = os.MkdirAll("./test", fileutil.DefaultDirFileMode); err != nil {
t.Fatalf("Could not create test/e2e dir: %v", err)
}
cmdOut, err = exec.Command("cp", "-a", filepath.Join(gopath, "src/github.com/operator-framework/operator-sdk/test/e2e/incluster-test-code"), "./test/e2e").CombinedOutput()
if err != nil {
t.Fatalf("Could not copy tests to test/e2e: %v\nCommand Output:\n%v", err, string(cmdOut))
}
// fix naming of files
cmdOut, err = exec.Command("mv", "test/e2e/main_test.go.tmpl", "test/e2e/main_test.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not rename test/e2e/main_test.go.tmpl: %v\nCommand Output:\n%v", err, string(cmdOut))
}
cmdOut, err = exec.Command("mv", "test/e2e/memcached_test.go.tmpl", "test/e2e/memcached_test.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not rename test/e2e/memcached_test.go.tmpl: %v\nCommand Output:\n%v", err, string(cmdOut))
}
t.Log("Pulling new dependencies with dep ensure")
cmdOut, err = exec.Command("dep", "ensure").CombinedOutput()
if err != nil {
t.Fatalf("Command 'dep ensure' failed: %v\nCommand Output:\n%v", err, string(cmdOut))
}
// link local sdk to vendor if not in travis
if repo == "" {
for _, dir := range []string{"pkg", "internal"} {
repoDir := filepath.Join("github.com/operator-framework/operator-sdk", dir)
vendorDir := filepath.Join("vendor", repoDir)
if err := os.RemoveAll(vendorDir); err != nil {
t.Fatalf("Failed to delete old vendor directory: (%v)", err)
}
if err := os.Symlink(filepath.Join(gopath, projutil.SrcDir, repoDir), vendorDir); err != nil {
t.Fatalf("Failed to symlink local operator-sdk project to vendor dir: (%v)", err)
}
}
}
file, err := yamlutil.GenerateCombinedGlobalManifest(scaffold.CRDsDir)
if err != nil {
t.Fatal(err)
}
// hacky way to use createFromYAML without exposing the method
// create crd
filename := file.Name()
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created global resources")
// run subtests
t.Run("memcached-group", func(t *testing.T) {
t.Run("Cluster", MemcachedCluster)
t.Run("ClusterTest", MemcachedClusterTest)
t.Run("Local", MemcachedLocal)
})
}
func memcachedLeaderTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
return err
}
label := map[string]string{"name": operatorName}
leader, err := verifyLeader(t, namespace, f, label)
if err != nil {
return err
}
// delete the leader's pod so a new leader will get elected
err = f.Client.Delete(context.TODO(), leader)
if err != nil {
return err
}
err = e2eutil.WaitForDeletion(t, f.Client.Client, leader, retryInterval, timeout)
if err != nil {
return err
}
err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
return err
}
newLeader, err := verifyLeader(t, namespace, f, label)
if err != nil {
return err
}
if newLeader.Name == leader.Name {
return fmt.Errorf("leader pod name did not change across pod delete")
}
return nil
}
func verifyLeader(t *testing.T, namespace string, f *framework.Framework, labels map[string]string) (*v1.Pod, error) {
// get configmap, which is the lock
lockName := "memcached-operator-lock"
lock := v1.ConfigMap{}
err := wait.Poll(retryInterval, timeout, func() (done bool, err error) {
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: lockName, Namespace: namespace}, &lock)
if err != nil {
if apierrors.IsNotFound(err) {
t.Logf("Waiting for availability of leader lock configmap %s\n", lockName)
return false, nil
}
return false, err
}
return true, nil
})
if err != nil {
return nil, fmt.Errorf("error getting leader lock configmap: %v\n", err)
}
t.Logf("Found leader lock configmap %s\n", lockName)
owners := lock.GetOwnerReferences()
if len(owners) != 1 {
return nil, fmt.Errorf("leader lock has %d owner refs, expected 1", len(owners))
}
owner := owners[0]
// get operator pods
pods := v1.PodList{}
opts := client.ListOptions{Namespace: namespace}
for k, v := range labels {
if err := opts.SetLabelSelector(fmt.Sprintf("%s=%s", k, v)); err != nil {
return nil, fmt.Errorf("failed to set list label selector: (%v)", err)
}
}
if err := opts.SetFieldSelector("status.phase=Running"); err != nil {
t.Fatalf("Failed to set list field selector: (%v)", err)
}
err = f.Client.List(context.TODO(), &opts, &pods)
if err != nil {
return nil, err
}
if len(pods.Items) != 2 {
return nil, fmt.Errorf("expected 2 pods, found %d", len(pods.Items))
}
// find and return the leader
for _, pod := range pods.Items {
if pod.Name == owner.Name {
return &pod, nil
}
}
return nil, fmt.Errorf("did not find operator pod that was referenced by configmap")
}
func memcachedScaleTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
// create example-memcached yaml file
filename := "deploy/cr.yaml"
err := ioutil.WriteFile(filename,
[]byte(crYAML),
fileutil.DefaultFileMode)
if err != nil {
return err
}
// create memcached custom resource
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
return err
}
t.Log("Created cr")
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
// wait for example-memcached to reach 3 replicas
err = e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "example-memcached", 3, retryInterval, timeout)
if err != nil {
return err
}
// get fresh copy of memcached object as unstructured
obj := unstructured.Unstructured{}
jsonSpec, err := yaml.YAMLToJSON([]byte(crYAML))
if err != nil {
return fmt.Errorf("could not convert yaml file to json: %v", err)
}
if err := obj.UnmarshalJSON(jsonSpec); err != nil {
t.Fatalf("Failed to unmarshal memcached CR: (%v)", err)
}
obj.SetNamespace(namespace)
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, &obj)
if err != nil {
return fmt.Errorf("failed to get memcached object: %s", err)
}
// update memcached CR size to 4
spec, ok := obj.Object["spec"].(map[string]interface{})
if !ok {
return errors.New("memcached object missing spec field")
}
spec["size"] = 4
err = f.Client.Update(context.TODO(), &obj)
if err != nil {
return err
}
// wait for example-memcached to reach 4 replicas
return e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "example-memcached", 4, retryInterval, timeout)
}
func | (t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatal(err)
}
cmd := exec.Command("operator-sdk", "up", "local", "--namespace="+namespace)
stderr, err := os.Create("stderr.txt")
if err != nil {
t.Fatalf("Failed to create stderr.txt: %v", err)
}
cmd.Stderr = stderr
defer func() {
if err := stderr.Close(); err != nil && !fileutil.IsClosedError(err) {
t.Errorf("Failed to close stderr: (%v)", err)
}
}()
err = cmd.Start()
if err != nil {
t.Fatalf("Error: %v", err)
}
ctx.AddCleanupFn(func() error { return cmd.Process.Signal(os.Interrupt) })
// wait for operator to start (may take a minute to compile the command...)
err = wait.Poll(time.Second*5, time.Second*100, func() (done bool, err error) {
file, err := ioutil.ReadFile("stderr.txt")
if err != nil {
return false, err
}
if len(file) == 0 {
return false, nil
}
return true, nil
})
if err != nil {
t.Fatalf("Local operator not ready after 100 seconds: %v\n", err)
}
if err = memcachedScaleTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
}
func MemcachedCluster(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
operatorYAML, err := ioutil.ReadFile("deploy/operator.yaml")
if err != nil {
t.Fatalf("Could not read deploy/operator.yaml: %v", err)
}
local := *e2eImageName == ""
if local {
*e2eImageName = "quay.io/example/memcached-operator:v0.0.1"
if err != nil {
t.Fatal(err)
}
operatorYAML = bytes.Replace(operatorYAML, []byte("imagePullPolicy: Always"), []byte("imagePullPolicy: Never"), 1)
err = ioutil.WriteFile("deploy/operator.yaml", operatorYAML, fileutil.DefaultFileMode)
if err != nil {
t.Fatal(err)
}
}
operatorYAML = bytes.Replace(operatorYAML, []byte("REPLACE_IMAGE"), []byte(*e2eImageName), 1)
err = ioutil.WriteFile("deploy/operator.yaml", operatorYAML, os.FileMode(0644))
if err != nil {
t.Fatalf("Failed to write deploy/operator.yaml: %v", err)
}
t.Log("Building operator docker image")
cmdOut, err := exec.Command("operator-sdk", "build", *e2eImageName,
"--enable-tests",
"--test-location", "./test/e2e",
"--namespaced-manifest", "deploy/operator.yaml").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
if !local {
t.Log("Pushing docker image to repo")
cmdOut, err = exec.Command("docker", "push", *e2eImageName).CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
}
file, err := yamlutil.GenerateCombinedNamespacedManifest(scaffold.DeployDir)
if err != nil {
t.Fatal(err)
}
// create namespaced resources
filename := file.Name()
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created namespaced resources")
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatal(err)
}
// wait for memcached-operator to be ready
err = e2eutil.WaitForOperatorDeployment(t, framework.Global.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
t.Fatal(err)
}
if err = memcachedLeaderTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
if err = memcachedScaleTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
if err = memcachedMetricsTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
}
func MemcachedClusterTest(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
// create sa
filename := "deploy/service_account.yaml"
framework.Global.NamespacedManPath = &filename
err := ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created sa")
// create rbac
filename = "deploy/role.yaml"
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created role")
filename = "deploy/role_binding.yaml"
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created role_binding")
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatalf("Could not get namespace: %v", err)
}
cmdOut, err := exec.Command("operator-sdk", "test", "cluster", *e2eImageName,
"--namespace", namespace,
"--image-pull-policy", "Never",
"--service-account", operatorName).CombinedOutput()
if err != nil {
t.Fatalf("In-cluster test failed: %v\nCommand Output:\n%s", err, string(cmdOut))
}
}
func memcachedMetricsTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
// Make sure metrics Service exists
s := v1.Service{}
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: operatorName, Namespace: namespace}, &s)
if err != nil {
return fmt.Errorf("could not get metrics Service: (%v)", err)
}
// Get operator pod
pods := v1.PodList{}
opts := client.InNamespace(namespace)
if len(s.Spec.Selector) == 0 {
return fmt.Errorf("no labels found in metrics Service")
}
for k, v := range s.Spec.Selector {
if err := opts.SetLabelSelector(fmt.Sprintf("%s=%s", k, v)); err != nil {
return fmt.Errorf("failed to set list label selector: (%v)", err)
}
}
if err := opts.SetFieldSelector("status.phase=Running"); err != nil {
return fmt.Errorf("failed to set list field selector: (%v)", err)
}
err = f.Client.List(context.TODO(), opts, &pods)
if err != nil {
return fmt.Errorf("failed to get pods: (%v)", err)
}
podName := ""
numPods := len(pods.Items)
// TODO(lili): Remove below logic when we enable exposing metrics in all pods.
if numPods == 0 {
podName = pods.Items[0].Name
} else if numPods > 1 {
// If we got more than one pod, get leader pod name.
leader, err := verifyLeader(t, namespace, f, s.Spec.Selector)
if err != nil {
return err
}
podName = leader.Name
} else {
return fmt.Errorf("failed to get operator pod: could not select any pods with Service selector %v", s.Spec.Selector)
}
// Pod name must be there, otherwise we cannot read metrics data via pod proxy.
if podName == "" {
return fmt.Errorf("failed to get pod name")
}
// Get metrics data
request := proxyViaPod(f.KubeClient, namespace, podName, "8383", "/metrics")
response, err := request.DoRaw()
if err != nil {
return fmt.Errorf("failed to get response from metrics: %v", err)
}
// Make sure metrics are present
if len(response) == 0 {
return fmt.Errorf("metrics body is empty")
}
// Perform prometheus metrics lint checks
l := promlint.New(bytes.NewReader(response))
problems, err := l.Lint()
if err != nil {
return fmt.Errorf("failed to lint metrics: %v", err)
}
// TODO(lili): Change to 0, when we upgrade to 1.14.
// currently there is a problem with one of the metrics in upstream Kubernetes:
// `workqueue_longest_running_processor_microseconds`.
// This has been fixed in 1.14 release.
if len(problems) > 1 {
return fmt.Errorf("found problems with metrics: %#+v", problems)
}
return nil
}
func proxyViaPod(kubeClient kubernetes.Interface, namespace, podName, podPortName, path string) *rest.Request {
return kubeClient.
CoreV1().
RESTClient().
Get().
Namespace(namespace).
Resource("pods").
SubResource("proxy").
Name(fmt.Sprintf("%s:%s", podName, podPortName)).
Suffix(path)
}
| MemcachedLocal | identifier_name |
memcached_test.go | // Copyright 2018 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/ghodss/yaml"
"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
"github.com/operator-framework/operator-sdk/internal/util/fileutil"
"github.com/operator-framework/operator-sdk/internal/util/projutil"
"github.com/operator-framework/operator-sdk/internal/util/yamlutil"
framework "github.com/operator-framework/operator-sdk/pkg/test"
"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
"github.com/prometheus/prometheus/util/promlint"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
crYAML string = "apiVersion: \"cache.example.com/v1alpha1\"\nkind: \"Memcached\"\nmetadata:\n name: \"example-memcached\"\nspec:\n size: 3"
retryInterval = time.Second * 5
timeout = time.Second * 120
cleanupRetryInterval = time.Second * 1
cleanupTimeout = time.Second * 10
operatorName = "memcached-operator"
)
func TestMemcached(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
gopath, ok := os.LookupEnv(projutil.GopathEnv)
if !ok {
t.Fatalf("$GOPATH not set")
}
cd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
defer func() {
if err := os.Chdir(cd); err != nil {
t.Errorf("Failed to change back to original working directory: (%v)", err)
}
}()
// Setup
absProjectPath := filepath.Join(gopath, "src/github.com/example-inc")
if err := os.MkdirAll(absProjectPath, fileutil.DefaultDirFileMode); err != nil {
t.Fatal(err)
}
if err := os.Chdir(absProjectPath); err != nil {
t.Fatal(err)
}
t.Log("Creating new operator project")
cmdOut, err := exec.Command("operator-sdk",
"new",
operatorName).CombinedOutput()
if err != nil {
// HACK: dep cannot resolve non-master branches as the base branch for PR's,
// so running `dep ensure` will fail when first running
// `operator-sdk new ...`. For now we can ignore the first solve failure.
// A permanent solution can be implemented once the following is merged:
// https://github.com/golang/dep/pull/1658
solveFailRe := regexp.MustCompile(`(?m)^[ \t]*Solving failure:.+github\.com/operator-framework/operator-sdk.+:$`)
if !solveFailRe.Match(cmdOut) {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
}
ctx.AddCleanupFn(func() error { return os.RemoveAll(absProjectPath) })
if err := os.Chdir(operatorName); err != nil {
t.Fatalf("Failed to change to %s directory: (%v)", operatorName, err)
}
repo, ok := os.LookupEnv("TRAVIS_PULL_REQUEST_SLUG")
if repo == "" {
repo, ok = os.LookupEnv("TRAVIS_REPO_SLUG")
}
if ok && repo != "" && repo != "operator-framework/operator-sdk" {
commitSha, ok := os.LookupEnv("TRAVIS_PULL_REQUEST_SHA")
if commitSha == "" {
commitSha, ok = os.LookupEnv("TRAVIS_COMMIT")
}
if ok && commitSha != "" {
gopkg, err := ioutil.ReadFile("Gopkg.toml")
if err != nil {
t.Fatal(err)
}
// Match against the '#osdk_branch_annotation' used for version substitution
// and comment out the current branch.
branchRe := regexp.MustCompile("([ ]+)(.+#osdk_branch_annotation)")
gopkg = branchRe.ReplaceAll(gopkg, []byte("$1# $2"))
versionRe := regexp.MustCompile("([ ]+)(.+#osdk_version_annotation)")
gopkg = versionRe.ReplaceAll(gopkg, []byte("$1# $2"))
// Plug in the fork to test against so `dep ensure` can resolve dependencies
// correctly.
gopkgString := string(gopkg)
gopkgLoc := strings.LastIndex(gopkgString, "\n name = \"github.com/operator-framework/operator-sdk\"\n")
gopkgString = gopkgString[:gopkgLoc] + "\n source = \"https://github.com/" + repo + "\"\n revision = \"" + commitSha + "\"\n" + gopkgString[gopkgLoc+1:]
err = ioutil.WriteFile("Gopkg.toml", []byte(gopkgString), fileutil.DefaultFileMode)
if err != nil {
t.Fatalf("Failed to write updated Gopkg.toml: %v", err)
}
t.Logf("Gopkg.toml: %v", gopkgString)
} else {
t.Fatal("Could not find sha of PR")
}
}
cmdOut, err = exec.Command("dep", "ensure").CombinedOutput()
if err != nil {
t.Fatalf("Error after modifying Gopkg.toml: %v\nCommand Output: %s\n", err, string(cmdOut))
}
// Set replicas to 2 to test leader election. In production, this should
// almost always be set to 1, because there isn't generally value in having
// a hot spare operator process.
opYaml, err := ioutil.ReadFile("deploy/operator.yaml")
if err != nil {
t.Fatalf("Could not read deploy/operator.yaml: %v", err)
}
newOpYaml := bytes.Replace(opYaml, []byte("replicas: 1"), []byte("replicas: 2"), 1)
err = ioutil.WriteFile("deploy/operator.yaml", newOpYaml, 0644)
if err != nil {
t.Fatalf("Could not write deploy/operator.yaml: %v", err)
}
cmd := exec.Command("operator-sdk",
"add",
"api",
"--api-version=cache.example.com/v1alpha1",
"--kind=Memcached")
cmd.Env = os.Environ()
cmdOut, err = cmd.CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
cmdOut, err = exec.Command("operator-sdk",
"add",
"controller",
"--api-version=cache.example.com/v1alpha1",
"--kind=Memcached").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
cmdOut, err = exec.Command("cp", "-a", filepath.Join(gopath, "src/github.com/operator-framework/operator-sdk/example/memcached-operator/memcached_controller.go.tmpl"),
"pkg/controller/memcached/memcached_controller.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not copy memcached example to to pkg/controller/memcached/memcached_controller.go: %v\nCommand Output:\n%v", err, string(cmdOut))
}
memcachedTypesFile, err := ioutil.ReadFile("pkg/apis/cache/v1alpha1/memcached_types.go")
if err != nil {
t.Fatal(err)
}
memcachedTypesFileLines := bytes.Split(memcachedTypesFile, []byte("\n"))
for lineNum, line := range memcachedTypesFileLines {
if strings.Contains(string(line), "type MemcachedSpec struct {") {
memcachedTypesFileLinesIntermediate := append(memcachedTypesFileLines[:lineNum+1], []byte("\tSize int32 `json:\"size\"`"))
memcachedTypesFileLines = append(memcachedTypesFileLinesIntermediate, memcachedTypesFileLines[lineNum+3:]...)
break
}
}
for lineNum, line := range memcachedTypesFileLines {
if strings.Contains(string(line), "type MemcachedStatus struct {") {
memcachedTypesFileLinesIntermediate := append(memcachedTypesFileLines[:lineNum+1], []byte("\tNodes []string `json:\"nodes\"`"))
memcachedTypesFileLines = append(memcachedTypesFileLinesIntermediate, memcachedTypesFileLines[lineNum+3:]...)
break
}
}
if err := os.Remove("pkg/apis/cache/v1alpha1/memcached_types.go"); err != nil {
t.Fatalf("Failed to remove old memcached_type.go file: (%v)", err)
}
err = ioutil.WriteFile("pkg/apis/cache/v1alpha1/memcached_types.go", bytes.Join(memcachedTypesFileLines, []byte("\n")), fileutil.DefaultFileMode)
if err != nil {
t.Fatal(err)
}
t.Log("Generating k8s")
cmdOut, err = exec.Command("operator-sdk", "generate", "k8s").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
t.Log("Copying test files to ./test")
if err = os.MkdirAll("./test", fileutil.DefaultDirFileMode); err != nil {
t.Fatalf("Could not create test/e2e dir: %v", err)
}
cmdOut, err = exec.Command("cp", "-a", filepath.Join(gopath, "src/github.com/operator-framework/operator-sdk/test/e2e/incluster-test-code"), "./test/e2e").CombinedOutput()
if err != nil {
t.Fatalf("Could not copy tests to test/e2e: %v\nCommand Output:\n%v", err, string(cmdOut))
}
// fix naming of files
cmdOut, err = exec.Command("mv", "test/e2e/main_test.go.tmpl", "test/e2e/main_test.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not rename test/e2e/main_test.go.tmpl: %v\nCommand Output:\n%v", err, string(cmdOut))
}
cmdOut, err = exec.Command("mv", "test/e2e/memcached_test.go.tmpl", "test/e2e/memcached_test.go").CombinedOutput()
if err != nil {
t.Fatalf("Could not rename test/e2e/memcached_test.go.tmpl: %v\nCommand Output:\n%v", err, string(cmdOut))
}
t.Log("Pulling new dependencies with dep ensure")
cmdOut, err = exec.Command("dep", "ensure").CombinedOutput()
if err != nil {
t.Fatalf("Command 'dep ensure' failed: %v\nCommand Output:\n%v", err, string(cmdOut))
}
// link local sdk to vendor if not in travis
if repo == "" {
for _, dir := range []string{"pkg", "internal"} {
repoDir := filepath.Join("github.com/operator-framework/operator-sdk", dir)
vendorDir := filepath.Join("vendor", repoDir)
if err := os.RemoveAll(vendorDir); err != nil {
t.Fatalf("Failed to delete old vendor directory: (%v)", err)
}
if err := os.Symlink(filepath.Join(gopath, projutil.SrcDir, repoDir), vendorDir); err != nil {
t.Fatalf("Failed to symlink local operator-sdk project to vendor dir: (%v)", err)
}
}
}
file, err := yamlutil.GenerateCombinedGlobalManifest(scaffold.CRDsDir)
if err != nil {
t.Fatal(err)
}
// hacky way to use createFromYAML without exposing the method
// create crd
filename := file.Name()
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created global resources")
// run subtests
t.Run("memcached-group", func(t *testing.T) {
t.Run("Cluster", MemcachedCluster)
t.Run("ClusterTest", MemcachedClusterTest)
t.Run("Local", MemcachedLocal)
})
}
func memcachedLeaderTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
return err
}
label := map[string]string{"name": operatorName}
leader, err := verifyLeader(t, namespace, f, label)
if err != nil {
return err
}
// delete the leader's pod so a new leader will get elected
err = f.Client.Delete(context.TODO(), leader)
if err != nil {
return err
}
err = e2eutil.WaitForDeletion(t, f.Client.Client, leader, retryInterval, timeout)
if err != nil {
return err
}
err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
return err
}
newLeader, err := verifyLeader(t, namespace, f, label)
if err != nil {
return err
}
if newLeader.Name == leader.Name {
return fmt.Errorf("leader pod name did not change across pod delete")
}
return nil
}
func verifyLeader(t *testing.T, namespace string, f *framework.Framework, labels map[string]string) (*v1.Pod, error) {
// get configmap, which is the lock
lockName := "memcached-operator-lock"
lock := v1.ConfigMap{}
err := wait.Poll(retryInterval, timeout, func() (done bool, err error) {
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: lockName, Namespace: namespace}, &lock)
if err != nil {
if apierrors.IsNotFound(err) {
t.Logf("Waiting for availability of leader lock configmap %s\n", lockName)
return false, nil
}
return false, err
}
return true, nil
})
if err != nil {
return nil, fmt.Errorf("error getting leader lock configmap: %v\n", err)
}
t.Logf("Found leader lock configmap %s\n", lockName)
owners := lock.GetOwnerReferences()
if len(owners) != 1 {
return nil, fmt.Errorf("leader lock has %d owner refs, expected 1", len(owners))
}
owner := owners[0]
// get operator pods
pods := v1.PodList{}
opts := client.ListOptions{Namespace: namespace}
for k, v := range labels {
if err := opts.SetLabelSelector(fmt.Sprintf("%s=%s", k, v)); err != nil {
return nil, fmt.Errorf("failed to set list label selector: (%v)", err)
}
}
if err := opts.SetFieldSelector("status.phase=Running"); err != nil {
t.Fatalf("Failed to set list field selector: (%v)", err)
}
err = f.Client.List(context.TODO(), &opts, &pods)
if err != nil {
return nil, err
}
if len(pods.Items) != 2 {
return nil, fmt.Errorf("expected 2 pods, found %d", len(pods.Items))
}
// find and return the leader
for _, pod := range pods.Items {
if pod.Name == owner.Name {
return &pod, nil
}
}
return nil, fmt.Errorf("did not find operator pod that was referenced by configmap")
}
func memcachedScaleTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
// create example-memcached yaml file
filename := "deploy/cr.yaml"
err := ioutil.WriteFile(filename,
[]byte(crYAML),
fileutil.DefaultFileMode)
if err != nil {
return err
}
// create memcached custom resource
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
return err
}
t.Log("Created cr")
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
// wait for example-memcached to reach 3 replicas
err = e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "example-memcached", 3, retryInterval, timeout)
if err != nil {
return err
}
// get fresh copy of memcached object as unstructured
obj := unstructured.Unstructured{}
jsonSpec, err := yaml.YAMLToJSON([]byte(crYAML))
if err != nil {
return fmt.Errorf("could not convert yaml file to json: %v", err)
}
if err := obj.UnmarshalJSON(jsonSpec); err != nil {
t.Fatalf("Failed to unmarshal memcached CR: (%v)", err)
}
obj.SetNamespace(namespace)
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, &obj)
if err != nil {
return fmt.Errorf("failed to get memcached object: %s", err)
}
// update memcached CR size to 4
spec, ok := obj.Object["spec"].(map[string]interface{})
if !ok {
return errors.New("memcached object missing spec field")
}
spec["size"] = 4
err = f.Client.Update(context.TODO(), &obj)
if err != nil {
return err
}
// wait for example-memcached to reach 4 replicas
return e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "example-memcached", 4, retryInterval, timeout)
}
func MemcachedLocal(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatal(err)
}
cmd := exec.Command("operator-sdk", "up", "local", "--namespace="+namespace)
stderr, err := os.Create("stderr.txt")
if err != nil {
t.Fatalf("Failed to create stderr.txt: %v", err)
}
cmd.Stderr = stderr
defer func() {
if err := stderr.Close(); err != nil && !fileutil.IsClosedError(err) {
t.Errorf("Failed to close stderr: (%v)", err)
}
}()
err = cmd.Start()
if err != nil {
t.Fatalf("Error: %v", err)
}
ctx.AddCleanupFn(func() error { return cmd.Process.Signal(os.Interrupt) })
// wait for operator to start (may take a minute to compile the command...)
err = wait.Poll(time.Second*5, time.Second*100, func() (done bool, err error) {
file, err := ioutil.ReadFile("stderr.txt")
if err != nil {
return false, err
}
if len(file) == 0 {
return false, nil
}
return true, nil
})
if err != nil {
t.Fatalf("Local operator not ready after 100 seconds: %v\n", err)
}
if err = memcachedScaleTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
}
func MemcachedCluster(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
operatorYAML, err := ioutil.ReadFile("deploy/operator.yaml")
if err != nil {
t.Fatalf("Could not read deploy/operator.yaml: %v", err)
}
local := *e2eImageName == ""
if local {
*e2eImageName = "quay.io/example/memcached-operator:v0.0.1"
if err != nil {
t.Fatal(err)
}
operatorYAML = bytes.Replace(operatorYAML, []byte("imagePullPolicy: Always"), []byte("imagePullPolicy: Never"), 1)
err = ioutil.WriteFile("deploy/operator.yaml", operatorYAML, fileutil.DefaultFileMode)
if err != nil {
t.Fatal(err)
}
}
operatorYAML = bytes.Replace(operatorYAML, []byte("REPLACE_IMAGE"), []byte(*e2eImageName), 1)
err = ioutil.WriteFile("deploy/operator.yaml", operatorYAML, os.FileMode(0644))
if err != nil |
t.Log("Building operator docker image")
cmdOut, err := exec.Command("operator-sdk", "build", *e2eImageName,
"--enable-tests",
"--test-location", "./test/e2e",
"--namespaced-manifest", "deploy/operator.yaml").CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
if !local {
t.Log("Pushing docker image to repo")
cmdOut, err = exec.Command("docker", "push", *e2eImageName).CombinedOutput()
if err != nil {
t.Fatalf("Error: %v\nCommand Output: %s\n", err, string(cmdOut))
}
}
file, err := yamlutil.GenerateCombinedNamespacedManifest(scaffold.DeployDir)
if err != nil {
t.Fatal(err)
}
// create namespaced resources
filename := file.Name()
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created namespaced resources")
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatal(err)
}
// wait for memcached-operator to be ready
err = e2eutil.WaitForOperatorDeployment(t, framework.Global.KubeClient, namespace, operatorName, 2, retryInterval, timeout)
if err != nil {
t.Fatal(err)
}
if err = memcachedLeaderTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
if err = memcachedScaleTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
if err = memcachedMetricsTest(t, framework.Global, ctx); err != nil {
t.Fatal(err)
}
}
func MemcachedClusterTest(t *testing.T) {
// get global framework variables
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
// create sa
filename := "deploy/service_account.yaml"
framework.Global.NamespacedManPath = &filename
err := ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created sa")
// create rbac
filename = "deploy/role.yaml"
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created role")
filename = "deploy/role_binding.yaml"
framework.Global.NamespacedManPath = &filename
err = ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatal(err)
}
t.Log("Created role_binding")
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatalf("Could not get namespace: %v", err)
}
cmdOut, err := exec.Command("operator-sdk", "test", "cluster", *e2eImageName,
"--namespace", namespace,
"--image-pull-policy", "Never",
"--service-account", operatorName).CombinedOutput()
if err != nil {
t.Fatalf("In-cluster test failed: %v\nCommand Output:\n%s", err, string(cmdOut))
}
}
func memcachedMetricsTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error {
namespace, err := ctx.GetNamespace()
if err != nil {
return err
}
// Make sure metrics Service exists
s := v1.Service{}
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: operatorName, Namespace: namespace}, &s)
if err != nil {
return fmt.Errorf("could not get metrics Service: (%v)", err)
}
// Get operator pod
pods := v1.PodList{}
opts := client.InNamespace(namespace)
if len(s.Spec.Selector) == 0 {
return fmt.Errorf("no labels found in metrics Service")
}
for k, v := range s.Spec.Selector {
if err := opts.SetLabelSelector(fmt.Sprintf("%s=%s", k, v)); err != nil {
return fmt.Errorf("failed to set list label selector: (%v)", err)
}
}
if err := opts.SetFieldSelector("status.phase=Running"); err != nil {
return fmt.Errorf("failed to set list field selector: (%v)", err)
}
err = f.Client.List(context.TODO(), opts, &pods)
if err != nil {
return fmt.Errorf("failed to get pods: (%v)", err)
}
podName := ""
numPods := len(pods.Items)
// TODO(lili): Remove below logic when we enable exposing metrics in all pods.
if numPods == 0 {
podName = pods.Items[0].Name
} else if numPods > 1 {
// If we got more than one pod, get leader pod name.
leader, err := verifyLeader(t, namespace, f, s.Spec.Selector)
if err != nil {
return err
}
podName = leader.Name
} else {
return fmt.Errorf("failed to get operator pod: could not select any pods with Service selector %v", s.Spec.Selector)
}
// Pod name must be there, otherwise we cannot read metrics data via pod proxy.
if podName == "" {
return fmt.Errorf("failed to get pod name")
}
// Get metrics data
request := proxyViaPod(f.KubeClient, namespace, podName, "8383", "/metrics")
response, err := request.DoRaw()
if err != nil {
return fmt.Errorf("failed to get response from metrics: %v", err)
}
// Make sure metrics are present
if len(response) == 0 {
return fmt.Errorf("metrics body is empty")
}
// Perform prometheus metrics lint checks
l := promlint.New(bytes.NewReader(response))
problems, err := l.Lint()
if err != nil {
return fmt.Errorf("failed to lint metrics: %v", err)
}
// TODO(lili): Change to 0, when we upgrade to 1.14.
// currently there is a problem with one of the metrics in upstream Kubernetes:
// `workqueue_longest_running_processor_microseconds`.
// This has been fixed in 1.14 release.
if len(problems) > 1 {
return fmt.Errorf("found problems with metrics: %#+v", problems)
}
return nil
}
func proxyViaPod(kubeClient kubernetes.Interface, namespace, podName, podPortName, path string) *rest.Request {
return kubeClient.
CoreV1().
RESTClient().
Get().
Namespace(namespace).
Resource("pods").
SubResource("proxy").
Name(fmt.Sprintf("%s:%s", podName, podPortName)).
Suffix(path)
}
| {
t.Fatalf("Failed to write deploy/operator.yaml: %v", err)
} | conditional_block |
utpgo.go | // Copyright (c) 2021 Storj Labs, Inc.
// Copyright (c) 2010 BitTorrent, Inc.
// See LICENSE for copying information.
package utp
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"os"
"runtime/pprof"
"sync"
"syscall"
"time"
"github.com/go-logr/logr"
"storj.io/utp-go/buffers"
"storj.io/utp-go/libutp"
)
// Buffer for data before it gets to µTP (there is another "send buffer" in
// the libutp code, but it is for managing flow control window sizes.
const (
readBufferSize = 200000
writeBufferSize = 200000
)
var noopLogger = logr.DiscardLogger{}
type Addr net.UDPAddr
func (a *Addr) Network() string { return "utp" }
func (a *Addr) String() string { return (*net.UDPAddr)(a).String() }
type Conn struct {
utpSocket
logger logr.Logger
baseConn *libutp.Socket
// set to true if the socket will close once the write buffer is empty
willClose bool
// set to true once the libutp-layer Close has been called
libutpClosed bool
// set to true when the socket has been closed by the remote side (or the
// conn has experienced a timeout or other fatal error)
remoteIsDone bool
// set to true if a read call is pending
readPending bool
// set to true if a write call is pending
writePending bool
// closed when Close() is called
closeChan chan struct{}
// closed when baseConn has entered StateDestroying
baseConnDestroyed chan struct{}
// readBuffer tracks data that has been read on a particular Conn, but
// not yet consumed by the application.
readBuffer *buffers.SyncCircularBuffer
// writeBuffer tracks data that needs to be sent on this Conn, which is
// has not yet been collected by µTP.
writeBuffer *buffers.SyncCircularBuffer
readDeadline time.Time
writeDeadline time.Time
// Set to true while waiting for a connection to complete (got
// state=StateConnect). The connectChan channel will be closed once this
// is set.
connecting bool
connectChan chan struct{}
}
type Listener struct {
utpSocket
acceptChan <-chan *Conn
}
// utpSocket is shared functionality between Conn and Listener.
type utpSocket struct {
localAddr *net.UDPAddr
// manager is shared by all sockets using the same local address
// (for outgoing connections, only the one connection, but for incoming
// connections, this includes all connections received by the associated
// listening socket). It is reference-counted, and thus will only be
// cleaned up entirely when the last related socket is closed.
manager *socketManager
// changes to encounteredError, manager, or other state variables in Conn
// or Listener should all be protected with this lock. If it must be
// acquired at the same time as manager.baseConnLock, the
// manager.baseConnLock must be acquired first.
stateLock sync.Mutex
// Once set, all further Write/Read operations should fail with this error.
encounteredError error
}
func Dial(network, address string) (net.Conn, error) {
return DialOptions(network, address)
}
func DialContext(ctx context.Context, network, address string) (net.Conn, error) {
return DialOptions(network, address, WithContext(ctx))
}
func DialOptions(network, address string, options ...ConnectOption) (net.Conn, error) {
switch network {
case "utp", "utp4", "utp6":
default:
return nil, fmt.Errorf("network %s not supported", network)
}
rAddr, err := ResolveUTPAddr(network, address)
if err != nil {
| return DialUTPOptions(network, nil, rAddr, options...)
}
func DialUTP(network string, localAddr, remoteAddr *Addr) (net.Conn, error) {
return DialUTPOptions(network, localAddr, remoteAddr)
}
func DialUTPOptions(network string, localAddr, remoteAddr *Addr, options ...ConnectOption) (net.Conn, error) {
s := utpDialState{
logger: &noopLogger,
ctx: context.Background(),
tlsConfig: nil,
}
for _, opt := range options {
opt.apply(&s)
}
conn, err := dial(s.ctx, s.logger, network, localAddr, remoteAddr)
if err != nil {
return nil, err
}
if s.tlsConfig != nil {
return tls.Client(conn, s.tlsConfig), nil
}
return conn, nil
}
func dial(ctx context.Context, logger logr.Logger, network string, localAddr, remoteAddr *Addr) (*Conn, error) {
managerLogger := logger.WithValues("remote-addr", remoteAddr)
manager, err := newSocketManager(managerLogger, network, (*net.UDPAddr)(localAddr), (*net.UDPAddr)(remoteAddr))
if err != nil {
return nil, err
}
localUDPAddr := manager.LocalAddr().(*net.UDPAddr)
// different from managerLogger in case local addr interface and/or port
// has been clarified
connLogger := logger.WithValues("local-addr", localUDPAddr, "remote-addr", remoteAddr, "dir", "out")
utpConn := &Conn{
utpSocket: utpSocket{
localAddr: localUDPAddr,
manager: manager,
},
logger: connLogger.WithName("utp-conn"),
connecting: true,
connectChan: make(chan struct{}),
closeChan: make(chan struct{}),
baseConnDestroyed: make(chan struct{}),
readBuffer: buffers.NewSyncBuffer(readBufferSize),
writeBuffer: buffers.NewSyncBuffer(writeBufferSize),
}
connLogger.V(10).Info("creating outgoing socket")
// thread-safe here, because no other goroutines could have a handle to
// this mx yet.
utpConn.baseConn, err = manager.mx.Create(packetSendCallback, manager, (*net.UDPAddr)(remoteAddr))
if err != nil {
return nil, err
}
utpConn.baseConn.SetCallbacks(&libutp.CallbackTable{
OnRead: onReadCallback,
OnWrite: onWriteCallback,
GetRBSize: getRBSizeCallback,
OnState: onStateCallback,
OnError: onErrorCallback,
}, utpConn)
utpConn.baseConn.SetLogger(connLogger.WithName("utp-socket"))
manager.start()
func() {
// now that the manager's goroutines have started, we do need
// concurrency protection
manager.baseConnLock.Lock()
defer manager.baseConnLock.Unlock()
connLogger.V(10).Info("initiating libutp-level Connect()")
utpConn.baseConn.Connect()
}()
select {
case <-ctx.Done():
_ = utpConn.Close()
return nil, ctx.Err()
case <-utpConn.connectChan:
}
// connection operation is complete, successful or not; record any error met
utpConn.stateLock.Lock()
err = utpConn.encounteredError
utpConn.stateLock.Unlock()
if err != nil {
_ = utpConn.Close()
return nil, utpConn.makeOpError("dial", err)
}
return utpConn, nil
}
func Listen(network string, addr string) (net.Listener, error) {
return ListenOptions(network, addr)
}
func ListenOptions(network, addr string, options ...ConnectOption) (net.Listener, error) {
s := utpDialState{
logger: &noopLogger,
}
for _, opt := range options {
opt.apply(&s)
}
switch network {
case "utp", "utp4", "utp6":
default:
return nil, fmt.Errorf("network %s not supported", network)
}
udpAddr, err := ResolveUTPAddr(network, addr)
if err != nil {
return nil, err
}
listener, err := listen(s.logger, network, udpAddr)
if err != nil {
return nil, err
}
if s.tlsConfig != nil {
return tls.NewListener(listener, s.tlsConfig), nil
}
return listener, nil
}
func ListenUTP(network string, localAddr *Addr) (*Listener, error) {
return listen(&noopLogger, network, localAddr)
}
func ListenUTPOptions(network string, localAddr *Addr, options ...ConnectOption) (*Listener, error) {
s := utpDialState{
logger: &noopLogger,
}
for _, opt := range options {
opt.apply(&s)
}
return listen(s.logger, network, localAddr)
}
func listen(logger logr.Logger, network string, localAddr *Addr) (*Listener, error) {
manager, err := newSocketManager(logger, network, (*net.UDPAddr)(localAddr), nil)
if err != nil {
return nil, err
}
udpLocalAddr := manager.LocalAddr().(*net.UDPAddr)
utpListener := &Listener{
utpSocket: utpSocket{
localAddr: udpLocalAddr,
manager: manager,
},
acceptChan: manager.acceptChan,
}
manager.start()
return utpListener, nil
}
type utpDialState struct {
logger logr.Logger
ctx context.Context
tlsConfig *tls.Config
}
type ConnectOption interface {
apply(s *utpDialState)
}
type optionLogger struct {
logger logr.Logger
}
func (o *optionLogger) apply(s *utpDialState) {
s.logger = o.logger
}
func WithLogger(logger logr.Logger) ConnectOption {
return &optionLogger{logger: logger}
}
type optionContext struct {
ctx context.Context
}
func (o *optionContext) apply(s *utpDialState) {
s.ctx = o.ctx
}
func WithContext(ctx context.Context) ConnectOption {
return &optionContext{ctx: ctx}
}
type optionTLS struct {
tlsConfig *tls.Config
}
func (o *optionTLS) apply(s *utpDialState) {
s.tlsConfig = o.tlsConfig
}
func WithTLS(tlsConfig *tls.Config) ConnectOption {
return &optionTLS{tlsConfig: tlsConfig}
}
func (c *Conn) Close() error {
// indicate our desire to close; once buffers are flushed, we can continue
c.stateLock.Lock()
if c.willClose {
c.stateLock.Unlock()
return errors.New("multiple calls to Close() not allowed")
}
c.willClose = true
c.stateLock.Unlock()
// wait for write buffer to be flushed
c.writeBuffer.FlushAndClose()
// if there are still any blocked reads, shut them down
c.readBuffer.Close()
// close baseConn
err := func() error {
// yes, even libutp.(*UTPSocket).Close() needs concurrency protection;
// it may end up invoking callbacks
c.manager.baseConnLock.Lock()
defer c.manager.baseConnLock.Unlock()
c.logger.V(10).Info("closing baseConn")
c.libutpClosed = true
return c.baseConn.Close()
}()
// wait for socket to enter StateDestroying
<-c.baseConnDestroyed
c.setEncounteredError(net.ErrClosed)
socketCloseErr := c.utpSocket.Close()
// even if err was already set, this one is likely to be more helpful/interesting.
if socketCloseErr != nil {
err = socketCloseErr
}
return err
}
func (c *Conn) SetLogger(logger logr.Logger) {
c.baseConn.SetLogger(logger)
}
func (c *Conn) Read(buf []byte) (n int, err error) {
return c.ReadContext(context.Background(), buf)
}
func (c *Conn) stateEnterRead() error {
switch {
case c.readPending:
return buffers.ReaderAlreadyWaitingErr
case c.willClose:
return c.makeOpError("read", net.ErrClosed)
case c.remoteIsDone && c.readBuffer.SpaceUsed() == 0:
return c.makeOpError("read", c.encounteredError)
}
c.readPending = true
return nil
}
func (c *Conn) ReadContext(ctx context.Context, buf []byte) (n int, err error) {
c.stateLock.Lock()
encounteredErr := c.encounteredError
deadline := c.readDeadline
err = c.stateEnterRead()
c.stateLock.Unlock()
if err != nil {
return 0, err
}
defer func() {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.readPending = false
}()
if !deadline.IsZero() {
var cancel func()
ctx, cancel = context.WithDeadline(ctx, deadline)
defer cancel()
}
for {
var ok bool
n, ok = c.readBuffer.TryConsume(buf)
if ok {
if n == 0 {
return 0, io.EOF
}
return n, nil
}
if encounteredErr != nil {
return 0, c.makeOpError("read", encounteredErr)
}
waitChan, cancelWait, err := c.readBuffer.WaitForBytesChan(1)
if err != nil {
return 0, err
}
select {
case <-ctx.Done():
cancelWait()
err = ctx.Err()
if errors.Is(err, context.DeadlineExceeded) {
// transform deadline error to os.ErrDeadlineExceeded as per
// net.Conn specification
err = c.makeOpError("read", os.ErrDeadlineExceeded)
}
return 0, err
case <-c.closeChan:
cancelWait()
return 0, c.makeOpError("read", net.ErrClosed)
case <-waitChan:
}
}
}
func (c *Conn) Write(buf []byte) (n int, err error) {
return c.WriteContext(context.Background(), buf)
}
func (c *Conn) WriteContext(ctx context.Context, buf []byte) (n int, err error) {
c.stateLock.Lock()
if c.writePending {
c.stateLock.Unlock()
return 0, buffers.WriterAlreadyWaitingErr
}
c.writePending = true
deadline := c.writeDeadline
c.stateLock.Unlock()
if err != nil {
if err == io.EOF {
// remote side closed connection cleanly, and µTP in/out streams
// are not independently closeable. Doesn't make sense to return
// an EOF from a Write method, so..
err = c.makeOpError("write", syscall.ECONNRESET)
} else if err == net.ErrClosed {
err = c.makeOpError("write", net.ErrClosed)
}
return 0, err
}
defer func() {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.writePending = false
}()
if !deadline.IsZero() {
var cancel func()
ctx, cancel = context.WithDeadline(ctx, deadline)
defer cancel()
}
for {
c.stateLock.Lock()
willClose := c.willClose
remoteIsDone := c.remoteIsDone
encounteredError := c.encounteredError
c.stateLock.Unlock()
if willClose {
return 0, c.makeOpError("write", net.ErrClosed)
}
if remoteIsDone {
return 0, c.makeOpError("write", encounteredError)
}
if ok := c.writeBuffer.TryAppend(buf); ok {
// make sure µTP knows about the new bytes. this might be a bit
// confusing, but it doesn't matter if other writes occur between
// the TryAppend() above and the acquisition of the baseConnLock
// below. All that matters is that (a) there is at least one call
// to baseConn.Write scheduled to be made after this point (without
// undue blocking); (b) baseConnLock is held when that Write call
// is made; and (c) the amount of data in the write buffer does not
// decrease between the SpaceUsed() call and the start of the next
// call to onWriteCallback.
func() {
c.manager.baseConnLock.Lock()
defer c.manager.baseConnLock.Unlock()
amount := c.writeBuffer.SpaceUsed()
c.logger.V(10).Info("informing libutp layer of data for writing", "len", amount)
c.baseConn.Write(amount)
}()
return len(buf), nil
}
waitChan, cancelWait, err := c.writeBuffer.WaitForSpaceChan(len(buf))
if err != nil {
if err == buffers.IsClosedErr {
err = c.makeOpError("write", c.encounteredError)
}
return 0, err
}
// couldn't write the data yet; wait until we can, or until we hit the
// timeout, or until the conn is closed.
select {
case <-ctx.Done():
cancelWait()
err = ctx.Err()
if errors.Is(err, context.DeadlineExceeded) {
// transform deadline error to os.ErrDeadlineExceeded as per
// net.Conn specification
err = c.makeOpError("write", os.ErrDeadlineExceeded)
}
return 0, err
case <-c.closeChan:
cancelWait()
return 0, c.makeOpError("write", net.ErrClosed)
case <-waitChan:
}
}
}
func (c *Conn) RemoteAddr() net.Addr {
// GetPeerName is thread-safe
return (*Addr)(c.baseConn.GetPeerName())
}
func (c *Conn) SetReadDeadline(t time.Time) error {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.readDeadline = t
return nil
}
func (c *Conn) SetWriteDeadline(t time.Time) error {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.writeDeadline = t
return nil
}
func (c *Conn) SetDeadline(t time.Time) error {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.writeDeadline = t
c.readDeadline = t
return nil
}
func (c *Conn) makeOpError(op string, err error) error {
opErr := c.utpSocket.makeOpError(op, err).(*net.OpError)
opErr.Source = opErr.Addr
opErr.Addr = c.RemoteAddr()
return opErr
}
var _ net.Conn = &Conn{}
func (l *Listener) AcceptUTPContext(ctx context.Context) (*Conn, error) {
select {
case newConn, ok := <-l.acceptChan:
if ok {
return newConn, nil
}
err := l.encounteredError
if err == nil {
err = l.makeOpError("accept", net.ErrClosed)
}
return nil, err
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (l *Listener) AcceptUTP() (*Conn, error) {
return l.AcceptUTPContext(context.Background())
}
func (l *Listener) Accept() (net.Conn, error) {
return l.AcceptUTP()
}
func (l *Listener) AcceptContext(ctx context.Context) (net.Conn, error) {
return l.AcceptUTPContext(ctx)
}
func (l *Listener) Close() error {
return l.utpSocket.Close()
}
func (l *Listener) Addr() net.Addr {
return l.utpSocket.LocalAddr()
}
var _ net.Listener = &Listener{}
func (u *utpSocket) makeOpError(op string, err error) error {
return &net.OpError{
Op: op,
Net: "utp",
Source: nil,
Addr: u.LocalAddr(),
Err: err,
}
}
func (u *utpSocket) Close() (err error) {
u.stateLock.Lock()
if u.manager != nil {
err = u.manager.decrementReferences()
u.manager = nil
}
u.stateLock.Unlock()
return err
}
func (c *Conn) setEncounteredError(err error) {
if err == nil {
return
}
c.stateLock.Lock()
defer c.stateLock.Unlock()
// keep the first error if this is called multiple times
if c.encounteredError == nil {
c.encounteredError = err
}
if c.connecting {
c.connecting = false
close(c.connectChan)
}
}
func (u *utpSocket) LocalAddr() net.Addr {
return (*Addr)(u.localAddr)
}
type socketManager struct {
mx *libutp.SocketMultiplexer
logger logr.Logger
udpSocket *net.UDPConn
// this lock should be held when invoking any libutp functions or methods
// that are not thread-safe or which themselves might invoke callbacks
// (that is, nearly all libutp functions or methods). It can be assumed
// that this lock is held in callbacks.
baseConnLock sync.Mutex
refCountLock sync.Mutex
refCount int
// cancelManagement is a cancel function that should be called to close
// down the socket management goroutines. The main managing goroutine
// should clean up and return any close error on closeErr.
cancelManagement func()
// closeErr is a channel on which the managing goroutine will return any
// errors from a close operation when all is complete.
closeErr chan error
// to be allocated with a buffer the size of the intended backlog. There
// can be at most one utpSocket able to receive on this channel (one
// Listener for any given UDP socket).
acceptChan chan *Conn
// just a way to accumulate errors in sending or receiving on the UDP
// socket; this may cause future Write/Read method calls to return the
// error in the future
socketErrors []error
socketErrorsLock sync.Mutex
pollInterval time.Duration
}
const (
defaultUTPConnBacklogSize = 5
)
func newSocketManager(logger logr.Logger, network string, localAddr, remoteAddr *net.UDPAddr) (*socketManager, error) {
switch network {
case "utp", "utp4", "utp6":
default:
op := "dial"
if remoteAddr == nil {
op = "listen"
}
return nil, &net.OpError{Op: op, Net: network, Source: localAddr, Addr: remoteAddr, Err: net.UnknownNetworkError(network)}
}
udpNetwork := "udp" + network[3:]
// thread-safe here; don't need baseConnLock
mx := libutp.NewSocketMultiplexer(logger.WithName("mx").WithValues("local-addr", localAddr.String()), nil)
udpSocket, err := net.ListenUDP(udpNetwork, localAddr)
if err != nil {
return nil, err
}
sm := &socketManager{
mx: mx,
logger: logger.WithName("manager").WithValues("local-addr", udpSocket.LocalAddr()),
udpSocket: udpSocket,
refCount: 1,
closeErr: make(chan error),
acceptChan: make(chan *Conn, defaultUTPConnBacklogSize),
pollInterval: 5 * time.Millisecond,
}
return sm, nil
}
func (sm *socketManager) start() {
ctx, cancel := context.WithCancel(context.Background())
sm.cancelManagement = cancel
managementLabels := pprof.Labels(
"name", "socket-management", "udp-socket", sm.udpSocket.LocalAddr().String())
receiverLabels := pprof.Labels(
"name", "udp-receiver", "udp-socket", sm.udpSocket.LocalAddr().String())
go func() {
pprof.Do(ctx, managementLabels, sm.socketManagement)
}()
go func() {
pprof.Do(ctx, receiverLabels, sm.udpMessageReceiver)
}()
}
func (sm *socketManager) LocalAddr() net.Addr {
return sm.udpSocket.LocalAddr()
}
func (sm *socketManager) socketManagement(ctx context.Context) {
timer := time.NewTimer(sm.pollInterval)
defer timer.Stop()
for {
timer.Reset(sm.pollInterval)
select {
case <-ctx.Done():
// at this point, all attached Conn instances should be
// closed already
sm.internalClose()
return
case <-timer.C:
}
sm.checkTimeouts()
}
}
func (sm *socketManager) processIncomingPacket(data []byte, destAddr *net.UDPAddr) {
sm.baseConnLock.Lock()
defer sm.baseConnLock.Unlock()
sm.mx.IsIncomingUTP(gotIncomingConnectionCallback, packetSendCallback, sm, data, destAddr)
}
func (sm *socketManager) checkTimeouts() {
sm.baseConnLock.Lock()
defer sm.baseConnLock.Unlock()
sm.mx.CheckTimeouts()
}
func (sm *socketManager) internalClose() {
err := sm.udpSocket.Close()
sm.mx = nil
sm.closeErr <- err
close(sm.closeErr)
close(sm.acceptChan)
}
func (sm *socketManager) incrementReferences() {
sm.refCountLock.Lock()
sm.refCount++
sm.refCountLock.Unlock()
}
// decrementReferences releases one reference to the socketManager. When
// the count reaches zero it cancels the management goroutines and waits
// for the close result; going below zero indicates a double-close bug.
func (sm *socketManager) decrementReferences() error {
	sm.refCountLock.Lock()
	defer sm.refCountLock.Unlock()
	sm.refCount--
	switch {
	case sm.refCount == 0:
		sm.logger.V(1).Info("closing socketManager")
		sm.cancelManagement()
		// block until internalClose has finished and reported its error
		return <-sm.closeErr
	case sm.refCount < 0:
		return errors.New("socketManager closed too many times")
	default:
		return nil
	}
}
// udpMessageReceiver is the receive loop: it reads datagrams from the UDP
// socket and feeds complete ones into the libutp layer. It exits when the
// socket is closed (detected via ctx cancellation).
func (sm *socketManager) udpMessageReceiver(ctx context.Context) {
	// GetUDPMTU is thread-safe; no baseConnLock needed here.
	mtu := libutp.GetUDPMTU(sm.LocalAddr().(*net.UDPAddr))
	// GetUDPMTU frequently underestimates the real MTU; if the peer sends
	// packets larger than our buffer we can't read them in full. Start
	// with twice the estimated size so short reads are unlikely.
	recvSize := mtu * 2
	sm.logger.V(0).Info("udp message receiver started", "receive-buf-size", recvSize, "local-addr", sm.LocalAddr())
	recvBuf := make([]byte, recvSize)
	for {
		n, _, flags, fromAddr, err := sm.udpSocket.ReadMsgUDP(recvBuf, nil)
		if err != nil {
			if ctx.Err() != nil {
				// the socket was closed on purpose; this error is expected
				return
			}
			sm.registerSocketError(err)
			continue
		}
		if flags&syscall.MSG_TRUNC != 0 {
			// Partial packet: don't hand it to µTP, which would treat the
			// truncated data as complete. Let packet-loss handling recover.
			continue
		}
		sm.logger.V(10).Info("udp received bytes", "len", n, "remote-addr", fromAddr)
		sm.processIncomingPacket(recvBuf[:n], fromAddr)
	}
}
// registerSocketError logs a socket-level error and records it on the
// manager's error list under socketErrorsLock.
func (sm *socketManager) registerSocketError(err error) {
	sm.socketErrorsLock.Lock()
	defer sm.socketErrorsLock.Unlock()
	sm.logger.Error(err, "socket error")
	sm.socketErrors = append(sm.socketErrors, err)
}
// gotIncomingConnectionCallback is invoked by the libutp layer — with
// baseConnLock already held — when a remote peer opens a new µTP
// connection on this socket. It wraps the libutp socket in a Conn and
// queues it for Accept, or drops it if the accept backlog is full.
func gotIncomingConnectionCallback(userdata interface{}, newBaseConn *libutp.Socket) {
	sm := userdata.(*socketManager)
	// A dial-mode UDP socket has a remote address set; incoming
	// connections are only valid on listening-mode sockets.
	remoteAddr := sm.udpSocket.RemoteAddr()
	if remoteAddr != nil {
		// this is not a listening-mode socket! we'll reject this spurious packet
		_ = newBaseConn.Close()
		return
	}
	// the new Conn holds a reference on the manager until it is closed
	sm.incrementReferences()
	connLogger := sm.logger.WithName("utp-socket").WithValues("dir", "in", "remote-addr", newBaseConn.GetPeerName())
	newUTPConn := &Conn{
		utpSocket: utpSocket{
			localAddr: sm.LocalAddr().(*net.UDPAddr),
			manager:   sm,
		},
		logger:            connLogger,
		baseConn:          newBaseConn,
		closeChan:         make(chan struct{}),
		baseConnDestroyed: make(chan struct{}),
		readBuffer:        buffers.NewSyncBuffer(readBufferSize),
		writeBuffer:       buffers.NewSyncBuffer(writeBufferSize),
	}
	newBaseConn.SetCallbacks(&libutp.CallbackTable{
		OnRead:    onReadCallback,
		OnWrite:   onWriteCallback,
		GetRBSize: getRBSizeCallback,
		OnState:   onStateCallback,
		OnError:   onErrorCallback,
	}, newUTPConn)
	sm.logger.V(1).Info("accepted new connection", "remote-addr", newUTPConn.RemoteAddr())
	select {
	case sm.acceptChan <- newUTPConn:
		// it's the socketManager's problem now
	default:
		sm.logger.Info("dropping new connection because full backlog", "remote-addr", newUTPConn.RemoteAddr())
		// The accept backlog is full; drop this new connection. We can't call
		// (*Conn).Close() from here, because the baseConnLock is already held.
		// Fortunately, most of the steps done there aren't necessary here
		// because we have never exposed this instance to the user.
		_ = newUTPConn.baseConn.Close()
		// This step will decref the socketManager back to where it was before
		// this instance was created.
		_ = newUTPConn.manager.decrementReferences()
		newUTPConn.manager = nil
	}
}
// packetSendCallback is invoked by the libutp layer to transmit one
// outgoing UDP datagram; send failures are recorded, not fatal.
func packetSendCallback(userdata interface{}, buf []byte, addr *net.UDPAddr) {
	manager := userdata.(*socketManager)
	manager.logger.V(10).Info("udp sending bytes", "len", len(buf), "remote-addr", addr.String())
	if _, err := manager.udpSocket.WriteToUDP(buf, addr); err != nil {
		manager.registerSocketError(err)
	}
}
// onReadCallback is invoked by the libutp layer (with baseConnLock held)
// when payload bytes have arrived for this connection. It appends them to
// the connection's read buffer unless the local side is already closing.
func onReadCallback(userdata interface{}, buf []byte) {
	c := userdata.(*Conn)
	c.stateLock.Lock()
	c.stateDebugLogLocked("entering onReadCallback", "got-bytes", len(buf))
	isClosing := c.willClose
	c.stateLock.Unlock()
	if isClosing {
		// the local side has closed the connection; they don't want any additional data
		return
	}
	if ok := c.readBuffer.TryAppend(buf); !ok {
		// I think this should not happen; the flow control mechanism should
		// keep us from getting more data than the (libutp-level) receive
		// buffer can hold.
		used := c.readBuffer.SpaceUsed()
		avail := c.readBuffer.SpaceAvailable()
		// Log the already-sampled `used` value rather than re-querying
		// SpaceUsed(), so "buffer-size" and "buffer-holds" are computed
		// from the same snapshot and cannot disagree.
		c.logger.Error(nil, "receive buffer overflow", "buffer-size", used+avail, "buffer-holds", used, "new-data", len(buf))
		panic("receive buffer overflow")
	}
	c.stateDebugLog("finishing onReadCallback")
}
// onWriteCallback is invoked by the libutp layer (with baseConnLock held)
// when it can accept outgoing payload; it fills buf from the connection's
// write buffer.
func onWriteCallback(userdata interface{}, buf []byte) {
	conn := userdata.(*Conn)
	conn.stateLock.Lock()
	defer conn.stateLock.Unlock()
	conn.stateDebugLogLocked("entering onWriteCallback", "accepting-bytes", len(buf))
	if !conn.writeBuffer.TryConsumeFull(buf) {
		// Should be impossible: libutp only requests up to the byte count
		// we advertised via libutp.(*Socket).Write(), and both that call
		// and this callback run under baseConnLock, so nothing can drain
		// the write buffer in between.
		panic("send buffer underflow")
	}
	conn.stateDebugLogLocked("finishing onWriteCallback")
}
// getRBSizeCallback reports how many bytes currently sit in the receive
// buffer, so the libutp layer can size its advertised receive window.
func getRBSizeCallback(userdata interface{}) int {
	return userdata.(*Conn).readBuffer.SpaceUsed()
}
// onConnectOrWritable handles libutp's "connected" and "writable" state
// transitions: it unblocks a pending connect, then pushes any buffered
// outgoing data down to the libutp layer.
func (c *Conn) onConnectOrWritable(state libutp.State) {
	c.stateLock.Lock()
	c.stateDebugLogLocked("entering onConnectOrWritable", "libutp-state", state)
	if c.connecting {
		// wake up whoever is waiting on connectChan
		c.connecting = false
		close(c.connectChan)
	}
	c.stateLock.Unlock()
	pending := c.writeBuffer.SpaceUsed()
	if pending > 0 {
		c.logger.V(10).Info("initiating write to libutp layer", "len", pending)
		c.baseConn.Write(pending)
	} else {
		c.logger.V(10).Info("nothing to write")
	}
	c.stateDebugLog("finishing onConnectOrWritable")
}
// onConnectionFailure handles a fatal connection-level event (EOF or an
// error reported by libutp): it records the error for future Read calls
// and unblocks any pending reads and writes. The statement order matters:
// the error must be set before the buffers are released.
func (c *Conn) onConnectionFailure(err error) {
	c.stateDebugLog("entering onConnectionFailure", "err-text", err.Error())
	// mark EOF as encountered error, so that it gets returned from
	// subsequent Read calls
	c.setEncounteredError(err)
	// clear out write buffer; we won't be able to send it now. If a call
	// to Close() is already waiting, we don't need to make it wait any
	// longer
	c.writeBuffer.Close()
	// this will allow any pending reads to complete (as short reads)
	c.readBuffer.CloseForWrites()
	c.stateDebugLog("finishing onConnectionFailure")
}
// onStateCallback dispatches libutp state transitions for a connection:
// connect/writable trigger a flush attempt, EOF is treated as a
// connection failure, and destruction releases waiters on
// baseConnDestroyed.
//
// the baseConnLock should already be held when this callback is entered
func onStateCallback(userdata interface{}, state libutp.State) {
	conn := userdata.(*Conn)
	switch state {
	case libutp.StateConnect, libutp.StateWritable:
		conn.onConnectOrWritable(state)
	case libutp.StateEOF:
		conn.onConnectionFailure(io.EOF)
	case libutp.StateDestroying:
		close(conn.baseConnDestroyed)
	}
}
// onErrorCallback handles an error reported by the libutp layer.
// This could be ECONNRESET, ECONNREFUSED, or ETIMEDOUT.
//
// the baseConnLock should already be held when this callback is entered
func onErrorCallback(userdata interface{}, err error) {
	conn := userdata.(*Conn)
	conn.logger.Error(err, "onError callback from libutp layer")
	// any libutp-level error is treated as a total connection failure
	conn.onConnectionFailure(err)
	// Corner case: if the error arrived _during_ the libutp Close() call,
	// libutp would never reach StateDestroying on its own, so prod it with
	// another Close().
	if conn.libutpClosed {
		if closeErr := conn.baseConn.Close(); closeErr != nil {
			conn.logger.Error(closeErr, "error from libutp layer Close()")
		}
	}
}
// ResolveUTPAddr resolves a µTP endpoint address. Accepted networks are
// "utp", "utp4", and "utp6", which map to the corresponding UDP networks;
// any other network name yields net.UnknownNetworkError.
func ResolveUTPAddr(network, address string) (*Addr, error) {
	if network != "utp" && network != "utp4" && network != "utp6" {
		return nil, net.UnknownNetworkError(network)
	}
	// "utp4" -> "udp4", etc.
	udpAddr, err := net.ResolveUDPAddr("udp"+network[3:], address)
	if err != nil {
		return nil, err
	}
	return (*Addr)(udpAddr), nil
}
| return nil, err
}
| conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.